From 8665bd53f2f2e27e5511d90428cb3f60e6d0ce15 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 18 May 2024 20:50:12 +0200
Subject: Merging upstream version 6.8.9.

Signed-off-by: Daniel Baumann
---
 drivers/gpu/drm/Kconfig | 38 +-
 drivers/gpu/drm/Makefile | 15 +-
 drivers/gpu/drm/amd/amdgpu/Makefile | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 38 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 53 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 25 +-
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 4 +-
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 220 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 69 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 28 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h | 2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 146 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 36 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 4 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 1 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 17 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 65 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 5 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 97 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 33 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 28 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 68 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 15 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 12 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 11 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c | 247 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h | 49 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 15 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 65 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 3 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 7 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 117 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 7 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 249 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h | 12 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 106 +-
 drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 414 ++
 drivers/gpu/drm/amd/amdgpu/athub_v3_0.c | 8 +
 drivers/gpu/drm/amd/amdgpu/atom.c | 1 -
 drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 1 +
 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 1 +
 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 1 +
 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 1 +
 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 1 +
 drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 82 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 160 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 10 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 9 -
 drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c | 10 -
 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 2 +
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 4 +-
 drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 3 +-
 drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 3 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 5 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 26 +-
 drivers/gpu/drm/amd/amdgpu/soc15.c | 1 +
 drivers/gpu/drm/amd/amdgpu/soc15.h | 4 +
 drivers/gpu/drm/amd/amdgpu/soc21.c | 32 +-
 drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 80 +-
 drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 8 +-
 drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c | 2 +
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 65 +-
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 19 -
 drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c | 15 +
 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 676 +--
 .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 8 +-
 .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 2 +-
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 27 +-
 .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1 +
 drivers/gpu/drm/amd/amdkfd/kfd_events.c | 4 +
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 181 +-
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 4 +
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 4 +-
 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 9 +
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 13 +-
 drivers/gpu/drm/amd/amdkfd/kfd_process.c | 138 +-
 .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 4 +-
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 191 +-
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 9 +-
 drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 10 +-
 drivers/gpu/drm/amd/display/Makefile | 3 +
 drivers/gpu/drm/amd/display/amdgpu_dm/Makefile | 14 +-
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 475 +-
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 116 +
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 829 ++-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 3 +
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 72 +
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 82 +
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 56 +-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 22 +-
 .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 55 +-
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 232 +-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 11 +-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h | 2 +-
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c | 214 +
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h | 36 +
 drivers/gpu/drm/amd/display/dc/Makefile | 9 +-
 drivers/gpu/drm/amd/display/dc/basics/conversion.c | 3 +-
 drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c | 2 +-
 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 64 +-
 drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 5 +-
 .../drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 2 +
 .../amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 19 +-
 .../amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 108 +-
 .../amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 81 +-
 .../drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 46 +-
 drivers/gpu/drm/amd/display/dc/core/dc.c | 461 +-
 .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 185 +-
 .../gpu/drm/amd/display/dc/core/dc_link_exports.c | 7 +
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 476 +-
 drivers/gpu/drm/amd/display/dc/core/dc_state.c | 880 +++
 drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 147 +-
 drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 8 +-
 drivers/gpu/drm/amd/display/dc/dc.h | 71 +-
 drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 122 +-
 drivers/gpu/drm/amd/display/dc/dc_plane.h | 38 +
 drivers/gpu/drm/amd/display/dc/dc_plane_priv.h | 34 +
 drivers/gpu/drm/amd/display/dc/dc_state.h | 78 +
 drivers/gpu/drm/amd/display/dc/dc_state_priv.h | 102 +
 drivers/gpu/drm/amd/display/dc/dc_stream.h | 80 +-
 drivers/gpu/drm/amd/display/dc/dc_stream_priv.h | 37 +
 drivers/gpu/drm/amd/display/dc/dc_types.h | 88 +-
 drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 4 +-
 drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 8 +-
 drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 4 +-
 drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h | 2 +-
 drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 96 +-
 drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h | 4 +
 drivers/gpu/drm/amd/display/dc/dce100/Makefile | 46 -
 .../drm/amd/display/dc/dce100/dce100_resource.c | 1179 ----
 .../drm/amd/display/dc/dce100/dce100_resource.h | 54 -
 drivers/gpu/drm/amd/display/dc/dce110/Makefile | 6 +-
 .../drm/amd/display/dc/dce110/dce110_resource.c | 1551 -----
 .../drm/amd/display/dc/dce110/dce110_resource.h | 54 -
 drivers/gpu/drm/amd/display/dc/dce112/Makefile | 5 +-
 .../drm/amd/display/dc/dce112/dce112_resource.c | 1431 -----
 .../drm/amd/display/dc/dce112/dce112_resource.h | 57 -
 drivers/gpu/drm/amd/display/dc/dce120/Makefile | 4 +-
 .../drm/amd/display/dc/dce120/dce120_resource.c | 1288 ----
 .../drm/amd/display/dc/dce120/dce120_resource.h | 39 -
 drivers/gpu/drm/amd/display/dc/dce60/Makefile | 2 +-
 drivers/gpu/drm/amd/display/dc/dce80/Makefile | 5 +-
 .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 1544 -----
 .../gpu/drm/amd/display/dc/dce80/dce80_resource.h | 47 -
 drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 4 +-
 .../display/dc/dcn10/dcn10_hw_sequencer_debug.c | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c | 128 -
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h | 33 -
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 1 +
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 1621 -----
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 599 --
 .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1686 ------
 .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.h | 56 -
 drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 6 +-
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 38 +-
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 780 ---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h | 589 --
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 145 -
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h | 33 -
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c | 14 +
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h | 2 +
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 587 --
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h | 124 -
 .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2793 ---------
 .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 171 -
 drivers/gpu/drm/amd/display/dc/dcn201/Makefile | 5 +-
 .../gpu/drm/amd/display/dc/dcn201/dcn201_init.c | 136 -
 .../gpu/drm/amd/display/dc/dcn201/dcn201_init.h | 33 -
 drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c | 1 +
 .../gpu/drm/amd/display/dc/dcn201/dcn201_optc.c | 202 -
 .../gpu/drm/amd/display/dc/dcn201/dcn201_optc.h | 74 -
 .../drm/amd/display/dc/dcn201/dcn201_resource.c | 1308 -----
 .../drm/amd/display/dc/dcn201/dcn201_resource.h | 50 -
 drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 151 -
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h | 33 -
 .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 1745 ------
 .../gpu/drm/amd/display/dc/dcn21/dcn21_resource.h | 56 -
 drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 6 +-
 .../gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c | 5 +-
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c | 23 +
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h | 2 +
 .../gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c | 3 +
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 154 -
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h | 33 -
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 393 --
 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h | 359 --
 .../gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 2611 ---------
 .../gpu/drm/amd/display/dc/dcn30/dcn30_resource.h | 108 -
 drivers/gpu/drm/amd/display/dc/dcn301/Makefile | 5 +-
 .../gpu/drm/amd/display/dc/dcn301/dcn301_init.c | 154 -
 .../gpu/drm/amd/display/dc/dcn301/dcn301_init.h | 33 -
 .../gpu/drm/amd/display/dc/dcn301/dcn301_optc.c | 185 -
 .../gpu/drm/amd/display/dc/dcn301/dcn301_optc.h | 36 -
 .../drm/amd/display/dc/dcn301/dcn301_resource.c | 1728 ------
 .../drm/amd/display/dc/dcn301/dcn301_resource.h | 45 -
 drivers/gpu/drm/amd/display/dc/dcn302/Makefile | 12 -
 .../gpu/drm/amd/display/dc/dcn302/dcn302_init.c | 41 -
 .../gpu/drm/amd/display/dc/dcn302/dcn302_init.h | 33 -
 .../drm/amd/display/dc/dcn302/dcn302_resource.c | 1518 -----
 .../drm/amd/display/dc/dcn302/dcn302_resource.h | 38 -
 drivers/gpu/drm/amd/display/dc/dcn303/Makefile | 2 +-
 .../gpu/drm/amd/display/dc/dcn303/dcn303_init.c | 40 -
 .../gpu/drm/amd/display/dc/dcn303/dcn303_init.h | 33 -
 .../drm/amd/display/dc/dcn303/dcn303_resource.c | 1448 -----
 .../drm/amd/display/dc/dcn303/dcn303_resource.h | 38 -
 drivers/gpu/drm/amd/display/dc/dcn31/Makefile | 4 +-
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 157 -
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h | 33 -
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c | 310 -
 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h | 267 -
 .../gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 2218 -------
 .../gpu/drm/amd/display/dc/dcn31/dcn31_resource.h | 97 -
 drivers/gpu/drm/amd/display/dc/dcn314/Makefile | 3 +-
 .../gpu/drm/amd/display/dc/dcn314/dcn314_init.c | 163 -
 .../gpu/drm/amd/display/dc/dcn314/dcn314_init.h | 34 -
 .../gpu/drm/amd/display/dc/dcn314/dcn314_optc.c | 273 -
 .../gpu/drm/amd/display/dc/dcn314/dcn314_optc.h | 255 -
 .../drm/amd/display/dc/dcn314/dcn314_resource.c | 2180 -------
 .../drm/amd/display/dc/dcn314/dcn314_resource.h | 50 -
 drivers/gpu/drm/amd/display/dc/dcn315/Makefile | 30 -
 .../drm/amd/display/dc/dcn315/dcn315_resource.c | 2151 -------
 .../drm/amd/display/dc/dcn315/dcn315_resource.h | 44 -
 drivers/gpu/drm/amd/display/dc/dcn316/Makefile | 30 -
 .../drm/amd/display/dc/dcn316/dcn316_resource.c | 2038 -------
 .../drm/amd/display/dc/dcn316/dcn316_resource.h | 44 -
 drivers/gpu/drm/amd/display/dc/dcn32/Makefile | 8 +-
 .../amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 85 +-
 .../amd/display/dc/dcn32/dcn32_dio_link_encoder.h | 5 +
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 169 -
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h | 33 -
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c | 3 +-
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c | 374 --
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h | 187 -
 .../gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 2880 ---------
 .../gpu/drm/amd/display/dc/dcn32/dcn32_resource.h | 1268 ----
 .../amd/display/dc/dcn32/dcn32_resource_helpers.c | 148 +-
 drivers/gpu/drm/amd/display/dc/dcn321/Makefile | 2 +-
 .../drm/amd/display/dc/dcn321/dcn321_resource.c | 2068 -------
 .../drm/amd/display/dc/dcn321/dcn321_resource.h | 45 -
 drivers/gpu/drm/amd/display/dc/dcn35/Makefile | 6 +-
 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c | 92 +-
 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h | 58 +-
 .../amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 4 +-
 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c | 60 -
 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h | 59 -
 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c | 171 -
 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h | 34 -
 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c | 300 -
 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h | 74 -
 .../gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c | 10 +-
 .../gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h | 1 -
 .../gpu/drm/amd/display/dc/dcn35/dcn35_resource.c | 2148 -------
 .../gpu/drm/amd/display/dc/dcn35/dcn35_resource.h | 310 -
 drivers/gpu/drm/amd/display/dc/dm_helpers.h | 12 +-
 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 2 +
 .../gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 2 +-
 .../gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 5 +-
 .../amd/display/dc/dml/dcn30/display_mode_vba_30.c | 29 +-
 .../gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 158 +-
 .../gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 29 +-
 .../amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 26 +-
 .../gpu/drm/amd/display/dc/dml2/dml2_dc_types.h | 1 +
 .../drm/amd/display/dc/dml2/dml2_mall_phantom.c | 89 +-
 .../amd/display/dc/dml2/dml2_translation_helper.c | 26 +-
 drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 12 +-
 drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h | 2 +-
 drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 4 +-
 drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 35 +-
 drivers/gpu/drm/amd/display/dc/dsc/Makefile | 26 +
 drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 10 +-
 .../gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c | 780 +++
 .../gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h | 589 ++
 .../gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c | 60 +
 .../gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h | 59 +
 drivers/gpu/drm/amd/display/dc/dsc/dsc.h | 112 +
 drivers/gpu/drm/amd/display/dc/hwss/Makefile | 28 +-
 .../gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h | 15 +-
 .../drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 24 +-
 .../drm/amd/display/dc/hwss/dce110/dce110_hwseq.h | 4 +
 .../drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 39 +-
 .../drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h | 7 +-
 .../gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c | 128 +
 .../gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h | 33 +
 .../drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 82 +-
 .../drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h | 6 +-
 .../gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c | 145 +
 .../gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h | 33 +
 .../drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c | 8 +-
 .../drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h | 2 +-
 .../drm/amd/display/dc/hwss/dcn201/dcn201_init.c | 136 +
 .../drm/amd/display/dc/hwss/dcn201/dcn201_init.h | 33 +
 .../drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c | 4 +-
 .../gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c | 151 +
 .../gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h | 33 +
 .../drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 15 +-
 .../gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c | 154 +
 .../gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h | 33 +
 .../drm/amd/display/dc/hwss/dcn301/dcn301_init.c | 154 +
 .../drm/amd/display/dc/hwss/dcn301/dcn301_init.h | 33 +
 .../drm/amd/display/dc/hwss/dcn302/dcn302_init.c | 41 +
 .../drm/amd/display/dc/hwss/dcn302/dcn302_init.h | 33 +
 .../drm/amd/display/dc/hwss/dcn303/dcn303_init.c | 40 +
 .../drm/amd/display/dc/hwss/dcn303/dcn303_init.h | 33 +
 .../drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 10 +-
 .../gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c | 157 +
 .../gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h | 33 +
 .../drm/amd/display/dc/hwss/dcn314/dcn314_init.c | 163 +
 .../drm/amd/display/dc/hwss/dcn314/dcn314_init.h | 34 +
 .../drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 176 +-
 .../drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h | 4 +
 .../gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c | 172 +
 .../gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h | 33 +
 .../drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 272 +-
 .../drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 12 +-
 .../gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 172 +
 .../gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h | 34 +
 .../drm/amd/display/dc/hwss/dcn351/CMakeLists.txt | 4 +
 .../gpu/drm/amd/display/dc/hwss/dcn351/Makefile | 17 +
 .../drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 171 +
 .../drm/amd/display/dc/hwss/dcn351/dcn351_init.h | 33 +
 drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 24 +-
 .../drm/amd/display/dc/hwss/hw_sequencer_private.h | 8 +
 drivers/gpu/drm/amd/display/dc/inc/core_types.h | 31 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 2 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 19 +
 drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 4 +-
 drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h | 112 -
 drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 4 +
 drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 1 +
 drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 3 +
 drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h | 1 +
 drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h | 2 -
 .../drm/amd/display/dc/inc/hw/timing_generator.h | 1 +
 drivers/gpu/drm/amd/display/dc/inc/link.h | 7 +-
 drivers/gpu/drm/amd/display/dc/inc/resource.h | 16 +-
 drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 105 +-
 drivers/gpu/drm/amd/display/dc/link/link_factory.c | 2 +
 .../gpu/drm/amd/display/dc/link/link_validation.h | 1 +
 .../display/dc/link/protocols/link_dp_capability.c | 14 +-
 .../amd/display/dc/link/protocols/link_dp_dpia.c | 36 +-
 .../dc/link/protocols/link_dp_irq_handler.c | 6 +-
 .../dc/link/protocols/link_dp_training_dpia.c | 6 +-
 .../link_dp_training_fixed_vs_pe_retimer.c | 10 +
 .../dc/link/protocols/link_edp_panel_control.c | 62 +-
 .../dc/link/protocols/link_edp_panel_control.h | 7 +-
 drivers/gpu/drm/amd/display/dc/optc/Makefile | 108 +
 .../gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c | 1621 +++++
 .../gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 600 ++
 .../gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c | 587 ++
 .../gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h | 124 +
 .../drm/amd/display/dc/optc/dcn201/dcn201_optc.c | 202 +
 .../drm/amd/display/dc/optc/dcn201/dcn201_optc.h | 74 +
 .../gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c | 393 ++
 .../gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h | 359 ++
 .../drm/amd/display/dc/optc/dcn301/dcn301_optc.c | 185 +
 .../drm/amd/display/dc/optc/dcn301/dcn301_optc.h | 36 +
 .../gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c | 310 +
 .../gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h | 267 +
 .../drm/amd/display/dc/optc/dcn314/dcn314_optc.c | 273 +
 .../drm/amd/display/dc/optc/dcn314/dcn314_optc.h | 255 +
 .../gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 379 ++
 .../gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h | 188 +
 .../gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c | 300 +
 .../gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h | 74 +
 drivers/gpu/drm/amd/display/dc/resource/Makefile | 199 +
 .../display/dc/resource/dce100/dce100_resource.c | 1179 ++++
 .../display/dc/resource/dce100/dce100_resource.h | 54 +
 .../display/dc/resource/dce110/dce110_resource.c | 1551 +++++
 .../display/dc/resource/dce110/dce110_resource.h | 54 +
 .../display/dc/resource/dce112/dce112_resource.c | 1431 +++++
 .../display/dc/resource/dce112/dce112_resource.h | 57 +
 .../display/dc/resource/dce120/dce120_resource.c | 1288 ++++
 .../display/dc/resource/dce120/dce120_resource.h | 39 +
 .../amd/display/dc/resource/dce80/CMakeLists.txt | 4 +
 .../amd/display/dc/resource/dce80/dce80_resource.c | 1544 +++++
 .../amd/display/dc/resource/dce80/dce80_resource.h | 47 +
 .../amd/display/dc/resource/dcn10/dcn10_resource.c | 1692 ++++++
 .../amd/display/dc/resource/dcn10/dcn10_resource.h | 56 +
 .../amd/display/dc/resource/dcn20/dcn20_resource.c | 2793 +++++++++
 .../amd/display/dc/resource/dcn20/dcn20_resource.h | 171 +
 .../display/dc/resource/dcn201/dcn201_resource.c | 1308 +++++
 .../display/dc/resource/dcn201/dcn201_resource.h | 50 +
 .../amd/display/dc/resource/dcn21/dcn21_resource.c | 1744 ++++++
 .../amd/display/dc/resource/dcn21/dcn21_resource.h | 56 +
 .../amd/display/dc/resource/dcn30/dcn30_resource.c | 2613 +++++++++
 .../amd/display/dc/resource/dcn30/dcn30_resource.h | 108 +
 .../display/dc/resource/dcn301/dcn301_resource.c | 1728 ++++++
 .../display/dc/resource/dcn301/dcn301_resource.h | 45 +
 .../display/dc/resource/dcn302/dcn302_resource.c | 1518 +++++
 .../display/dc/resource/dcn302/dcn302_resource.h | 38 +
 .../display/dc/resource/dcn303/dcn303_resource.c | 1448 +++++
 .../display/dc/resource/dcn303/dcn303_resource.h | 38 +
 .../amd/display/dc/resource/dcn31/dcn31_resource.c | 2218 +++++++
 .../amd/display/dc/resource/dcn31/dcn31_resource.h | 97 +
 .../display/dc/resource/dcn314/dcn314_resource.c | 2180 +++++++
 .../display/dc/resource/dcn314/dcn314_resource.h | 50 +
 .../display/dc/resource/dcn315/dcn315_resource.c | 2153 +++++++
 .../display/dc/resource/dcn315/dcn315_resource.h | 44 +
 .../display/dc/resource/dcn316/dcn316_resource.c | 2038 +++++++
 .../display/dc/resource/dcn316/dcn316_resource.h | 44 +
 .../amd/display/dc/resource/dcn32/dcn32_resource.c | 2784 +++++++++
 .../amd/display/dc/resource/dcn32/dcn32_resource.h | 1264 ++++
 .../display/dc/resource/dcn321/dcn321_resource.c | 2069 +++++++
 .../display/dc/resource/dcn321/dcn321_resource.h | 45 +
 .../amd/display/dc/resource/dcn35/dcn35_resource.c | 2180 +++++++
 .../amd/display/dc/resource/dcn35/dcn35_resource.h | 311 +
 drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 1 +
 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 212 +-
 drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 6 +-
 .../gpu/drm/amd/display/include/hdcp_msg_types.h | 5 +
 .../drm/amd/display/modules/freesync/freesync.c | 10 +-
 .../drm/amd/display/modules/hdcp/hdcp1_execution.c | 4 +-
 .../drm/amd/display/modules/hdcp/hdcp2_execution.c | 6 +-
 .../gpu/drm/amd/display/modules/hdcp/hdcp_log.h | 10 +-
 .../gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 4 +-
 .../gpu/drm/amd/display/modules/hdcp/hdcp_psp.h | 10 +-
 .../gpu/drm/amd/display/modules/inc/mod_freesync.h | 28 -
 .../gpu/drm/amd/display/modules/inc/mod_stats.h | 4 +-
 .../drm/amd/display/modules/power/power_helpers.c | 35 +-
 .../drm/amd/display/modules/power/power_helpers.h | 6 +
 drivers/gpu/drm/amd/include/amd_shared.h | 4 +
 drivers/gpu/drm/amd/include/amdgpu_reg_state.h | 153 +
 .../amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h | 8 +
 .../amd/include/asic_reg/nbio/nbio_7_11_0_offset.h | 8 +-
 .../include/asic_reg/smuio/smuio_10_0_2_offset.h | 102 +
 .../include/asic_reg/smuio/smuio_10_0_2_sh_mask.h | 184 +
 drivers/gpu/drm/amd/include/kgd_pp_interface.h | 18 +-
 drivers/gpu/drm/amd/include/mes_v11_api_def.h | 1 +
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 35 +-
 drivers/gpu/drm/amd/pm/amdgpu_pm.c | 75 +-
 drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 13 +
 drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 11 +-
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c | 7 +-
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h | 2 +-
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c | 9 +-
 drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h | 2 +-
 .../gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c | 9 +-
 .../gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h | 2 +-
 drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 2 +-
 .../gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c | 1 +
 .../drm/amd/pm/powerplay/smumgr/iceland_smumgr.c | 1 +
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 235 +-
 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 52 +
 .../pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h | 3 +-
 .../pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h | 3 +-
 .../pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h | 5 -
 .../amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h | 5 +-
 .../amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h | 3 +-
 drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 4 +-
 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 11 +-
 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 2 +-
 drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2 -
 drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2 -
 .../drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 -
 drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 1 -
 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 129 +-
 .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 84 +-
 .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 12 +-
 .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 100 +-
 .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 51 +-
 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 6 +-
 .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 20 +
 drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 3 +
 drivers/gpu/drm/arm/malidp_crtc.c | 2 +-
 drivers/gpu/drm/armada/armada_crtc.c | 29 +-
 drivers/gpu/drm/armada/armada_drv.c | 5 +-
 drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 10 +-
 drivers/gpu/drm/ast/ast_dp.c | 3 +
 drivers/gpu/drm/ast/ast_drv.c | 263 +-
 drivers/gpu/drm/ast/ast_drv.h | 101 +-
 drivers/gpu/drm/ast/ast_i2c.c | 1 -
 drivers/gpu/drm/ast/ast_main.c | 244 +-
 drivers/gpu/drm/ast/ast_mode.c | 26 +-
 drivers/gpu/drm/ast/ast_post.c | 73 +-
 drivers/gpu/drm/ast/ast_reg.h | 12 +-
 drivers/gpu/drm/bridge/Kconfig | 17 +
 drivers/gpu/drm/bridge/Makefile | 2 +
 drivers/gpu/drm/bridge/aux-bridge.c | 141 +
 drivers/gpu/drm/bridge/aux-hpd-bridge.c | 203 +
 .../gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 22 +-
 drivers/gpu/drm/bridge/lontium-lt8912b.c | 58 +
 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 1 -
 drivers/gpu/drm/bridge/ti-sn65dsi86.c | 20 +-
 drivers/gpu/drm/bridge/ti-tpd12s015.c | 6 +-
 drivers/gpu/drm/ci/arm64.config | 1 +
 drivers/gpu/drm/ci/build.sh | 19 +-
 drivers/gpu/drm/ci/gitlab-ci.yml | 16 +-
 drivers/gpu/drm/ci/igt_runner.sh | 10 +-
 drivers/gpu/drm/ci/test.yml | 14 +-
 .../gpu/drm/ci/xfails/mediatek-mt8173-fails.txt | 13 +-
 drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt | 5 +
 .../gpu/drm/ci/xfails/virtio_gpu-none-fails.txt | 46 +
 drivers/gpu/drm/display/drm_dp_helper.c | 169 +-
 drivers/gpu/drm/display/drm_dp_mst_topology.c | 223 +-
 drivers/gpu/drm/drm_agpsupport.c | 451 --
 drivers/gpu/drm/drm_atomic.c | 10 +
 drivers/gpu/drm/drm_atomic_helper.c | 20 +-
 drivers/gpu/drm/drm_atomic_state_helper.c | 15 +
 drivers/gpu/drm/drm_atomic_uapi.c | 149 +-
 drivers/gpu/drm/drm_auth.c | 8 +-
 drivers/gpu/drm/drm_bridge.c | 44 -
 drivers/gpu/drm/drm_bridge_connector.c | 6 -
 drivers/gpu/drm/drm_bufs.c | 1627 -----
 drivers/gpu/drm/drm_client.c | 12 +-
 drivers/gpu/drm/drm_client_modeset.c | 3 +-
 drivers/gpu/drm/drm_connector.c | 6 +
 drivers/gpu/drm/drm_context.c | 513 --
 drivers/gpu/drm/drm_crtc.c | 1 +
 drivers/gpu/drm/drm_crtc_helper.c | 7 +-
 drivers/gpu/drm/drm_crtc_internal.h | 4 +-
 drivers/gpu/drm/drm_debugfs.c | 65 +-
 drivers/gpu/drm/drm_dma.c | 178 -
 drivers/gpu/drm/drm_drv.c | 17 -
 drivers/gpu/drm/drm_edid.c | 43 +-
 drivers/gpu/drm/drm_edid_load.c | 16 -
 drivers/gpu/drm/drm_eld.c | 55 +
 drivers/gpu/drm/drm_encoder.c | 4 +
 drivers/gpu/drm/drm_exec.c | 13 +-
 drivers/gpu/drm/drm_file.c | 66 +-
 drivers/gpu/drm/drm_flip_work.c | 27 +-
 drivers/gpu/drm/drm_format_helper.c | 215 +-
 drivers/gpu/drm/drm_framebuffer.c | 80 +-
 drivers/gpu/drm/drm_gem_atomic_helper.c | 9 +
 drivers/gpu/drm/drm_gpuvm.c | 1168 +++-
 drivers/gpu/drm/drm_hashtab.c | 203 -
 drivers/gpu/drm/drm_internal.h | 23 +-
 drivers/gpu/drm/drm_ioc32.c | 613 +-
 drivers/gpu/drm/drm_ioctl.c | 96 +-
 drivers/gpu/drm/drm_irq.c | 204 -
 drivers/gpu/drm/drm_kms_helper_common.c | 32 -
 drivers/gpu/drm/drm_legacy.h | 290 -
 drivers/gpu/drm/drm_legacy_misc.c | 105 -
 drivers/gpu/drm/drm_lock.c | 373 --
 drivers/gpu/drm/drm_memory.c | 138 -
 drivers/gpu/drm/drm_mipi_dbi.c | 19 +-
 drivers/gpu/drm/drm_mode_object.c | 2 +-
 drivers/gpu/drm/drm_modes.c | 6 +-
 drivers/gpu/drm/drm_modeset_helper.c | 19 +-
 drivers/gpu/drm/drm_panel_orientation_quirks.c | 12 +
 drivers/gpu/drm/drm_pci.c | 204 +-
 drivers/gpu/drm/drm_plane.c | 137 +-
 drivers/gpu/drm/drm_plane_helper.c | 32 -
 drivers/gpu/drm/drm_prime.c | 7 +-
 drivers/gpu/drm/drm_probe_helper.c | 29 +-
 drivers/gpu/drm/drm_property.c | 59 +
 drivers/gpu/drm/drm_scatter.c | 220 -
 drivers/gpu/drm/drm_syncobj.c | 64 +-
 drivers/gpu/drm/drm_vblank.c | 101 -
 drivers/gpu/drm/drm_vm.c | 665 ---
 drivers/gpu/drm/etnaviv/etnaviv_drv.c | 6 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 7 +-
 drivers/gpu/drm/etnaviv/etnaviv_sched.c | 2 +-
 drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 6 +-
 drivers/gpu/drm/exynos/exynos7_drm_decon.c | 6 +-
 drivers/gpu/drm/exynos/exynos_dp.c | 6 +-
 drivers/gpu/drm/exynos/exynos_drm_dpi.c | 2 +-
 drivers/gpu/drm/exynos/exynos_drm_drv.c | 5 +-
 drivers/gpu/drm/exynos/exynos_drm_fimc.c | 6 +-
 drivers/gpu/drm/exynos/exynos_drm_fimd.c | 8 +-
 drivers/gpu/drm/exynos/exynos_drm_g2d.c | 6 +-
 drivers/gpu/drm/exynos/exynos_drm_gsc.c | 15 +-
 drivers/gpu/drm/exynos/exynos_drm_mic.c | 6 +-
 drivers/gpu/drm/exynos/exynos_drm_rotator.c | 6 +-
 drivers/gpu/drm/exynos/exynos_drm_scaler.c | 6 +-
 drivers/gpu/drm/exynos/exynos_drm_vidi.c | 6 +-
 drivers/gpu/drm/exynos/exynos_hdmi.c | 6 +-
 drivers/gpu/drm/exynos/exynos_mixer.c | 6 +-
 drivers/gpu/drm/gma500/Makefile | 1 -
 drivers/gpu/drm/gma500/cdv_intel_dp.c | 1 -
 drivers/gpu/drm/gma500/intel_gmbus.c | 1 -
 drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 1 -
 drivers/gpu/drm/gma500/psb_device.c | 5 +-
 drivers/gpu/drm/gma500/psb_drv.h | 9 -
 drivers/gpu/drm/gma500/psb_intel_sdvo.c | 1 -
 drivers/gpu/drm/gma500/psb_lid.c | 80 -
 drivers/gpu/drm/gud/gud_pipe.c | 30 +-
 drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c | 1 -
 drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 8 +-
 drivers/gpu/drm/i915/Kconfig | 4 +-
 drivers/gpu/drm/i915/Kconfig.debug | 18 +
 drivers/gpu/drm/i915/Makefile | 192 +-
 drivers/gpu/drm/i915/display/g4x_dp.c | 44 +-
 drivers/gpu/drm/i915/display/g4x_hdmi.c | 66 +-
 drivers/gpu/drm/i915/display/hsw_ips.c | 4 +-
 drivers/gpu/drm/i915/display/i9xx_wm.c | 12 +-
 drivers/gpu/drm/i915/display/icl_dsi.c | 10 +-
 drivers/gpu/drm/i915/display/intel_atomic.c | 3 -
 drivers/gpu/drm/i915/display/intel_atomic_plane.c | 83 +-
 drivers/gpu/drm/i915/display/intel_audio.c | 17 +-
 drivers/gpu/drm/i915/display/intel_backlight.c | 15 +-
 drivers/gpu/drm/i915/display/intel_bios.c | 40 +-
 drivers/gpu/drm/i915/display/intel_bw.c | 7 +-
 drivers/gpu/drm/i915/display/intel_cdclk.c | 160 +-
 drivers/gpu/drm/i915/display/intel_cdclk.h | 3 +
 drivers/gpu/drm/i915/display/intel_color.c | 70 +-
 drivers/gpu/drm/i915/display/intel_crt.c | 4 +-
 drivers/gpu/drm/i915/display/intel_crtc.c | 9 +-
 .../gpu/drm/i915/display/intel_crtc_state_dump.c | 10 +
 drivers/gpu/drm/i915/display/intel_cursor.c | 42 +-
 drivers/gpu/drm/i915/display/intel_cx0_phy.c | 251 +-
 drivers/gpu/drm/i915/display/intel_cx0_phy.h | 16 +-
 drivers/gpu/drm/i915/display/intel_ddi.c | 230 +-
 drivers/gpu/drm/i915/display/intel_ddi.h | 8 +-
 drivers/gpu/drm/i915/display/intel_display.c | 589 +-
 drivers/gpu/drm/i915/display/intel_display.h | 9 +-
 drivers/gpu/drm/i915/display/intel_display_core.h | 26 +-
 .../gpu/drm/i915/display/intel_display_debugfs.c | 237 +-
 .../i915/display/intel_display_debugfs_params.c | 176 +
 .../i915/display/intel_display_debugfs_params.h | 13 +
 .../gpu/drm/i915/display/intel_display_device.c | 13 +-
 .../gpu/drm/i915/display/intel_display_device.h | 6 +-
 .../gpu/drm/i915/display/intel_display_driver.c | 14 +-
 drivers/gpu/drm/i915/display/intel_display_irq.c | 19 +-
 .../gpu/drm/i915/display/intel_display_params.c | 217 +
 .../gpu/drm/i915/display/intel_display_params.h | 61 +
 drivers/gpu/drm/i915/display/intel_display_power.c | 24 +-
 .../drm/i915/display/intel_display_power_well.c | 23 +-
 drivers/gpu/drm/i915/display/intel_display_reset.c | 2 +-
 drivers/gpu/drm/i915/display/intel_display_types.h | 45 +-
 drivers/gpu/drm/i915/display/intel_dmc.c | 106 +-
 drivers/gpu/drm/i915/display/intel_dmc_regs.h | 1 +
 drivers/gpu/drm/i915/display/intel_dp.c | 514 +-
 drivers/gpu/drm/i915/display/intel_dp.h | 28 +-
 drivers/gpu/drm/i915/display/intel_dp_aux.c | 99 +-
 .../gpu/drm/i915/display/intel_dp_aux_backlight.c | 4 +-
 drivers/gpu/drm/i915/display/intel_dp_aux_regs.h | 14 +-
 drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 47 +-
 drivers/gpu/drm/i915/display/intel_dp_mst.c | 662 ++-
 drivers/gpu/drm/i915/display/intel_dp_mst.h | 5 +
 drivers/gpu/drm/i915/display/intel_dpio_phy.c | 171 +-
 drivers/gpu/drm/i915/display/intel_dpio_phy.h | 5 +
 drivers/gpu/drm/i915/display/intel_dpll.c | 270 +-
 drivers/gpu/drm/i915/display/intel_dpll.h | 9 +-
 drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 59 +-
 drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 6 +
 drivers/gpu/drm/i915/display/intel_dpt.c | 24 -
 drivers/gpu/drm/i915/display/intel_dpt.h | 2 -
 drivers/gpu/drm/i915/display/intel_dpt_common.c | 34 +
 drivers/gpu/drm/i915/display/intel_dpt_common.h | 13 +
 drivers/gpu/drm/i915/display/intel_dsb.c | 98 +-
 drivers/gpu/drm/i915/display/intel_dsb_buffer.c | 82 +
 drivers/gpu/drm/i915/display/intel_dsb_buffer.h | 29 +
 drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 368 +-
 drivers/gpu/drm/i915/display/intel_dsi_vbt.h | 1 -
 drivers/gpu/drm/i915/display/intel_fb.c | 168 +-
 drivers/gpu/drm/i915/display/intel_fb.h | 2 +
 drivers/gpu/drm/i915/display/intel_fb_bo.c | 97 +
 drivers/gpu/drm/i915/display/intel_fb_bo.h | 26 +
 drivers/gpu/drm/i915/display/intel_fb_pin.c | 10 +
 drivers/gpu/drm/i915/display/intel_fbc.c | 59 +-
 drivers/gpu/drm/i915/display/intel_fbdev.c | 112 +-
 drivers/gpu/drm/i915/display/intel_fbdev_fb.c | 115 +
 drivers/gpu/drm/i915/display/intel_fbdev_fb.h | 21 +
 drivers/gpu/drm/i915/display/intel_fdi.c | 8 +-
 drivers/gpu/drm/i915/display/intel_gmbus.c | 1 -
 drivers/gpu/drm/i915/display/intel_hdcp.c | 37 +-
 drivers/gpu/drm/i915/display/intel_hdcp.h | 8 +-
 drivers/gpu/drm/i915/display/intel_hdmi.c | 10 -
 drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 16 +
 drivers/gpu/drm/i915/display/intel_link_bw.c | 30 +-
 drivers/gpu/drm/i915/display/intel_link_bw.h | 1 +
 drivers/gpu/drm/i915/display/intel_lvds.c | 6 +-
 drivers/gpu/drm/i915/display/intel_modeset_setup.c | 7 +-
 .../gpu/drm/i915/display/intel_modeset_verify.c | 2 +-
 drivers/gpu/drm/i915/display/intel_opregion.c | 2 +-
 drivers/gpu/drm/i915/display/intel_panel.c | 4 +-
 drivers/gpu/drm/i915/display/intel_pch_display.c | 1 +
 drivers/gpu/drm/i915/display/intel_pps.c | 2 +-
 drivers/gpu/drm/i915/display/intel_psr.c | 471 +-
 drivers/gpu/drm/i915/display/intel_psr.h | 17 +-
 drivers/gpu/drm/i915/display/intel_psr_regs.h | 2 +
 drivers/gpu/drm/i915/display/intel_qp_tables.c | 3 -
 drivers/gpu/drm/i915/display/intel_sdvo.c | 25 +-
 drivers/gpu/drm/i915/display/intel_snps_phy.c | 2 +-
 drivers/gpu/drm/i915/display/intel_sprite.c | 7 +-
 drivers/gpu/drm/i915/display/intel_tc.c | 25 +-
 drivers/gpu/drm/i915/display/intel_tv.c | 6 -
 drivers/gpu/drm/i915/display/intel_vblank.c | 51 +-
 drivers/gpu/drm/i915/display/intel_vdsc.c | 50 +-
 drivers/gpu/drm/i915/display/intel_vrr.c | 7 +
 drivers/gpu/drm/i915/display/skl_universal_plane.c | 109 +-
 drivers/gpu/drm/i915/display/skl_watermark.c | 5 +-
 drivers/gpu/drm/i915/display/vlv_dsi.c | 31 +-
 drivers/gpu/drm/i915/gem/i915_gem_context.c | 11 +-
 drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 7 +-
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 27 +-
 drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.c | 21 +-
 drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 12 +
 drivers/gpu/drm/i915/gem/i915_gem_phys.c | 10 +-
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 6 +-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 21 +
 drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 8 +-
 .../drm/i915/gem/selftests/i915_gem_coherency.c | 22 +-
 .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 8 +-
 .../gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 2 +-
 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 14 +-
 drivers/gpu/drm/i915/gem/selftests/mock_context.c | 4 +-
 drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 4 +-
 drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 46 +
 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 13 +-
 drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h | 3 +-
 drivers/gpu/drm/i915/gt/intel_context.c | 14 +
 drivers/gpu/drm/i915/gt/intel_context.h | 4 +-
 drivers/gpu/drm/i915/gt/intel_context_types.h | 2 +
 drivers/gpu/drm/i915/gt/intel_engine_cs.c | 19 +-
 drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 2 +-
 drivers/gpu/drm/i915/gt/intel_engine_pm.c | 7 +-
 drivers/gpu/drm/i915/gt/intel_engine_pm.h | 1 +
 drivers/gpu/drm/i915/gt/intel_engine_regs.h | 8 +
 drivers/gpu/drm/i915/gt/intel_engine_types.h | 2 +
 .../gpu/drm/i915/gt/intel_execlists_submission.c | 2 +-
 drivers/gpu/drm/i915/gt/intel_ggtt.c | 23 +-
 drivers/gpu/drm/i915/gt/intel_gsc.h | 7 +-
 drivers/gpu/drm/i915/gt/intel_gt.c | 8 +-
 drivers/gpu/drm/i915/gt/intel_gt.h | 24 +
 drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c | 39 +
 drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h | 13 +
 drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c | 2 +-
 drivers/gpu/drm/i915/gt/intel_gt_mcr.c | 3 +-
 drivers/gpu/drm/i915/gt/intel_gt_pm.c | 14 +-
 drivers/gpu/drm/i915/gt/intel_gt_pm.h | 38 +-
 drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 4 +-
 drivers/gpu/drm/i915/gt/intel_gt_regs.h | 12 +
 drivers/gpu/drm/i915/gt/intel_gtt.c | 26 +
 drivers/gpu/drm/i915/gt/intel_gtt.h | 5 +
 drivers/gpu/drm/i915/gt/intel_lrc.c | 100 +-
 drivers/gpu/drm/i915/gt/intel_sseu.c | 7 +-
 drivers/gpu/drm/i915/gt/intel_workarounds.c | 96 +-
 drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 20 +-
 .../gpu/drm/i915/gt/selftest_engine_heartbeat.c | 2 +-
 drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 5 +-
 drivers/gpu/drm/i915/gt/selftest_lrc.c | 65 +-
 drivers/gpu/drm/i915/gt/selftest_reset.c | 10 +-
 drivers/gpu/drm/i915/gt/selftest_rps.c | 17 +-
 drivers/gpu/drm/i915/gt/selftest_slpc.c | 5 +-
 drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c | 2 +
 drivers/gpu/drm/i915/gt/uc/intel_guc.c | 2 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc.h | 79 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c | 2 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 11 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 10 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c | 2 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 2 +-
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 18 +-
 drivers/gpu/drm/i915/gt/uc/intel_uc.c | 5 -
 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 5 +-
 drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 115 +
 .../gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c | 2 +-
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 2 +-
 drivers/gpu/drm/i915/gvt/fb_decoder.c | 6 +-
 drivers/gpu/drm/i915/gvt/handlers.c | 3 +-
 drivers/gpu/drm/i915/i915_cmd_parser.c | 4 +-
 drivers/gpu/drm/i915/i915_debugfs.c | 112 +-
 drivers/gpu/drm/i915/i915_driver.c | 14 +-
 drivers/gpu/drm/i915/i915_drm_client.c | 108 +
 drivers/gpu/drm/i915/i915_drm_client.h | 42 +
 drivers/gpu/drm/i915/i915_drv.h | 20 +-
 drivers/gpu/drm/i915/i915_gem.c | 2 -
 drivers/gpu/drm/i915/i915_gpu_error.c | 199 +-
 drivers/gpu/drm/i915/i915_gpu_error.h | 34 +-
 drivers/gpu/drm/i915/i915_memcpy.c | 2 +
 drivers/gpu/drm/i915/i915_params.c | 89 -
 drivers/gpu/drm/i915/i915_params.h | 22 -
 drivers/gpu/drm/i915/i915_perf.c | 2 +-
 drivers/gpu/drm/i915/i915_perf_types.h | 9 +-
 drivers/gpu/drm/i915/i915_pmu.c | 77 +-
 drivers/gpu/drm/i915/i915_reg.h | 2 -
 drivers/gpu/drm/i915/i915_sysfs.c | 79 +-
 drivers/gpu/drm/i915/i915_utils.h | 2 +-
 drivers/gpu/drm/i915/i915_vma.c | 50 +-
 drivers/gpu/drm/i915/intel_gvt.c | 2 +-
 drivers/gpu/drm/i915/intel_memory_region.c | 19 +
 drivers/gpu/drm/i915/intel_memory_region.h | 1 +
 drivers/gpu/drm/i915/intel_runtime_pm.c | 243 +-
 drivers/gpu/drm/i915/intel_runtime_pm.h | 13 +-
 drivers/gpu/drm/i915/intel_wakeref.c | 35 +-
 drivers/gpu/drm/i915/intel_wakeref.h | 73 +-
 drivers/gpu/drm/i915/pxp/intel_pxp.c | 18 +-
 drivers/gpu/drm/i915/pxp/intel_pxp_irq.c | 5 +-
 drivers/gpu/drm/i915/pxp/intel_pxp_session.c | 6 +-
 drivers/gpu/drm/i915/pxp/intel_pxp_types.h | 1 +
 drivers/gpu/drm/i915/selftests/i915_syncmap.c | 2 +-
 .../drm/i915/selftests/intel_scheduler_helpers.c | 6 +-
 drivers/gpu/drm/i915/selftests/intel_uncore.c | 2 +
 drivers/gpu/drm/i915/soc/intel_gmch.c | 27 +-
 drivers/gpu/drm/i915/vlv_sideband.c | 29 +-
 drivers/gpu/drm/i915/vlv_sideband.h | 9 +-
 drivers/gpu/drm/imagination/Kconfig | 18 +
 drivers/gpu/drm/imagination/Makefile | 35 +
 drivers/gpu/drm/imagination/pvr_ccb.c | 645 ++
 drivers/gpu/drm/imagination/pvr_ccb.h | 71 +
 drivers/gpu/drm/imagination/pvr_cccb.c | 267 +
 drivers/gpu/drm/imagination/pvr_cccb.h | 110 +
 drivers/gpu/drm/imagination/pvr_context.c | 464 ++
 drivers/gpu/drm/imagination/pvr_context.h | 205 +
 drivers/gpu/drm/imagination/pvr_debugfs.c | 53 +
 drivers/gpu/drm/imagination/pvr_debugfs.h | 29 +
 drivers/gpu/drm/imagination/pvr_device.c | 658 +++
 drivers/gpu/drm/imagination/pvr_device.h | 725 +++
 drivers/gpu/drm/imagination/pvr_device_info.c | 255 +
 drivers/gpu/drm/imagination/pvr_device_info.h | 186 +
 drivers/gpu/drm/imagination/pvr_drv.c | 1501 +++++
 drivers/gpu/drm/imagination/pvr_drv.h | 129 +
 drivers/gpu/drm/imagination/pvr_free_list.c | 625 ++
 drivers/gpu/drm/imagination/pvr_free_list.h | 195 +
 drivers/gpu/drm/imagination/pvr_fw.c | 1489 +++++
 drivers/gpu/drm/imagination/pvr_fw.h | 509 ++
 drivers/gpu/drm/imagination/pvr_fw_info.h | 135 +
 drivers/gpu/drm/imagination/pvr_fw_meta.c | 555 ++
 drivers/gpu/drm/imagination/pvr_fw_meta.h | 14 +
 drivers/gpu/drm/imagination/pvr_fw_mips.c | 252 +
 drivers/gpu/drm/imagination/pvr_fw_mips.h | 48 +
 drivers/gpu/drm/imagination/pvr_fw_startstop.c | 306 +
 drivers/gpu/drm/imagination/pvr_fw_startstop.h | 13 +
 drivers/gpu/drm/imagination/pvr_fw_trace.c | 471 ++
 drivers/gpu/drm/imagination/pvr_fw_trace.h | 78 +
 drivers/gpu/drm/imagination/pvr_gem.c | 414 ++
 drivers/gpu/drm/imagination/pvr_gem.h | 170 +
 drivers/gpu/drm/imagination/pvr_hwrt.c | 550 ++
 drivers/gpu/drm/imagination/pvr_hwrt.h | 166 +
 drivers/gpu/drm/imagination/pvr_job.c | 786 +++
 drivers/gpu/drm/imagination/pvr_job.h | 161 +
 drivers/gpu/drm/imagination/pvr_mmu.c | 2640 +++++++++
 drivers/gpu/drm/imagination/pvr_mmu.h | 108 +
 drivers/gpu/drm/imagination/pvr_params.c | 147 +
 drivers/gpu/drm/imagination/pvr_params.h | 72 +
 drivers/gpu/drm/imagination/pvr_power.c | 433 ++
 drivers/gpu/drm/imagination/pvr_power.h | 41 +
 drivers/gpu/drm/imagination/pvr_queue.c | 1432 +++++
 drivers/gpu/drm/imagination/pvr_queue.h | 169 +
 drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h | 6193 ++++++++++++++++++++
 .../gpu/drm/imagination/pvr_rogue_cr_defs_client.h | 159 +
 drivers/gpu/drm/imagination/pvr_rogue_defs.h | 179 +
 drivers/gpu/drm/imagination/pvr_rogue_fwif.h | 2188 +++++++
 drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h | 493 ++
 .../gpu/drm/imagination/pvr_rogue_fwif_client.h | 373 ++
 .../drm/imagination/pvr_rogue_fwif_client_check.h | 133 +
 .../gpu/drm/imagination/pvr_rogue_fwif_common.h | 60 +
 .../gpu/drm/imagination/pvr_rogue_fwif_dev_info.h | 113 +
 .../imagination/pvr_rogue_fwif_resetframework.h | 28 +
 drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h | 1648 ++++++
 .../gpu/drm/imagination/pvr_rogue_fwif_shared.h | 258 +
 .../drm/imagination/pvr_rogue_fwif_shared_check.h | 108 +
 .../gpu/drm/imagination/pvr_rogue_fwif_stream.h | 78 +
 .../gpu/drm/imagination/pvr_rogue_heap_config.h | 113 +
 drivers/gpu/drm/imagination/pvr_rogue_meta.h | 356 ++
 drivers/gpu/drm/imagination/pvr_rogue_mips.h | 335 ++
 drivers/gpu/drm/imagination/pvr_rogue_mips_check.h | 58 +
 drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h | 136 +
 drivers/gpu/drm/imagination/pvr_stream.c | 285 +
 drivers/gpu/drm/imagination/pvr_stream.h | 75 +
 drivers/gpu/drm/imagination/pvr_stream_defs.c | 351 ++
 drivers/gpu/drm/imagination/pvr_stream_defs.h | 16 +
 drivers/gpu/drm/imagination/pvr_sync.c | 289 +
 drivers/gpu/drm/imagination/pvr_sync.h | 84 +
 drivers/gpu/drm/imagination/pvr_vm.c | 1090 ++++
 drivers/gpu/drm/imagination/pvr_vm.h | 66 +
 drivers/gpu/drm/imagination/pvr_vm_mips.c | 237 +
 drivers/gpu/drm/imagination/pvr_vm_mips.h | 22 +
 drivers/gpu/drm/imx/dcss/dcss-drv.c | 6 +-
 drivers/gpu/drm/imx/ipuv3/imx-ldb.c | 9 +-
 drivers/gpu/drm/imx/lcdc/imx-lcdc.c | 6 +-
 drivers/gpu/drm/kmb/kmb_drv.c | 5 +-
 drivers/gpu/drm/lima/lima_ctx.c | 1 +
 drivers/gpu/drm/lima/lima_device.c | 2 +-
 drivers/gpu/drm/lima/lima_sched.c | 4 +-
 drivers/gpu/drm/loongson/Kconfig | 1 +
 drivers/gpu/drm/loongson/lsdc_i2c.c | 1 -
 drivers/gpu/drm/loongson/lsdc_plane.c | 1 -
 drivers/gpu/drm/mediatek/Makefile | 3 +-
 drivers/gpu/drm/mediatek/mtk_cec.c | 4 +-
 drivers/gpu/drm/mediatek/mtk_disp_aal.c | 4 +-
 drivers/gpu/drm/mediatek/mtk_disp_ccorr.c | 4 +-
 drivers/gpu/drm/mediatek/mtk_disp_drv.h | 8 +
 drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c | 253 +-
 drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 10 +-
 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 2 +
 drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 20 +
 drivers/gpu/drm/mediatek/mtk_drm_drv.c | 5 +-
 drivers/gpu/drm/mediatek/mtk_drm_drv.h | 2 +-
 drivers/gpu/drm/mediatek/mtk_ethdr.c | 5 +-
 drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c | 1 -
 drivers/gpu/drm/mediatek/mtk_mdp_rdma.c | 16 +
 drivers/gpu/drm/mediatek/mtk_padding.c | 160 +
 drivers/gpu/drm/meson/meson_dw_mipi_dsi.c | 6 +-
 drivers/gpu/drm/mgag200/mgag200_i2c.c | 1 -
 drivers/gpu/drm/msm/Kconfig | 1 +
 drivers/gpu/drm/msm/Makefile | 1 +
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 21 +-
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 110 +-
 drivers/gpu/drm/msm/adreno/adreno_device.c | 6 +-
 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 3 +
 drivers/gpu/drm/msm/adreno/adreno_gpu.h | 9 +
 .../drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h | 457 ++
 .../drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h | 17 +-
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h | 17 +-
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h | 104 +
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h | 17 +-
 .../drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h | 17 +-
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h | 8 +-
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h | 30 +-
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h | 15 +-
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h | 7 +-
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h | 11 +-
 .../drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h | 4 +-
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h | 7 +-
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h | 37 +-
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h | 14 +-
 .../drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h | 25 +-
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h | 29 +-
 .../gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h | 33 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 10 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 25 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 180 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 17 +-
 .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 43 +-
 .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 22 +-
 .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 130 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 262 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 69 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c | 247 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h | 142 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 33 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 12 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 22 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 11 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 10 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 20 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 21 -
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 50 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 9 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c | 14 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h | 8 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 66 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 1 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 105 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 54 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 2 +
 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 33 +-
 drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c | 32 +-
 drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c | 37 +-
 drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c | 87 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c | 24 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h | 1 -
 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 30 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 21 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h | 1 -
 drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c | 29 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 25 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c | 10 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h | 4 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c | 10 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h | 4 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 19 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h | 1 -
 drivers/gpu/drm/msm/dp/dp_aux.c | 39 +-
 drivers/gpu/drm/msm/dp/dp_debug.c | 69 +-
 drivers/gpu/drm/msm/dp/dp_debug.h | 23 +-
 drivers/gpu/drm/msm/dp/dp_display.c | 349 +-
 drivers/gpu/drm/msm/dp/dp_display.h | 4 +-
 drivers/gpu/drm/msm/dp/dp_drm.c | 30 +-
 drivers/gpu/drm/msm/dp/dp_power.c | 32 +-
 drivers/gpu/drm/msm/dp/dp_power.h | 11 -
 drivers/gpu/drm/msm/dsi/dsi_cfg.c | 17 +
 drivers/gpu/drm/msm/dsi/dsi_cfg.h | 1 +
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 2 +
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 1 +
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 27 +
 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c | 1 -
 drivers/gpu/drm/msm/msm_debugfs.c | 41 +-
 drivers/gpu/drm/msm/msm_drv.c | 94 +-
 drivers/gpu/drm/msm/msm_drv.h | 15 +-
 drivers/gpu/drm/msm/msm_fb.c | 6 +-
 drivers/gpu/drm/msm/msm_gem.c | 7 +-
 drivers/gpu/drm/msm/msm_gem.h | 17 +-
 drivers/gpu/drm/msm/msm_gem_shrinker.c | 2 +-
 drivers/gpu/drm/msm/msm_gem_submit.c | 235 +-
 drivers/gpu/drm/msm/msm_gpu.c | 44 +-
 drivers/gpu/drm/msm/msm_gpu.h | 2 +-
 drivers/gpu/drm/msm/msm_kms.c | 4 +-
 drivers/gpu/drm/msm/msm_mdss.c | 105 +-
 drivers/gpu/drm/msm/msm_mdss.h | 1 +
 drivers/gpu/drm/msm/msm_rd.c | 3 +
 drivers/gpu/drm/msm/msm_ringbuffer.c | 5 +-
 drivers/gpu/drm/mxsfb/mxsfb_drv.c | 10 +-
 drivers/gpu/drm/nouveau/Kconfig | 8 +
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 7 +-
 drivers/gpu/drm/nouveau/nouveau_abi16.c | 40 +-
 drivers/gpu/drm/nouveau/nouveau_abi16.h | 2 +-
 drivers/gpu/drm/nouveau/nouveau_bios.c | 13 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c | 15 +-
 drivers/gpu/drm/nouveau/nouveau_bo.h | 5 +
 drivers/gpu/drm/nouveau/nouveau_drm.c | 38 +-
 drivers/gpu/drm/nouveau/nouveau_drv.h | 19 +-
 drivers/gpu/drm/nouveau/nouveau_exec.c | 68 +-
 drivers/gpu/drm/nouveau/nouveau_exec.h | 6 +-
 drivers/gpu/drm/nouveau/nouveau_gem.c | 10 +-
 drivers/gpu/drm/nouveau/nouveau_platform.c | 5 +-
 drivers/gpu/drm/nouveau/nouveau_sched.c | 239 +-
 drivers/gpu/drm/nouveau/nouveau_sched.h | 43 +-
 drivers/gpu/drm/nouveau/nouveau_uvmm.c | 386 +-
 drivers/gpu/drm/nouveau/nouveau_uvmm.h | 12 +-
 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c | 1 -
 .../gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c | 7 +-
 .../gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c | 12 +-
 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c | 1 +
 drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 6 +-
 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c | 7 +-
 drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c | 9 +-
 drivers/gpu/drm/omapdrm/dss/dispc.c | 4 +-
 drivers/gpu/drm/omapdrm/dss/dss.c | 5 +-
 drivers/gpu/drm/omapdrm/omap_gem.c | 14 +-
 drivers/gpu/drm/panel/Kconfig | 18 +
 drivers/gpu/drm/panel/Makefile | 2 +
 drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 8 +-
 drivers/gpu/drm/panel/panel-edp.c | 87 +-
 drivers/gpu/drm/panel/panel-elida-kd35t133.c | 35 +-
 drivers/gpu/drm/panel/panel-himax-hx8394.c | 180 +-
 drivers/gpu/drm/panel/panel-ilitek-ili9805.c | 405 ++
 drivers/gpu/drm/panel/panel-ilitek-ili9881c.c | 225 +
 drivers/gpu/drm/panel/panel-newvision-nv3051d.c | 55 +-
 drivers/gpu/drm/panel/panel-newvision-nv3052c.c | 515 +-
 drivers/gpu/drm/panel/panel-novatek-nt35510.c | 2 +-
 drivers/gpu/drm/panel/panel-simple.c | 116 +
 drivers/gpu/drm/panel/panel-sitronix-st7701.c | 136 +
 drivers/gpu/drm/panel/panel-synaptics-r63353.c | 362 ++
 drivers/gpu/drm/panel/panel-visionox-rm69299.c | 2 -
 drivers/gpu/drm/panfrost/panfrost_device.c | 81 +-
 drivers/gpu/drm/panfrost/panfrost_device.h | 23 +
 drivers/gpu/drm/panfrost/panfrost_drv.c | 5 +-
 drivers/gpu/drm/panfrost/panfrost_dump.c | 12 +-
 drivers/gpu/drm/panfrost/panfrost_gpu.c | 55 +-
 drivers/gpu/drm/panfrost/panfrost_gpu.h | 1 +
 drivers/gpu/drm/panfrost/panfrost_job.c | 30 +-
 drivers/gpu/drm/panfrost/panfrost_job.h | 1 +
 drivers/gpu/drm/panfrost/panfrost_mmu.c | 45 +-
 drivers/gpu/drm/panfrost/panfrost_mmu.h | 1 +
 drivers/gpu/drm/panfrost/panfrost_regs.h | 1 +
 drivers/gpu/drm/qxl/qxl_display.c | 14 +-
 drivers/gpu/drm/qxl/qxl_drv.h | 7 -
 drivers/gpu/drm/qxl/qxl_release.c | 50 +-
 drivers/gpu/drm/radeon/atombios_encoders.c | 1 +
 drivers/gpu/drm/radeon/clearstate_evergreen.h | 8 +-
 drivers/gpu/drm/radeon/dce3_1_afmt.c | 1 +
 drivers/gpu/drm/radeon/dce6_afmt.c | 1 +
 drivers/gpu/drm/radeon/evergreen.c | 1 +
 drivers/gpu/drm/radeon/evergreen_hdmi.c | 1 +
 drivers/gpu/drm/radeon/radeon_atombios.c | 9 +-
 drivers/gpu/drm/radeon/radeon_audio.c | 2 +
 drivers/gpu/drm/radeon/radeon_audio.h | 4 +-
 drivers/gpu/drm/radeon/radeon_combios.c | 1 +
 drivers/gpu/drm/radeon/radeon_drv.h | 1 -
 drivers/gpu/drm/radeon/radeon_encoders.c | 1 +
 drivers/gpu/drm/radeon/radeon_i2c.c | 1 -
 drivers/gpu/drm/radeon/radeon_mode.h | 2 +-
 drivers/gpu/drm/radeon/radeon_ring.c | 2 +-
 drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c | 1 -
 drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 1 -
 drivers/gpu/drm/rockchip/cdn-dp-core.c | 1 -
 drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 1 -
 drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 1 -
 drivers/gpu/drm/rockchip/inno_hdmi.c | 2 -
 drivers/gpu/drm/rockchip/rk3066_hdmi.c | 47 +-
 drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 18 +
 drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 12 -
 drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 505 +-
 drivers/gpu/drm/rockchip/rockchip_drm_vop2.h | 100 +-
 drivers/gpu/drm/rockchip/rockchip_lvds.c | 1 -
 drivers/gpu/drm/rockchip/rockchip_rgb.c | 1 -
 drivers/gpu/drm/rockchip/rockchip_vop2_reg.c | 227 +-
 drivers/gpu/drm/scheduler/gpu_scheduler_trace.h | 2 +-
 drivers/gpu/drm/scheduler/sched_entity.c | 9 +-
 drivers/gpu/drm/scheduler/sched_main.c | 494 +-
 drivers/gpu/drm/solomon/ssd130x.c | 40 +-
 drivers/gpu/drm/solomon/ssd130x.h | 1 -
 drivers/gpu/drm/sprd/sprd_dpu.c | 6 +-
 drivers/gpu/drm/sprd/sprd_drm.c | 5 +-
 drivers/gpu/drm/sprd/sprd_dsi.c | 6 +-
 drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c | 1 -
 drivers/gpu/drm/tegra/drm.c | 3 +-
 drivers/gpu/drm/tegra/hdmi.c | 1 +
 drivers/gpu/drm/tegra/sor.c | 1 +
 drivers/gpu/drm/tests/Makefile | 5 +-
 drivers/gpu/drm/tests/drm_buddy_test.c | 693 +--
 drivers/gpu/drm/tests/drm_dp_mst_helper_test.c | 160 +
 drivers/gpu/drm/tests/drm_exec_test.c | 16 +-
 drivers/gpu/drm/tests/drm_format_helper_test.c | 72 +-
 drivers/gpu/drm/tests/drm_gem_shmem_test.c | 383 ++
 drivers/gpu/drm/tests/drm_kunit_helpers.c | 78 +-
 drivers/gpu/drm/tests/drm_mm_test.c | 2025 +------
 drivers/gpu/drm/tidss/tidss_crtc.c | 4 -
 drivers/gpu/drm/tidss/tidss_dispc.c | 79 +-
 drivers/gpu/drm/tidss/tidss_dispc.h | 3 +
 drivers/gpu/drm/tidss/tidss_drv.c | 16 +-
 drivers/gpu/drm/tidss/tidss_irq.c | 54 +-
 drivers/gpu/drm/tidss/tidss_kms.c | 2 +-
 drivers/gpu/drm/tilcdc/tilcdc_drv.c | 9 +-
 drivers/gpu/drm/tiny/arcpgu.c | 6 +-
 drivers/gpu/drm/tiny/cirrus.c | 3 +-
 drivers/gpu/drm/tiny/ili9225.c | 10 +-
 drivers/gpu/drm/tiny/ofdrm.c | 17 +-
 drivers/gpu/drm/tiny/repaper.c | 10 +-
 drivers/gpu/drm/tiny/simpledrm.c | 44 +-
 drivers/gpu/drm/tiny/st7586.c | 19 +-
 drivers/gpu/drm/ttm/tests/ttm_device_test.c | 2 +-
 drivers/gpu/drm/ttm/tests/ttm_pool_test.c | 8 +-
 drivers/gpu/drm/ttm/ttm_bo.c | 15 +-
 drivers/gpu/drm/ttm/ttm_device.c | 25 +-
 drivers/gpu/drm/ttm/ttm_pool.c | 56 +-
 drivers/gpu/drm/udl/udl_modeset.c | 19 +-
 drivers/gpu/drm/v3d/Makefile | 4 +-
 drivers/gpu/drm/v3d/v3d_bo.c | 51 +
 drivers/gpu/drm/v3d/v3d_debugfs.c | 178 +-
 drivers/gpu/drm/v3d/v3d_drv.c | 50 +-
 drivers/gpu/drm/v3d/v3d_drv.h | 165 +-
 drivers/gpu/drm/v3d/v3d_gem.c | 779 +--
 drivers/gpu/drm/v3d/v3d_irq.c | 89 +-
 drivers/gpu/drm/v3d/v3d_regs.h | 94 +-
 drivers/gpu/drm/v3d/v3d_sched.c | 397 +-
 drivers/gpu/drm/v3d/v3d_submit.c | 1341 +++++
 drivers/gpu/drm/v3d/v3d_sysfs.c | 69 +
 drivers/gpu/drm/v3d/v3d_trace.h | 57 +
 drivers/gpu/drm/vboxvideo/vbox_mode.c | 4 +-
 drivers/gpu/drm/vc4/tests/vc4_mock.c | 9 +-
 drivers/gpu/drm/vc4/vc4_hdmi.c | 12 +-
 drivers/gpu/drm/vc4/vc4_plane.c | 5 +-
 drivers/gpu/drm/virtio/virtgpu_drv.h | 5 +
 drivers/gpu/drm/virtio/virtgpu_ioctl.c | 41 +-
 drivers/gpu/drm/virtio/virtgpu_plane.c | 8 +-
 drivers/gpu/drm/vkms/vkms_writeback.c | 25 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 35 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 7 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 2 +
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 12 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 3 +
 drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 32 +
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 31 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 15 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 44 +-
 drivers/gpu/drm/xe/.gitignore | 4 +
 drivers/gpu/drm/xe/.kunitconfig | 13 +
 drivers/gpu/drm/xe/Kconfig | 96 +
 drivers/gpu/drm/xe/Kconfig.debug | 107 +
 drivers/gpu/drm/xe/Kconfig.profile | 54 +
 drivers/gpu/drm/xe/Makefile | 305 +
 drivers/gpu/drm/xe/abi/gsc_command_header_abi.h | 46 +
 drivers/gpu/drm/xe/abi/gsc_mkhi_commands_abi.h | 39 +
 drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h | 59 +
 drivers/gpu/drm/xe/abi/guc_actions_abi.h | 219 +
 drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h | 249 +
 drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h | 127 +
 .../gpu/drm/xe/abi/guc_communication_mmio_abi.h | 49 +
 drivers/gpu/drm/xe/abi/guc_errors_abi.h | 37 +
 drivers/gpu/drm/xe/abi/guc_klvs_abi.h | 322 +
 drivers/gpu/drm/xe/abi/guc_messages_abi.h | 234 +
 .../drm/xe/compat-i915-headers/gem/i915_gem_lmem.h | 1 +
 .../drm/xe/compat-i915-headers/gem/i915_gem_mman.h | 17 +
 .../xe/compat-i915-headers/gem/i915_gem_object.h | 64 +
 .../gem/i915_gem_object_frontbuffer.h | 12 +
 .../gpu/drm/xe/compat-i915-headers/gt/intel_rps.h | 11 +
 .../gpu/drm/xe/compat-i915-headers/i915_active.h | 22 +
 .../drm/xe/compat-i915-headers/i915_active_types.h | 13 +
 .../gpu/drm/xe/compat-i915-headers/i915_config.h | 19 +
.../gpu/drm/xe/compat-i915-headers/i915_debugfs.h | 14 + drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h | 233 + .../gpu/drm/xe/compat-i915-headers/i915_fixed.h | 6 + drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h | 9 + .../drm/xe/compat-i915-headers/i915_gem_stolen.h | 79 + .../drm/xe/compat-i915-headers/i915_gpu_error.h | 17 + drivers/gpu/drm/xe/compat-i915-headers/i915_irq.h | 6 + drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h | 6 + .../gpu/drm/xe/compat-i915-headers/i915_reg_defs.h | 6 + .../gpu/drm/xe/compat-i915-headers/i915_trace.h | 6 + .../gpu/drm/xe/compat-i915-headers/i915_utils.h | 6 + drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h | 44 + drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h | 34 + .../drm/xe/compat-i915-headers/i915_vma_types.h | 74 + .../xe/compat-i915-headers/intel_clock_gating.h | 6 + .../drm/xe/compat-i915-headers/intel_gt_types.h | 11 + .../drm/xe/compat-i915-headers/intel_mchbar_regs.h | 6 + .../drm/xe/compat-i915-headers/intel_pci_config.h | 6 + .../gpu/drm/xe/compat-i915-headers/intel_pcode.h | 42 + .../drm/xe/compat-i915-headers/intel_runtime_pm.h | 16 + .../gpu/drm/xe/compat-i915-headers/intel_step.h | 20 + .../gpu/drm/xe/compat-i915-headers/intel_uc_fw.h | 11 + .../gpu/drm/xe/compat-i915-headers/intel_uncore.h | 175 + .../gpu/drm/xe/compat-i915-headers/intel_wakeref.h | 8 + .../gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h | 28 + .../drm/xe/compat-i915-headers/soc/intel_dram.h | 6 + .../drm/xe/compat-i915-headers/soc/intel_gmch.h | 6 + .../gpu/drm/xe/compat-i915-headers/soc/intel_pch.h | 6 + .../gpu/drm/xe/compat-i915-headers/vlv_sideband.h | 132 + .../drm/xe/compat-i915-headers/vlv_sideband_reg.h | 6 + drivers/gpu/drm/xe/display/ext/i915_irq.c | 77 + drivers/gpu/drm/xe/display/ext/i915_utils.c | 26 + drivers/gpu/drm/xe/display/intel_fb_bo.c | 78 + drivers/gpu/drm/xe/display/intel_fb_bo.h | 24 + drivers/gpu/drm/xe/display/intel_fbdev_fb.c | 104 + drivers/gpu/drm/xe/display/intel_fbdev_fb.h | 21 + drivers/gpu/drm/xe/display/xe_display_misc.c | 16 + drivers/gpu/drm/xe/display/xe_display_rps.c | 17 + drivers/gpu/drm/xe/display/xe_dsb_buffer.c | 71 + drivers/gpu/drm/xe/display/xe_fb_pin.c | 384 ++ drivers/gpu/drm/xe/display/xe_hdcp_gsc.c | 34 + drivers/gpu/drm/xe/display/xe_plane_initial.c | 291 + .../gpu/drm/xe/instructions/xe_gfxpipe_commands.h | 160 + drivers/gpu/drm/xe/instructions/xe_gsc_commands.h | 36 + drivers/gpu/drm/xe/instructions/xe_instr_defs.h | 33 + drivers/gpu/drm/xe/instructions/xe_mi_commands.h | 61 + drivers/gpu/drm/xe/regs/xe_engine_regs.h | 184 + drivers/gpu/drm/xe/regs/xe_gpu_commands.h | 70 + drivers/gpu/drm/xe/regs/xe_gsc_regs.h | 41 + drivers/gpu/drm/xe/regs/xe_gt_regs.h | 478 ++ drivers/gpu/drm/xe/regs/xe_guc_regs.h | 143 + drivers/gpu/drm/xe/regs/xe_lrc_layout.h | 17 + drivers/gpu/drm/xe/regs/xe_mchbar_regs.h | 44 + drivers/gpu/drm/xe/regs/xe_reg_defs.h | 120 + drivers/gpu/drm/xe/regs/xe_regs.h | 68 + drivers/gpu/drm/xe/regs/xe_sriov_regs.h | 17 + drivers/gpu/drm/xe/tests/Makefile | 10 + drivers/gpu/drm/xe/tests/xe_bo.c | 352 ++ drivers/gpu/drm/xe/tests/xe_bo_test.c | 26 + drivers/gpu/drm/xe/tests/xe_bo_test.h | 14 + drivers/gpu/drm/xe/tests/xe_dma_buf.c | 278 + drivers/gpu/drm/xe/tests/xe_dma_buf_test.c | 25 + drivers/gpu/drm/xe/tests/xe_dma_buf_test.h | 13 + drivers/gpu/drm/xe/tests/xe_lmtt_test.c | 73 + drivers/gpu/drm/xe/tests/xe_migrate.c | 444 ++ drivers/gpu/drm/xe/tests/xe_migrate_test.c | 25 + drivers/gpu/drm/xe/tests/xe_migrate_test.h | 13 + drivers/gpu/drm/xe/tests/xe_mocs.c | 130 + 
drivers/gpu/drm/xe/tests/xe_mocs_test.c | 25 + drivers/gpu/drm/xe/tests/xe_mocs_test.h | 13 + drivers/gpu/drm/xe/tests/xe_pci.c | 166 + drivers/gpu/drm/xe/tests/xe_pci_test.c | 71 + drivers/gpu/drm/xe/tests/xe_pci_test.h | 36 + drivers/gpu/drm/xe/tests/xe_rtp_test.c | 319 + drivers/gpu/drm/xe/tests/xe_test.h | 67 + drivers/gpu/drm/xe/tests/xe_wa_test.c | 167 + drivers/gpu/drm/xe/xe_assert.h | 174 + drivers/gpu/drm/xe/xe_bb.c | 110 + drivers/gpu/drm/xe/xe_bb.h | 25 + drivers/gpu/drm/xe/xe_bb_types.h | 20 + drivers/gpu/drm/xe/xe_bo.c | 2235 +++++++ drivers/gpu/drm/xe/xe_bo.h | 356 ++ drivers/gpu/drm/xe/xe_bo_doc.h | 179 + drivers/gpu/drm/xe/xe_bo_evict.c | 228 + drivers/gpu/drm/xe/xe_bo_evict.h | 15 + drivers/gpu/drm/xe/xe_bo_types.h | 77 + drivers/gpu/drm/xe/xe_debugfs.c | 148 + drivers/gpu/drm/xe/xe_debugfs.h | 13 + drivers/gpu/drm/xe/xe_devcoredump.c | 196 + drivers/gpu/drm/xe/xe_devcoredump.h | 20 + drivers/gpu/drm/xe/xe_devcoredump_types.h | 55 + drivers/gpu/drm/xe/xe_device.c | 661 +++ drivers/gpu/drm/xe/xe_device.h | 169 + drivers/gpu/drm/xe/xe_device_sysfs.c | 89 + drivers/gpu/drm/xe/xe_device_sysfs.h | 13 + drivers/gpu/drm/xe/xe_device_types.h | 537 ++ drivers/gpu/drm/xe/xe_display.c | 411 ++ drivers/gpu/drm/xe/xe_display.h | 72 + drivers/gpu/drm/xe/xe_dma_buf.c | 322 + drivers/gpu/drm/xe/xe_dma_buf.h | 15 + drivers/gpu/drm/xe/xe_drm_client.c | 196 + drivers/gpu/drm/xe/xe_drm_client.h | 70 + drivers/gpu/drm/xe/xe_drv.h | 23 + drivers/gpu/drm/xe/xe_exec.c | 339 ++ drivers/gpu/drm/xe/xe_exec.h | 14 + drivers/gpu/drm/xe/xe_exec_queue.c | 862 +++ drivers/gpu/drm/xe/xe_exec_queue.h | 69 + drivers/gpu/drm/xe/xe_exec_queue_types.h | 211 + drivers/gpu/drm/xe/xe_execlist.c | 472 ++ drivers/gpu/drm/xe/xe_execlist.h | 21 + drivers/gpu/drm/xe/xe_execlist_types.h | 49 + drivers/gpu/drm/xe/xe_force_wake.c | 199 + drivers/gpu/drm/xe/xe_force_wake.h | 38 + drivers/gpu/drm/xe/xe_force_wake_types.h | 86 + drivers/gpu/drm/xe/xe_gen_wa_oob.c | 165 + drivers/gpu/drm/xe/xe_ggtt.c | 428 ++ drivers/gpu/drm/xe/xe_ggtt.h | 33 + drivers/gpu/drm/xe/xe_ggtt_types.h | 39 + drivers/gpu/drm/xe/xe_gpu_scheduler.c | 101 + drivers/gpu/drm/xe/xe_gpu_scheduler.h | 73 + drivers/gpu/drm/xe/xe_gpu_scheduler_types.h | 57 + drivers/gpu/drm/xe/xe_gsc.c | 438 ++ drivers/gpu/drm/xe/xe_gsc.h | 20 + drivers/gpu/drm/xe/xe_gsc_submit.c | 184 + drivers/gpu/drm/xe/xe_gsc_submit.h | 30 + drivers/gpu/drm/xe/xe_gsc_types.h | 39 + drivers/gpu/drm/xe/xe_gt.c | 783 +++ drivers/gpu/drm/xe/xe_gt.h | 72 + drivers/gpu/drm/xe/xe_gt_ccs_mode.c | 186 + drivers/gpu/drm/xe/xe_gt_ccs_mode.h | 24 + drivers/gpu/drm/xe/xe_gt_clock.c | 85 + drivers/gpu/drm/xe/xe_gt_clock.h | 15 + drivers/gpu/drm/xe/xe_gt_debugfs.c | 249 + drivers/gpu/drm/xe/xe_gt_debugfs.h | 13 + drivers/gpu/drm/xe/xe_gt_freq.c | 222 + drivers/gpu/drm/xe/xe_gt_freq.h | 13 + drivers/gpu/drm/xe/xe_gt_idle.c | 192 + drivers/gpu/drm/xe/xe_gt_idle.h | 17 + drivers/gpu/drm/xe/xe_gt_idle_types.h | 38 + drivers/gpu/drm/xe/xe_gt_mcr.c | 685 +++ drivers/gpu/drm/xe/xe_gt_mcr.h | 29 + drivers/gpu/drm/xe/xe_gt_pagefault.c | 651 ++ drivers/gpu/drm/xe/xe_gt_pagefault.h | 19 + drivers/gpu/drm/xe/xe_gt_printk.h | 46 + drivers/gpu/drm/xe/xe_gt_sysfs.c | 61 + drivers/gpu/drm/xe/xe_gt_sysfs.h | 19 + drivers/gpu/drm/xe/xe_gt_sysfs_types.h | 26 + drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c | 251 + drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h | 16 + drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 418 ++ drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h | 26 + drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h | 28 + 
drivers/gpu/drm/xe/xe_gt_topology.c | 169 + drivers/gpu/drm/xe/xe_gt_topology.h | 25 + drivers/gpu/drm/xe/xe_gt_types.h | 363 ++ drivers/gpu/drm/xe/xe_guc.c | 916 +++ drivers/gpu/drm/xe/xe_guc.h | 72 + drivers/gpu/drm/xe/xe_guc_ads.c | 672 +++ drivers/gpu/drm/xe/xe_guc_ads.h | 17 + drivers/gpu/drm/xe/xe_guc_ads_types.h | 25 + drivers/gpu/drm/xe/xe_guc_ct.c | 1320 +++++ drivers/gpu/drm/xe/xe_guc_ct.h | 59 + drivers/gpu/drm/xe/xe_guc_ct_types.h | 115 + drivers/gpu/drm/xe/xe_guc_debugfs.c | 74 + drivers/gpu/drm/xe/xe_guc_debugfs.h | 14 + drivers/gpu/drm/xe/xe_guc_exec_queue_types.h | 54 + drivers/gpu/drm/xe/xe_guc_fwif.h | 361 ++ drivers/gpu/drm/xe/xe_guc_hwconfig.c | 104 + drivers/gpu/drm/xe/xe_guc_hwconfig.h | 17 + drivers/gpu/drm/xe/xe_guc_log.c | 97 + drivers/gpu/drm/xe/xe_guc_log.h | 48 + drivers/gpu/drm/xe/xe_guc_log_types.h | 23 + drivers/gpu/drm/xe/xe_guc_pc.c | 1002 ++++ drivers/gpu/drm/xe/xe_guc_pc.h | 31 + drivers/gpu/drm/xe/xe_guc_pc_types.h | 34 + drivers/gpu/drm/xe/xe_guc_submit.c | 1987 +++++++ drivers/gpu/drm/xe/xe_guc_submit.h | 38 + drivers/gpu/drm/xe/xe_guc_submit_types.h | 155 + drivers/gpu/drm/xe/xe_guc_types.h | 81 + drivers/gpu/drm/xe/xe_heci_gsc.c | 234 + drivers/gpu/drm/xe/xe_heci_gsc.h | 35 + drivers/gpu/drm/xe/xe_huc.c | 300 + drivers/gpu/drm/xe/xe_huc.h | 26 + drivers/gpu/drm/xe/xe_huc_debugfs.c | 70 + drivers/gpu/drm/xe/xe_huc_debugfs.h | 14 + drivers/gpu/drm/xe/xe_huc_types.h | 24 + drivers/gpu/drm/xe/xe_hw_engine.c | 883 +++ drivers/gpu/drm/xe/xe_hw_engine.h | 70 + drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c | 675 +++ drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h | 36 + drivers/gpu/drm/xe/xe_hw_engine_types.h | 225 + drivers/gpu/drm/xe/xe_hw_fence.c | 230 + drivers/gpu/drm/xe/xe_hw_fence.h | 30 + drivers/gpu/drm/xe/xe_hw_fence_types.h | 72 + drivers/gpu/drm/xe/xe_hwmon.c | 776 +++ drivers/gpu/drm/xe/xe_hwmon.h | 19 + drivers/gpu/drm/xe/xe_irq.c | 666 +++ drivers/gpu/drm/xe/xe_irq.h | 19 + drivers/gpu/drm/xe/xe_lmtt.c | 506 ++ drivers/gpu/drm/xe/xe_lmtt.h | 27 + drivers/gpu/drm/xe/xe_lmtt_2l.c | 150 + drivers/gpu/drm/xe/xe_lmtt_ml.c | 161 + drivers/gpu/drm/xe/xe_lmtt_types.h | 63 + drivers/gpu/drm/xe/xe_lrc.c | 1264 ++++ drivers/gpu/drm/xe/xe_lrc.h | 58 + drivers/gpu/drm/xe/xe_lrc_types.h | 46 + drivers/gpu/drm/xe/xe_macros.h | 18 + drivers/gpu/drm/xe/xe_map.h | 93 + drivers/gpu/drm/xe/xe_migrate.c | 1455 +++++ drivers/gpu/drm/xe/xe_migrate.h | 110 + drivers/gpu/drm/xe/xe_migrate_doc.h | 88 + drivers/gpu/drm/xe/xe_mmio.c | 524 ++ drivers/gpu/drm/xe/xe_mmio.h | 107 + drivers/gpu/drm/xe/xe_mocs.c | 580 ++ drivers/gpu/drm/xe/xe_mocs.h | 17 + drivers/gpu/drm/xe/xe_module.c | 101 + drivers/gpu/drm/xe/xe_module.h | 26 + drivers/gpu/drm/xe/xe_pat.c | 459 ++ drivers/gpu/drm/xe/xe_pat.h | 61 + drivers/gpu/drm/xe/xe_pci.c | 951 +++ drivers/gpu/drm/xe/xe_pci.h | 12 + drivers/gpu/drm/xe/xe_pci_types.h | 46 + drivers/gpu/drm/xe/xe_pcode.c | 296 + drivers/gpu/drm/xe/xe_pcode.h | 30 + drivers/gpu/drm/xe/xe_pcode_api.h | 49 + drivers/gpu/drm/xe/xe_platform_types.h | 37 + drivers/gpu/drm/xe/xe_pm.c | 405 ++ drivers/gpu/drm/xe/xe_pm.h | 35 + drivers/gpu/drm/xe/xe_preempt_fence.c | 158 + drivers/gpu/drm/xe/xe_preempt_fence.h | 61 + drivers/gpu/drm/xe/xe_preempt_fence_types.h | 32 + drivers/gpu/drm/xe/xe_pt.c | 1671 ++++++ drivers/gpu/drm/xe/xe_pt.h | 48 + drivers/gpu/drm/xe/xe_pt_types.h | 77 + drivers/gpu/drm/xe/xe_pt_walk.c | 160 + drivers/gpu/drm/xe/xe_pt_walk.h | 148 + drivers/gpu/drm/xe/xe_query.c | 552 ++ drivers/gpu/drm/xe/xe_query.h | 14 + 
drivers/gpu/drm/xe/xe_range_fence.c | 161 + drivers/gpu/drm/xe/xe_range_fence.h | 75 + drivers/gpu/drm/xe/xe_reg_sr.c | 284 + drivers/gpu/drm/xe/xe_reg_sr.h | 28 + drivers/gpu/drm/xe/xe_reg_sr_types.h | 37 + drivers/gpu/drm/xe/xe_reg_whitelist.c | 146 + drivers/gpu/drm/xe/xe_reg_whitelist.h | 23 + drivers/gpu/drm/xe/xe_res_cursor.h | 240 + drivers/gpu/drm/xe/xe_ring_ops.c | 479 ++ drivers/gpu/drm/xe/xe_ring_ops.h | 17 + drivers/gpu/drm/xe/xe_ring_ops_types.h | 22 + drivers/gpu/drm/xe/xe_rtp.c | 325 + drivers/gpu/drm/xe/xe_rtp.h | 430 ++ drivers/gpu/drm/xe/xe_rtp_helpers.h | 81 + drivers/gpu/drm/xe/xe_rtp_types.h | 124 + drivers/gpu/drm/xe/xe_sa.c | 106 + drivers/gpu/drm/xe/xe_sa.h | 40 + drivers/gpu/drm/xe/xe_sa_types.h | 19 + drivers/gpu/drm/xe/xe_sched_job.c | 289 + drivers/gpu/drm/xe/xe_sched_job.h | 80 + drivers/gpu/drm/xe/xe_sched_job_types.h | 48 + drivers/gpu/drm/xe/xe_sriov.c | 55 + drivers/gpu/drm/xe/xe_sriov.h | 42 + drivers/gpu/drm/xe/xe_sriov_printk.h | 46 + drivers/gpu/drm/xe/xe_sriov_types.h | 28 + drivers/gpu/drm/xe/xe_step.c | 264 + drivers/gpu/drm/xe/xe_step.h | 23 + drivers/gpu/drm/xe/xe_step_types.h | 50 + drivers/gpu/drm/xe/xe_sync.c | 380 ++ drivers/gpu/drm/xe/xe_sync.h | 45 + drivers/gpu/drm/xe/xe_sync_types.h | 28 + drivers/gpu/drm/xe/xe_tile.c | 186 + drivers/gpu/drm/xe/xe_tile.h | 18 + drivers/gpu/drm/xe/xe_tile_sysfs.c | 57 + drivers/gpu/drm/xe/xe_tile_sysfs.h | 19 + drivers/gpu/drm/xe/xe_tile_sysfs_types.h | 27 + drivers/gpu/drm/xe/xe_trace.c | 9 + drivers/gpu/drm/xe/xe_trace.h | 631 ++ drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c | 334 ++ drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h | 21 + drivers/gpu/drm/xe/xe_ttm_sys_mgr.c | 118 + drivers/gpu/drm/xe/xe_ttm_sys_mgr.h | 13 + drivers/gpu/drm/xe/xe_ttm_vram_mgr.c | 480 ++ drivers/gpu/drm/xe/xe_ttm_vram_mgr.h | 44 + drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h | 52 + drivers/gpu/drm/xe/xe_tuning.c | 121 + drivers/gpu/drm/xe/xe_tuning.h | 16 + drivers/gpu/drm/xe/xe_uc.c | 258 + drivers/gpu/drm/xe/xe_uc.h | 24 + drivers/gpu/drm/xe/xe_uc_debugfs.c | 26 + drivers/gpu/drm/xe/xe_uc_debugfs.h | 14 + drivers/gpu/drm/xe/xe_uc_fw.c | 882 +++ drivers/gpu/drm/xe/xe_uc_fw.h | 184 + drivers/gpu/drm/xe/xe_uc_fw_abi.h | 321 + drivers/gpu/drm/xe/xe_uc_fw_types.h | 146 + drivers/gpu/drm/xe/xe_uc_types.h | 28 + drivers/gpu/drm/xe/xe_vm.c | 3286 +++++++++++ drivers/gpu/drm/xe/xe_vm.h | 275 + drivers/gpu/drm/xe/xe_vm_doc.h | 555 ++ drivers/gpu/drm/xe/xe_vm_types.h | 373 ++ drivers/gpu/drm/xe/xe_wa.c | 895 +++ drivers/gpu/drm/xe/xe_wa.h | 32 + drivers/gpu/drm/xe/xe_wa_oob.rules | 24 + drivers/gpu/drm/xe/xe_wait_user_fence.c | 179 + drivers/gpu/drm/xe/xe_wait_user_fence.h | 15 + drivers/gpu/drm/xe/xe_wopcm.c | 270 + drivers/gpu/drm/xe/xe_wopcm.h | 16 + drivers/gpu/drm/xe/xe_wopcm_types.h | 26 + drivers/gpu/drm/xlnx/zynqmp_kms.c | 1 - 1495 files changed, 173114 insertions(+), 70542 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h create mode 100644 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c create mode 100644 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h create mode 100644 drivers/gpu/drm/amd/display/dc/core/dc_state.c create mode 100644 drivers/gpu/drm/amd/display/dc/dc_plane.h create mode 100644 drivers/gpu/drm/amd/display/dc/dc_plane_priv.h create mode 100644 drivers/gpu/drm/amd/display/dc/dc_state.h create mode 100644 drivers/gpu/drm/amd/display/dc/dc_state_priv.h create mode 100644 drivers/gpu/drm/amd/display/dc/dc_stream_priv.h delete mode 
100644 drivers/gpu/drm/amd/display/dc/dce100/Makefile delete mode 100644 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn302/Makefile delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c delete mode 100644 
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn315/Makefile delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn316/Makefile delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c create mode 100644 drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h create mode 100644 drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c create mode 100644 drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h create mode 100644 drivers/gpu/drm/amd/display/dc/dsc/dsc.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c create mode 100644 
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h delete mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h create mode 100644 drivers/gpu/drm/amd/display/dc/optc/Makefile create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c create mode 100644 drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/Makefile create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c create mode 100644 
drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c create mode 100644 drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h create mode 100644 drivers/gpu/drm/amd/include/amdgpu_reg_state.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h create mode 
100644 drivers/gpu/drm/bridge/aux-bridge.c create mode 100644 drivers/gpu/drm/bridge/aux-hpd-bridge.c delete mode 100644 drivers/gpu/drm/drm_agpsupport.c delete mode 100644 drivers/gpu/drm/drm_bufs.c delete mode 100644 drivers/gpu/drm/drm_context.c delete mode 100644 drivers/gpu/drm/drm_dma.c create mode 100644 drivers/gpu/drm/drm_eld.c delete mode 100644 drivers/gpu/drm/drm_hashtab.c delete mode 100644 drivers/gpu/drm/drm_irq.c delete mode 100644 drivers/gpu/drm/drm_legacy.h delete mode 100644 drivers/gpu/drm/drm_legacy_misc.c delete mode 100644 drivers/gpu/drm/drm_lock.c delete mode 100644 drivers/gpu/drm/drm_memory.c delete mode 100644 drivers/gpu/drm/drm_scatter.c delete mode 100644 drivers/gpu/drm/drm_vm.c delete mode 100644 drivers/gpu/drm/gma500/psb_lid.c create mode 100644 drivers/gpu/drm/i915/display/intel_display_debugfs_params.c create mode 100644 drivers/gpu/drm/i915/display/intel_display_debugfs_params.h create mode 100644 drivers/gpu/drm/i915/display/intel_display_params.c create mode 100644 drivers/gpu/drm/i915/display/intel_display_params.h create mode 100644 drivers/gpu/drm/i915/display/intel_dpt_common.c create mode 100644 drivers/gpu/drm/i915/display/intel_dpt_common.h create mode 100644 drivers/gpu/drm/i915/display/intel_dsb_buffer.c create mode 100644 drivers/gpu/drm/i915/display/intel_dsb_buffer.h create mode 100644 drivers/gpu/drm/i915/display/intel_fb_bo.c create mode 100644 drivers/gpu/drm/i915/display/intel_fb_bo.h create mode 100644 drivers/gpu/drm/i915/display/intel_fbdev_fb.c create mode 100644 drivers/gpu/drm/i915/display/intel_fbdev_fb.h create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h create mode 100644 drivers/gpu/drm/imagination/Kconfig create mode 100644 drivers/gpu/drm/imagination/Makefile create mode 100644 drivers/gpu/drm/imagination/pvr_ccb.c create mode 100644 drivers/gpu/drm/imagination/pvr_ccb.h create mode 100644 drivers/gpu/drm/imagination/pvr_cccb.c create mode 100644 drivers/gpu/drm/imagination/pvr_cccb.h create mode 100644 drivers/gpu/drm/imagination/pvr_context.c create mode 100644 drivers/gpu/drm/imagination/pvr_context.h create mode 100644 drivers/gpu/drm/imagination/pvr_debugfs.c create mode 100644 drivers/gpu/drm/imagination/pvr_debugfs.h create mode 100644 drivers/gpu/drm/imagination/pvr_device.c create mode 100644 drivers/gpu/drm/imagination/pvr_device.h create mode 100644 drivers/gpu/drm/imagination/pvr_device_info.c create mode 100644 drivers/gpu/drm/imagination/pvr_device_info.h create mode 100644 drivers/gpu/drm/imagination/pvr_drv.c create mode 100644 drivers/gpu/drm/imagination/pvr_drv.h create mode 100644 drivers/gpu/drm/imagination/pvr_free_list.c create mode 100644 drivers/gpu/drm/imagination/pvr_free_list.h create mode 100644 drivers/gpu/drm/imagination/pvr_fw.c create mode 100644 drivers/gpu/drm/imagination/pvr_fw.h create mode 100644 drivers/gpu/drm/imagination/pvr_fw_info.h create mode 100644 drivers/gpu/drm/imagination/pvr_fw_meta.c create mode 100644 drivers/gpu/drm/imagination/pvr_fw_meta.h create mode 100644 drivers/gpu/drm/imagination/pvr_fw_mips.c create mode 100644 drivers/gpu/drm/imagination/pvr_fw_mips.h create mode 100644 drivers/gpu/drm/imagination/pvr_fw_startstop.c create mode 100644 drivers/gpu/drm/imagination/pvr_fw_startstop.h create mode 100644 drivers/gpu/drm/imagination/pvr_fw_trace.c create mode 100644 drivers/gpu/drm/imagination/pvr_fw_trace.h create mode 100644 drivers/gpu/drm/imagination/pvr_gem.c create mode 100644 
drivers/gpu/drm/imagination/pvr_gem.h create mode 100644 drivers/gpu/drm/imagination/pvr_hwrt.c create mode 100644 drivers/gpu/drm/imagination/pvr_hwrt.h create mode 100644 drivers/gpu/drm/imagination/pvr_job.c create mode 100644 drivers/gpu/drm/imagination/pvr_job.h create mode 100644 drivers/gpu/drm/imagination/pvr_mmu.c create mode 100644 drivers/gpu/drm/imagination/pvr_mmu.h create mode 100644 drivers/gpu/drm/imagination/pvr_params.c create mode 100644 drivers/gpu/drm/imagination/pvr_params.h create mode 100644 drivers/gpu/drm/imagination/pvr_power.c create mode 100644 drivers/gpu/drm/imagination/pvr_power.h create mode 100644 drivers/gpu/drm/imagination/pvr_queue.c create mode 100644 drivers/gpu/drm/imagination/pvr_queue.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_defs.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_heap_config.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_meta.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_mips.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_mips_check.h create mode 100644 drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h create mode 100644 drivers/gpu/drm/imagination/pvr_stream.c create mode 100644 drivers/gpu/drm/imagination/pvr_stream.h create mode 100644 drivers/gpu/drm/imagination/pvr_stream_defs.c create mode 100644 drivers/gpu/drm/imagination/pvr_stream_defs.h create mode 100644 drivers/gpu/drm/imagination/pvr_sync.c create mode 100644 drivers/gpu/drm/imagination/pvr_sync.h create mode 100644 drivers/gpu/drm/imagination/pvr_vm.c create mode 100644 drivers/gpu/drm/imagination/pvr_vm.h create mode 100644 drivers/gpu/drm/imagination/pvr_vm_mips.c create mode 100644 drivers/gpu/drm/imagination/pvr_vm_mips.h create mode 100644 drivers/gpu/drm/mediatek/mtk_padding.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h create mode 100644 drivers/gpu/drm/panel/panel-ilitek-ili9805.c create mode 100644 drivers/gpu/drm/panel/panel-synaptics-r63353.c create mode 100644 drivers/gpu/drm/tests/drm_gem_shmem_test.c create mode 100644 drivers/gpu/drm/v3d/v3d_submit.c create mode 100644 drivers/gpu/drm/v3d/v3d_sysfs.c create mode 100644 drivers/gpu/drm/xe/.gitignore create mode 100644 drivers/gpu/drm/xe/.kunitconfig create mode 100644 drivers/gpu/drm/xe/Kconfig create mode 100644 drivers/gpu/drm/xe/Kconfig.debug create mode 100644 drivers/gpu/drm/xe/Kconfig.profile 
create mode 100644 drivers/gpu/drm/xe/Makefile create mode 100644 drivers/gpu/drm/xe/abi/gsc_command_header_abi.h create mode 100644 drivers/gpu/drm/xe/abi/gsc_mkhi_commands_abi.h create mode 100644 drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h create mode 100644 drivers/gpu/drm/xe/abi/guc_actions_abi.h create mode 100644 drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h create mode 100644 drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h create mode 100644 drivers/gpu/drm/xe/abi/guc_communication_mmio_abi.h create mode 100644 drivers/gpu/drm/xe/abi/guc_errors_abi.h create mode 100644 drivers/gpu/drm/xe/abi/guc_klvs_abi.h create mode 100644 drivers/gpu/drm/xe/abi/guc_messages_abi.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_active.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_active_types.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_config.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_gem.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_gem_stolen.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_irq.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_reg_defs.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/intel_clock_gating.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/intel_gt_types.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/intel_mchbar_regs.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/intel_pci_config.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/intel_step.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/soc/intel_dram.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/soc/intel_gmch.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h create mode 100644 drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h create mode 100644 
drivers/gpu/drm/xe/display/ext/i915_irq.c create mode 100644 drivers/gpu/drm/xe/display/ext/i915_utils.c create mode 100644 drivers/gpu/drm/xe/display/intel_fb_bo.c create mode 100644 drivers/gpu/drm/xe/display/intel_fb_bo.h create mode 100644 drivers/gpu/drm/xe/display/intel_fbdev_fb.c create mode 100644 drivers/gpu/drm/xe/display/intel_fbdev_fb.h create mode 100644 drivers/gpu/drm/xe/display/xe_display_misc.c create mode 100644 drivers/gpu/drm/xe/display/xe_display_rps.c create mode 100644 drivers/gpu/drm/xe/display/xe_dsb_buffer.c create mode 100644 drivers/gpu/drm/xe/display/xe_fb_pin.c create mode 100644 drivers/gpu/drm/xe/display/xe_hdcp_gsc.c create mode 100644 drivers/gpu/drm/xe/display/xe_plane_initial.c create mode 100644 drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h create mode 100644 drivers/gpu/drm/xe/instructions/xe_gsc_commands.h create mode 100644 drivers/gpu/drm/xe/instructions/xe_instr_defs.h create mode 100644 drivers/gpu/drm/xe/instructions/xe_mi_commands.h create mode 100644 drivers/gpu/drm/xe/regs/xe_engine_regs.h create mode 100644 drivers/gpu/drm/xe/regs/xe_gpu_commands.h create mode 100644 drivers/gpu/drm/xe/regs/xe_gsc_regs.h create mode 100644 drivers/gpu/drm/xe/regs/xe_gt_regs.h create mode 100644 drivers/gpu/drm/xe/regs/xe_guc_regs.h create mode 100644 drivers/gpu/drm/xe/regs/xe_lrc_layout.h create mode 100644 drivers/gpu/drm/xe/regs/xe_mchbar_regs.h create mode 100644 drivers/gpu/drm/xe/regs/xe_reg_defs.h create mode 100644 drivers/gpu/drm/xe/regs/xe_regs.h create mode 100644 drivers/gpu/drm/xe/regs/xe_sriov_regs.h create mode 100644 drivers/gpu/drm/xe/tests/Makefile create mode 100644 drivers/gpu/drm/xe/tests/xe_bo.c create mode 100644 drivers/gpu/drm/xe/tests/xe_bo_test.c create mode 100644 drivers/gpu/drm/xe/tests/xe_bo_test.h create mode 100644 drivers/gpu/drm/xe/tests/xe_dma_buf.c create mode 100644 drivers/gpu/drm/xe/tests/xe_dma_buf_test.c create mode 100644 drivers/gpu/drm/xe/tests/xe_dma_buf_test.h create mode 100644 drivers/gpu/drm/xe/tests/xe_lmtt_test.c create mode 100644 drivers/gpu/drm/xe/tests/xe_migrate.c create mode 100644 drivers/gpu/drm/xe/tests/xe_migrate_test.c create mode 100644 drivers/gpu/drm/xe/tests/xe_migrate_test.h create mode 100644 drivers/gpu/drm/xe/tests/xe_mocs.c create mode 100644 drivers/gpu/drm/xe/tests/xe_mocs_test.c create mode 100644 drivers/gpu/drm/xe/tests/xe_mocs_test.h create mode 100644 drivers/gpu/drm/xe/tests/xe_pci.c create mode 100644 drivers/gpu/drm/xe/tests/xe_pci_test.c create mode 100644 drivers/gpu/drm/xe/tests/xe_pci_test.h create mode 100644 drivers/gpu/drm/xe/tests/xe_rtp_test.c create mode 100644 drivers/gpu/drm/xe/tests/xe_test.h create mode 100644 drivers/gpu/drm/xe/tests/xe_wa_test.c create mode 100644 drivers/gpu/drm/xe/xe_assert.h create mode 100644 drivers/gpu/drm/xe/xe_bb.c create mode 100644 drivers/gpu/drm/xe/xe_bb.h create mode 100644 drivers/gpu/drm/xe/xe_bb_types.h create mode 100644 drivers/gpu/drm/xe/xe_bo.c create mode 100644 drivers/gpu/drm/xe/xe_bo.h create mode 100644 drivers/gpu/drm/xe/xe_bo_doc.h create mode 100644 drivers/gpu/drm/xe/xe_bo_evict.c create mode 100644 drivers/gpu/drm/xe/xe_bo_evict.h create mode 100644 drivers/gpu/drm/xe/xe_bo_types.h create mode 100644 drivers/gpu/drm/xe/xe_debugfs.c create mode 100644 drivers/gpu/drm/xe/xe_debugfs.h create mode 100644 drivers/gpu/drm/xe/xe_devcoredump.c create mode 100644 drivers/gpu/drm/xe/xe_devcoredump.h create mode 100644 drivers/gpu/drm/xe/xe_devcoredump_types.h create mode 100644 drivers/gpu/drm/xe/xe_device.c create 
mode 100644 drivers/gpu/drm/xe/xe_device.h create mode 100644 drivers/gpu/drm/xe/xe_device_sysfs.c create mode 100644 drivers/gpu/drm/xe/xe_device_sysfs.h create mode 100644 drivers/gpu/drm/xe/xe_device_types.h create mode 100644 drivers/gpu/drm/xe/xe_display.c create mode 100644 drivers/gpu/drm/xe/xe_display.h create mode 100644 drivers/gpu/drm/xe/xe_dma_buf.c create mode 100644 drivers/gpu/drm/xe/xe_dma_buf.h create mode 100644 drivers/gpu/drm/xe/xe_drm_client.c create mode 100644 drivers/gpu/drm/xe/xe_drm_client.h create mode 100644 drivers/gpu/drm/xe/xe_drv.h create mode 100644 drivers/gpu/drm/xe/xe_exec.c create mode 100644 drivers/gpu/drm/xe/xe_exec.h create mode 100644 drivers/gpu/drm/xe/xe_exec_queue.c create mode 100644 drivers/gpu/drm/xe/xe_exec_queue.h create mode 100644 drivers/gpu/drm/xe/xe_exec_queue_types.h create mode 100644 drivers/gpu/drm/xe/xe_execlist.c create mode 100644 drivers/gpu/drm/xe/xe_execlist.h create mode 100644 drivers/gpu/drm/xe/xe_execlist_types.h create mode 100644 drivers/gpu/drm/xe/xe_force_wake.c create mode 100644 drivers/gpu/drm/xe/xe_force_wake.h create mode 100644 drivers/gpu/drm/xe/xe_force_wake_types.h create mode 100644 drivers/gpu/drm/xe/xe_gen_wa_oob.c create mode 100644 drivers/gpu/drm/xe/xe_ggtt.c create mode 100644 drivers/gpu/drm/xe/xe_ggtt.h create mode 100644 drivers/gpu/drm/xe/xe_ggtt_types.h create mode 100644 drivers/gpu/drm/xe/xe_gpu_scheduler.c create mode 100644 drivers/gpu/drm/xe/xe_gpu_scheduler.h create mode 100644 drivers/gpu/drm/xe/xe_gpu_scheduler_types.h create mode 100644 drivers/gpu/drm/xe/xe_gsc.c create mode 100644 drivers/gpu/drm/xe/xe_gsc.h create mode 100644 drivers/gpu/drm/xe/xe_gsc_submit.c create mode 100644 drivers/gpu/drm/xe/xe_gsc_submit.h create mode 100644 drivers/gpu/drm/xe/xe_gsc_types.h create mode 100644 drivers/gpu/drm/xe/xe_gt.c create mode 100644 drivers/gpu/drm/xe/xe_gt.h create mode 100644 drivers/gpu/drm/xe/xe_gt_ccs_mode.c create mode 100644 drivers/gpu/drm/xe/xe_gt_ccs_mode.h create mode 100644 drivers/gpu/drm/xe/xe_gt_clock.c create mode 100644 drivers/gpu/drm/xe/xe_gt_clock.h create mode 100644 drivers/gpu/drm/xe/xe_gt_debugfs.c create mode 100644 drivers/gpu/drm/xe/xe_gt_debugfs.h create mode 100644 drivers/gpu/drm/xe/xe_gt_freq.c create mode 100644 drivers/gpu/drm/xe/xe_gt_freq.h create mode 100644 drivers/gpu/drm/xe/xe_gt_idle.c create mode 100644 drivers/gpu/drm/xe/xe_gt_idle.h create mode 100644 drivers/gpu/drm/xe/xe_gt_idle_types.h create mode 100644 drivers/gpu/drm/xe/xe_gt_mcr.c create mode 100644 drivers/gpu/drm/xe/xe_gt_mcr.h create mode 100644 drivers/gpu/drm/xe/xe_gt_pagefault.c create mode 100644 drivers/gpu/drm/xe/xe_gt_pagefault.h create mode 100644 drivers/gpu/drm/xe/xe_gt_printk.h create mode 100644 drivers/gpu/drm/xe/xe_gt_sysfs.c create mode 100644 drivers/gpu/drm/xe/xe_gt_sysfs.h create mode 100644 drivers/gpu/drm/xe/xe_gt_sysfs_types.h create mode 100644 drivers/gpu/drm/xe/xe_gt_throttle_sysfs.c create mode 100644 drivers/gpu/drm/xe/xe_gt_throttle_sysfs.h create mode 100644 drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c create mode 100644 drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h create mode 100644 drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h create mode 100644 drivers/gpu/drm/xe/xe_gt_topology.c create mode 100644 drivers/gpu/drm/xe/xe_gt_topology.h create mode 100644 drivers/gpu/drm/xe/xe_gt_types.h create mode 100644 drivers/gpu/drm/xe/xe_guc.c create mode 100644 drivers/gpu/drm/xe/xe_guc.h create mode 100644 drivers/gpu/drm/xe/xe_guc_ads.c create mode 100644 
drivers/gpu/drm/xe/xe_guc_ads.h create mode 100644 drivers/gpu/drm/xe/xe_guc_ads_types.h create mode 100644 drivers/gpu/drm/xe/xe_guc_ct.c create mode 100644 drivers/gpu/drm/xe/xe_guc_ct.h create mode 100644 drivers/gpu/drm/xe/xe_guc_ct_types.h create mode 100644 drivers/gpu/drm/xe/xe_guc_debugfs.c create mode 100644 drivers/gpu/drm/xe/xe_guc_debugfs.h create mode 100644 drivers/gpu/drm/xe/xe_guc_exec_queue_types.h create mode 100644 drivers/gpu/drm/xe/xe_guc_fwif.h create mode 100644 drivers/gpu/drm/xe/xe_guc_hwconfig.c create mode 100644 drivers/gpu/drm/xe/xe_guc_hwconfig.h create mode 100644 drivers/gpu/drm/xe/xe_guc_log.c create mode 100644 drivers/gpu/drm/xe/xe_guc_log.h create mode 100644 drivers/gpu/drm/xe/xe_guc_log_types.h create mode 100644 drivers/gpu/drm/xe/xe_guc_pc.c create mode 100644 drivers/gpu/drm/xe/xe_guc_pc.h create mode 100644 drivers/gpu/drm/xe/xe_guc_pc_types.h create mode 100644 drivers/gpu/drm/xe/xe_guc_submit.c create mode 100644 drivers/gpu/drm/xe/xe_guc_submit.h create mode 100644 drivers/gpu/drm/xe/xe_guc_submit_types.h create mode 100644 drivers/gpu/drm/xe/xe_guc_types.h create mode 100644 drivers/gpu/drm/xe/xe_heci_gsc.c create mode 100644 drivers/gpu/drm/xe/xe_heci_gsc.h create mode 100644 drivers/gpu/drm/xe/xe_huc.c create mode 100644 drivers/gpu/drm/xe/xe_huc.h create mode 100644 drivers/gpu/drm/xe/xe_huc_debugfs.c create mode 100644 drivers/gpu/drm/xe/xe_huc_debugfs.h create mode 100644 drivers/gpu/drm/xe/xe_huc_types.h create mode 100644 drivers/gpu/drm/xe/xe_hw_engine.c create mode 100644 drivers/gpu/drm/xe/xe_hw_engine.h create mode 100644 drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c create mode 100644 drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.h create mode 100644 drivers/gpu/drm/xe/xe_hw_engine_types.h create mode 100644 drivers/gpu/drm/xe/xe_hw_fence.c create mode 100644 drivers/gpu/drm/xe/xe_hw_fence.h create mode 100644 drivers/gpu/drm/xe/xe_hw_fence_types.h create mode 100644 drivers/gpu/drm/xe/xe_hwmon.c create mode 100644 drivers/gpu/drm/xe/xe_hwmon.h create mode 100644 drivers/gpu/drm/xe/xe_irq.c create mode 100644 drivers/gpu/drm/xe/xe_irq.h create mode 100644 drivers/gpu/drm/xe/xe_lmtt.c create mode 100644 drivers/gpu/drm/xe/xe_lmtt.h create mode 100644 drivers/gpu/drm/xe/xe_lmtt_2l.c create mode 100644 drivers/gpu/drm/xe/xe_lmtt_ml.c create mode 100644 drivers/gpu/drm/xe/xe_lmtt_types.h create mode 100644 drivers/gpu/drm/xe/xe_lrc.c create mode 100644 drivers/gpu/drm/xe/xe_lrc.h create mode 100644 drivers/gpu/drm/xe/xe_lrc_types.h create mode 100644 drivers/gpu/drm/xe/xe_macros.h create mode 100644 drivers/gpu/drm/xe/xe_map.h create mode 100644 drivers/gpu/drm/xe/xe_migrate.c create mode 100644 drivers/gpu/drm/xe/xe_migrate.h create mode 100644 drivers/gpu/drm/xe/xe_migrate_doc.h create mode 100644 drivers/gpu/drm/xe/xe_mmio.c create mode 100644 drivers/gpu/drm/xe/xe_mmio.h create mode 100644 drivers/gpu/drm/xe/xe_mocs.c create mode 100644 drivers/gpu/drm/xe/xe_mocs.h create mode 100644 drivers/gpu/drm/xe/xe_module.c create mode 100644 drivers/gpu/drm/xe/xe_module.h create mode 100644 drivers/gpu/drm/xe/xe_pat.c create mode 100644 drivers/gpu/drm/xe/xe_pat.h create mode 100644 drivers/gpu/drm/xe/xe_pci.c create mode 100644 drivers/gpu/drm/xe/xe_pci.h create mode 100644 drivers/gpu/drm/xe/xe_pci_types.h create mode 100644 drivers/gpu/drm/xe/xe_pcode.c create mode 100644 drivers/gpu/drm/xe/xe_pcode.h create mode 100644 drivers/gpu/drm/xe/xe_pcode_api.h create mode 100644 drivers/gpu/drm/xe/xe_platform_types.h create mode 100644 
drivers/gpu/drm/xe/xe_pm.c create mode 100644 drivers/gpu/drm/xe/xe_pm.h create mode 100644 drivers/gpu/drm/xe/xe_preempt_fence.c create mode 100644 drivers/gpu/drm/xe/xe_preempt_fence.h create mode 100644 drivers/gpu/drm/xe/xe_preempt_fence_types.h create mode 100644 drivers/gpu/drm/xe/xe_pt.c create mode 100644 drivers/gpu/drm/xe/xe_pt.h create mode 100644 drivers/gpu/drm/xe/xe_pt_types.h create mode 100644 drivers/gpu/drm/xe/xe_pt_walk.c create mode 100644 drivers/gpu/drm/xe/xe_pt_walk.h create mode 100644 drivers/gpu/drm/xe/xe_query.c create mode 100644 drivers/gpu/drm/xe/xe_query.h create mode 100644 drivers/gpu/drm/xe/xe_range_fence.c create mode 100644 drivers/gpu/drm/xe/xe_range_fence.h create mode 100644 drivers/gpu/drm/xe/xe_reg_sr.c create mode 100644 drivers/gpu/drm/xe/xe_reg_sr.h create mode 100644 drivers/gpu/drm/xe/xe_reg_sr_types.h create mode 100644 drivers/gpu/drm/xe/xe_reg_whitelist.c create mode 100644 drivers/gpu/drm/xe/xe_reg_whitelist.h create mode 100644 drivers/gpu/drm/xe/xe_res_cursor.h create mode 100644 drivers/gpu/drm/xe/xe_ring_ops.c create mode 100644 drivers/gpu/drm/xe/xe_ring_ops.h create mode 100644 drivers/gpu/drm/xe/xe_ring_ops_types.h create mode 100644 drivers/gpu/drm/xe/xe_rtp.c create mode 100644 drivers/gpu/drm/xe/xe_rtp.h create mode 100644 drivers/gpu/drm/xe/xe_rtp_helpers.h create mode 100644 drivers/gpu/drm/xe/xe_rtp_types.h create mode 100644 drivers/gpu/drm/xe/xe_sa.c create mode 100644 drivers/gpu/drm/xe/xe_sa.h create mode 100644 drivers/gpu/drm/xe/xe_sa_types.h create mode 100644 drivers/gpu/drm/xe/xe_sched_job.c create mode 100644 drivers/gpu/drm/xe/xe_sched_job.h create mode 100644 drivers/gpu/drm/xe/xe_sched_job_types.h create mode 100644 drivers/gpu/drm/xe/xe_sriov.c create mode 100644 drivers/gpu/drm/xe/xe_sriov.h create mode 100644 drivers/gpu/drm/xe/xe_sriov_printk.h create mode 100644 drivers/gpu/drm/xe/xe_sriov_types.h create mode 100644 drivers/gpu/drm/xe/xe_step.c create mode 100644 drivers/gpu/drm/xe/xe_step.h create mode 100644 drivers/gpu/drm/xe/xe_step_types.h create mode 100644 drivers/gpu/drm/xe/xe_sync.c create mode 100644 drivers/gpu/drm/xe/xe_sync.h create mode 100644 drivers/gpu/drm/xe/xe_sync_types.h create mode 100644 drivers/gpu/drm/xe/xe_tile.c create mode 100644 drivers/gpu/drm/xe/xe_tile.h create mode 100644 drivers/gpu/drm/xe/xe_tile_sysfs.c create mode 100644 drivers/gpu/drm/xe/xe_tile_sysfs.h create mode 100644 drivers/gpu/drm/xe/xe_tile_sysfs_types.h create mode 100644 drivers/gpu/drm/xe/xe_trace.c create mode 100644 drivers/gpu/drm/xe/xe_trace.h create mode 100644 drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c create mode 100644 drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h create mode 100644 drivers/gpu/drm/xe/xe_ttm_sys_mgr.c create mode 100644 drivers/gpu/drm/xe/xe_ttm_sys_mgr.h create mode 100644 drivers/gpu/drm/xe/xe_ttm_vram_mgr.c create mode 100644 drivers/gpu/drm/xe/xe_ttm_vram_mgr.h create mode 100644 drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h create mode 100644 drivers/gpu/drm/xe/xe_tuning.c create mode 100644 drivers/gpu/drm/xe/xe_tuning.h create mode 100644 drivers/gpu/drm/xe/xe_uc.c create mode 100644 drivers/gpu/drm/xe/xe_uc.h create mode 100644 drivers/gpu/drm/xe/xe_uc_debugfs.c create mode 100644 drivers/gpu/drm/xe/xe_uc_debugfs.h create mode 100644 drivers/gpu/drm/xe/xe_uc_fw.c create mode 100644 drivers/gpu/drm/xe/xe_uc_fw.h create mode 100644 drivers/gpu/drm/xe/xe_uc_fw_abi.h create mode 100644 drivers/gpu/drm/xe/xe_uc_fw_types.h create mode 100644 drivers/gpu/drm/xe/xe_uc_types.h create mode 100644 
drivers/gpu/drm/xe/xe_vm.c create mode 100644 drivers/gpu/drm/xe/xe_vm.h create mode 100644 drivers/gpu/drm/xe/xe_vm_doc.h create mode 100644 drivers/gpu/drm/xe/xe_vm_types.h create mode 100644 drivers/gpu/drm/xe/xe_wa.c create mode 100644 drivers/gpu/drm/xe/xe_wa.h create mode 100644 drivers/gpu/drm/xe/xe_wa_oob.rules create mode 100644 drivers/gpu/drm/xe/xe_wait_user_fence.c create mode 100644 drivers/gpu/drm/xe/xe_wait_user_fence.h create mode 100644 drivers/gpu/drm/xe/xe_wopcm.c create mode 100644 drivers/gpu/drm/xe/xe_wopcm.h create mode 100644 drivers/gpu/drm/xe/xe_wopcm_types.h

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 9b079f3a1b..c7edba18a6 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -74,16 +74,17 @@ config DRM_KUNIT_TEST_HELPERS
 config DRM_KUNIT_TEST
 	tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
-	depends on DRM && KUNIT
-	select PRIME_NUMBERS
+	depends on DRM && KUNIT && MMU
+	select DRM_BUDDY
 	select DRM_DISPLAY_DP_HELPER
 	select DRM_DISPLAY_HELPER
-	select DRM_LIB_RANDOM
-	select DRM_KMS_HELPER
-	select DRM_BUDDY
+	select DRM_EXEC
 	select DRM_EXPORT_FOR_TESTS if m
+	select DRM_GEM_SHMEM_HELPER
+	select DRM_KMS_HELPER
 	select DRM_KUNIT_TEST_HELPERS
-	select DRM_EXEC
+	select DRM_LIB_RANDOM
+	select PRIME_NUMBERS
 	default KUNIT_ALL_TESTS
 	help
 	  This builds unit tests for DRM. This option is not useful for
@@ -276,6 +277,8 @@ source "drivers/gpu/drm/nouveau/Kconfig"
 
 source "drivers/gpu/drm/i915/Kconfig"
 
+source "drivers/gpu/drm/xe/Kconfig"
+
 source "drivers/gpu/drm/kmb/Kconfig"
 
 config DRM_VGEM
@@ -395,6 +398,8 @@ source "drivers/gpu/drm/solomon/Kconfig"
 
 source "drivers/gpu/drm/sprd/Kconfig"
 
+source "drivers/gpu/drm/imagination/Kconfig"
+
 config DRM_HYPERV
 	tristate "DRM Support for Hyper-V synthetic video device"
 	depends on DRM && PCI && MMU && HYPERV
@@ -408,27 +413,6 @@ config DRM_HYPERV
 
 	  If M is selected the module will be called hyperv_drm.
 
-# Keep legacy drivers last
-
-menuconfig DRM_LEGACY
-	bool "Enable legacy drivers (DANGEROUS)"
-	depends on DRM && MMU
-	help
-	  Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
-	  APIs to user-space, which can be used to circumvent access
-	  restrictions and other security measures. For backwards compatibility
-	  those drivers are still available, but their use is highly
-	  inadvisable and might harm your system.
-
-	  You are recommended to use the safe modeset-only drivers instead, and
-	  perform 3D emulation in user-space.
-
-	  Unless you have strong reasons to go rogue, say "N".
-
-if DRM_LEGACY
-# leave here to list legacy drivers
-endif # DRM_LEGACY
-
 config DRM_EXPORT_FOR_TESTS
 	bool
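For context, the DRM_KUNIT_TEST option reworked above builds KUnit suites such as those under drivers/gpu/drm/tests/, and the new selects appear to pull in helpers those tests need (e.g. DRM_GEM_SHMEM_HELPER for the new drm_gem_shmem_test.c). A minimal, self-contained KUnit suite skeleton in C looks roughly like the sketch below; the suite and test names are hypothetical and not part of this patch:

#include <kunit/test.h>

/* Hypothetical example suite; the real DRM suites live in drivers/gpu/drm/tests/. */
static void example_math_test(struct kunit *test)
{
	/* KUNIT_EXPECT_* records a failure but lets the test continue. */
	KUNIT_EXPECT_EQ(test, 2 + 2, 4);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_math_test),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "drm-example",
	.test_cases = example_test_cases,
};
kunit_test_suite(example_test_suite);

Suites registered this way are typically run in a test kernel via tools/testing/kunit/kunit.py, or execute at boot when built in.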
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 8e1bde0591..104b42df2e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -22,6 +22,7 @@ drm-y := \
 	drm_drv.o \
 	drm_dumb_buffers.o \
 	drm_edid.o \
+	drm_eld.o \
 	drm_encoder.o \
 	drm_file.o \
 	drm_fourcc.o \
@@ -46,18 +47,6 @@ drm-y := \
 	drm_vblank_work.o \
 	drm_vma_manager.o \
 	drm_writeback.o
-drm-$(CONFIG_DRM_LEGACY) += \
-	drm_agpsupport.o \
-	drm_bufs.o \
-	drm_context.o \
-	drm_dma.o \
-	drm_hashtab.o \
-	drm_irq.o \
-	drm_legacy_misc.o \
-	drm_lock.o \
-	drm_memory.o \
-	drm_scatter.o \
-	drm_vm.o
 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
@@ -145,6 +134,7 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
 obj-$(CONFIG_DRM_AMDGPU)+= amd/amdxcp/
 obj-$(CONFIG_DRM_I915)	+= i915/
+obj-$(CONFIG_DRM_XE)	+= xe/
 obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb/
 obj-$(CONFIG_DRM_MGAG200) += mgag200/
 obj-$(CONFIG_DRM_V3D)	+= v3d/
@@ -198,3 +188,4 @@ obj-$(CONFIG_DRM_HYPERV) += hyperv/
 obj-y			+= solomon/
 obj-$(CONFIG_DRM_SPRD) += sprd/
 obj-$(CONFIG_DRM_LOONGSON) += loongson/
+obj-$(CONFIG_DRM_POWERVR) += imagination/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 2afecc5509..260e32ef7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -80,7 +80,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
 	amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
 	amdgpu_fw_attestation.o amdgpu_securedisplay.o \
 	amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
-	amdgpu_ring_mux.o amdgpu_xcp.o
+	amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o
 
 amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 13c62a26aa..79827a6dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -109,6 +109,8 @@
 #include "amdgpu_mca.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_xcp.h"
+#include "amdgpu_seq64.h"
+#include "amdgpu_reg_state.h"
 
 #define MAX_GPU_INSTANCE		64
 
@@ -251,6 +253,8 @@ extern int amdgpu_seamless;
 extern int amdgpu_user_partt_mode;
 extern int amdgpu_agp;
 
+extern int amdgpu_wbrf;
+
 #define AMDGPU_VM_MAX_NUM_CTX			4096
 #define AMDGPU_SG_THRESHOLD			(256*1024*1024)
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
@@ -469,6 +473,7 @@ struct amdgpu_fpriv {
 	struct amdgpu_vm	vm;
 	struct amdgpu_bo_va	*prt_va;
 	struct amdgpu_bo_va	*csa_va;
+	struct amdgpu_bo_va	*seq64_va;
 	struct mutex		bo_list_lock;
 	struct idr		bo_list_handles;
 	struct amdgpu_ctx_mgr	ctx_mgr;
@@ -507,6 +512,31 @@ struct amdgpu_allowed_register_entry {
 	bool grbm_indexed;
 };
 
+/**
+ * enum amd_reset_method - Methods for resetting AMD GPU devices
+ *
+ * @AMD_RESET_METHOD_NONE: The device will not be reset.
+ * @AMD_RESET_METHOD_LEGACY: Method reserved for SI, CIK and VI ASICs.
+ * @AMD_RESET_METHOD_MODE0: Reset the entire ASIC. Not currently available for
+ *                          any device.
+ * @AMD_RESET_METHOD_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN,
+ *                          etc.) individually. Suitable only for some discrete
+ *                          GPUs, not available for all ASICs.
+ * @AMD_RESET_METHOD_MODE2: Resets a lesser level of IPs compared to MODE1.
+ *                          Which IPs are reset depends on the ASIC. Notably
+ *                          doesn't reset IPs shared with the CPU on APUs or
+ *                          the memory controllers (so VRAM is not lost). Not
+ *                          available on all ASICs.
+ * @AMD_RESET_METHOD_BACO: BACO (Bus Alive, Chip Off) method powers the card
+ *                         off and on, but without powering off the PCI bus.
+ *                         Suitable only for discrete GPUs.
+ * @AMD_RESET_METHOD_PCI: Does a full bus reset using the core Linux PCI reset
+ *                        subsystem and does a secondary bus reset or FLR,
+ *                        depending on what the underlying hardware supports.
+ *
+ * Methods available to the AMD GPU driver for resetting the device. Not all
+ * methods are suitable for every device. Users can override the method via
+ * the module parameter `reset_method`.
+ */
 enum amd_reset_method {
 	AMD_RESET_METHOD_NONE = -1,
 	AMD_RESET_METHOD_LEGACY = 0,
@@ -586,6 +616,10 @@ struct amdgpu_asic_funcs {
 				      const struct amdgpu_video_codecs **codecs);
 	/* encode "> 32bits" smn addressing */
 	u64 (*encode_ext_smn_addressing)(int ext_id);
+
+	ssize_t (*get_reg_state)(struct amdgpu_device *adev,
+				 enum amdgpu_reg_state reg_state, void *buf,
+				 size_t max_size);
 };
 
 /*
@@ -988,6 +1022,9 @@ struct amdgpu_device {
 	/* GDS */
 	struct amdgpu_gds		gds;
 
+	/* for userq and VM fences */
+	struct amdgpu_seq64		seq64;
+
 	/* KFD */
 	struct amdgpu_kfd_dev		kfd;
 
@@ -1110,6 +1147,7 @@ struct amdgpu_device {
 	bool                            debug_vm;
 	bool                            debug_largebar;
 	bool                            debug_disable_soft_recovery;
+	bool                            debug_use_vram_fw_buf;
 };
 
 static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
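The enum amd_reset_method documentation above is the contract behind the amdgpu `reset_method` module parameter. Purely as an illustration, and assuming only the enumerators documented above, a helper mapping the methods to printable names could look like the sketch below (this function is hypothetical and not part of the patch):

/* Hypothetical helper: human-readable names for the reset methods above. */
static const char *amd_reset_method_name(enum amd_reset_method m)
{
	switch (m) {
	case AMD_RESET_METHOD_NONE:	return "none";
	case AMD_RESET_METHOD_LEGACY:	return "legacy";
	case AMD_RESET_METHOD_MODE0:	return "mode0";
	case AMD_RESET_METHOD_MODE1:	return "mode1";
	case AMD_RESET_METHOD_MODE2:	return "mode2";
	case AMD_RESET_METHOD_BACO:	return "baco";
	case AMD_RESET_METHOD_PCI:	return "pci";
	default:			return "unknown";
	}
}

In practice the method is chosen per ASIC, and can be forced from the kernel command line (e.g. amdgpu.reset_method=4 for BACO, following the numbering above).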
Not available on all ASICs. + * @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card + * but without powering off the PCI bus. Suitable only for + * discrete GPUs. + * @AMD_RESET_PCI: Does a full bus reset using core Linux subsystem PCI reset + * and does a secondary bus reset or FLR, depending on what the + * underlying hardware supports. + * + * Methods available for AMD GPU driver for resetting the device. Not all + * methods are suitable for every device. User can override the method using + * module parameter `reset_method`. + */ enum amd_reset_method { AMD_RESET_METHOD_NONE = -1, AMD_RESET_METHOD_LEGACY = 0, @@ -586,6 +616,10 @@ struct amdgpu_asic_funcs { const struct amdgpu_video_codecs **codecs); /* encode "> 32bits" smn addressing */ u64 (*encode_ext_smn_addressing)(int ext_id); + + ssize_t (*get_reg_state)(struct amdgpu_device *adev, + enum amdgpu_reg_state reg_state, void *buf, + size_t max_size); }; /* @@ -988,6 +1022,9 @@ struct amdgpu_device { /* GDS */ struct amdgpu_gds gds; + /* for userq and VM fences */ + struct amdgpu_seq64 seq64; + /* KFD */ struct amdgpu_kfd_dev kfd; @@ -1110,6 +1147,7 @@ struct amdgpu_device { bool debug_vm; bool debug_largebar; bool debug_disable_soft_recovery; + bool debug_use_vram_fw_buf; }; static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 3d126f2967..131983ed43 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -138,6 +138,30 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work) amdgpu_device_gpu_recover(adev, NULL, &reset_context); } +static const struct drm_client_funcs kfd_client_funcs = { + .unregister = drm_client_release, +}; + +int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev) +{ + int ret; + + if (!adev->kfd.init_complete || adev->kfd.client.dev) + return 0; + + ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd", + &kfd_client_funcs); + if (ret) { + dev_err(adev->dev, "Failed to init DRM client: %d\n", + ret); + return ret; + } + + drm_client_register(&adev->kfd.client); + + return 0; +} + void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) { int i; @@ -708,35 +732,6 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) return false; } -int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, - uint16_t vmid) -{ - if (adev->family == AMDGPU_FAMILY_AI) { - int i; - - for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) - amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0); - } else { - amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), 0); - } - - return 0; -} - -int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - uint16_t pasid, - enum TLB_FLUSH_TYPE flush_type, - uint32_t inst) -{ - bool all_hub = false; - - if (adev->family == AMDGPU_FAMILY_AI || - adev->family == AMDGPU_FAMILY_RV) - all_hub = true; - - return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub, inst); -} - bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev) { return adev->have_atomics_support; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index dac983da96..27c61c535e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -33,6 +33,7 @@ #include #include #include +#include #include "amdgpu_sync.h" #include "amdgpu_vm.h" #include "amdgpu_xcp.h" 
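
The reset-method kernel-doc above notes that the per-ASIC default can be overridden through the `reset_method` module parameter. A minimal sketch of how such an override could be validated against the enum; only NONE and LEGACY are visible in this hunk, the remaining enumerator names are assumed to follow the AMD_RESET_METHOD_* pattern, and the helper name is hypothetical:

/*
 * Illustrative only, not part of the patch: accept a user override when it
 * names a known method, otherwise defer to the per-ASIC selection logic.
 */
static enum amd_reset_method
amdgpu_validate_reset_method(int requested)
{
	switch (requested) {
	case AMD_RESET_METHOD_LEGACY:
	case AMD_RESET_METHOD_MODE0:
	case AMD_RESET_METHOD_MODE1:
	case AMD_RESET_METHOD_MODE2:
	case AMD_RESET_METHOD_BACO:
	case AMD_RESET_METHOD_PCI:
		return requested;		/* honor the explicit override */
	default:
		return AMD_RESET_METHOD_NONE;	/* fall back to per-ASIC choice */
	}
}
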
@@ -83,6 +84,7 @@ struct kgd_mem { struct amdgpu_sync sync; + uint32_t gem_handle; bool aql_queue; bool is_imported; }; @@ -105,6 +107,9 @@ struct amdgpu_kfd_dev { /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ struct dev_pagemap pgmap; + + /* Client for KFD BO GEM handle allocations */ + struct drm_client_dev client; }; enum kgd_engine_type { @@ -162,11 +167,6 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev, uint32_t *ib_cmd, uint32_t ib_len); void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle); bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev); -int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, - uint16_t vmid); -int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - uint16_t pasid, enum TLB_FLUSH_TYPE flush_type, - uint32_t inst); bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); @@ -182,6 +182,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, struct mm_struct *mm, struct svm_range_bo *svm_bo); + +int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev); #if defined(CONFIG_DEBUG_FS) int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data); #endif @@ -301,7 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv); -void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv); +int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_sync_memory( struct amdgpu_device *adev, struct kgd_mem *mem, bool intr); int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem, @@ -311,14 +313,13 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem); int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo); int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info, - struct dma_fence **ef); + struct dma_fence __rcu **ef); int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, struct kfd_vm_fault_info *info); -int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, - struct dma_buf *dmabuf, - uint64_t va, void *drm_priv, - struct kgd_mem **mem, uint64_t *size, - uint64_t *mmap_offset); +int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd, + uint64_t va, void *drm_priv, + struct kgd_mem **mem, uint64_t *size, + uint64_t *mmap_offset); int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem, struct dma_buf **dmabuf); void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 625db444df..3a3f3ce09f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -200,7 +200,7 @@ int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev, #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) - *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; @@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus for (i = 0; i < adev->gfx.num_compute_rings; i++) { struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; - if (!(ring 
&& ring->sched.thread)) + if (!amdgpu_ring_sched_ready(ring)) continue; /* stop secheduler and drain ring. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index f6598b9e4f..a5c7259cf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -141,7 +141,7 @@ static int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev, (*dump)[i++][1] = RREG32(addr); \ } while (0) - *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 6bf448ab3d..ca4a6b8281 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -214,7 +214,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev, (*dump)[i++][1] = RREG32(addr); \ } while (0) - *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; @@ -301,7 +301,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, #undef HQD_N_REGS #define HQD_N_REGS (19+4) - *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index cd06e4a6d1..0f3e2944ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -238,7 +238,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev, (*dump)[i++][1] = RREG32(addr); \ } while (0) - *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; @@ -324,7 +324,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, #undef HQD_N_REGS #define HQD_N_REGS (19+4+2+3+7) - *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 00fbc0f44c..5a35a8ca89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -363,7 +363,7 @@ int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, (*dump)[i++][1] = RREG32(addr); \ } while (0) - *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; @@ -460,7 +460,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) - *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 41fbc4fd0f..daa66eb4f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -25,6 +25,7 @@ #include #include #include +#include #include 
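
The repeated kmalloc_array() conversions in the HQD dump callbacks do not change the allocation size; they make the size expression self-describing. The dump buffer holds HQD_N_REGS pairs of (register, value), so with the element type spelled out (simplified here to a single pointer; the callbacks use one more level of indirection), sizeof() of one element already covers a whole pair:

/*
 * Illustrative only: both forms request HQD_N_REGS * 2 * sizeof(uint32_t)
 * bytes, but the new style stays correct if the element type ever changes.
 */
uint32_t (*regs)[2];	/* HQD_N_REGS pairs of (register, value) */

regs = kmalloc_array(HQD_N_REGS, sizeof(*regs), GFP_KERNEL);	/* new style */
/* old style, same byte count but hard-coded to uint32_t:
 * regs = (void *)kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
 */
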
#include @@ -806,13 +807,22 @@ kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, static int kfd_mem_export_dmabuf(struct kgd_mem *mem) { if (!mem->dmabuf) { - struct dma_buf *ret = amdgpu_gem_prime_export( - &mem->bo->tbo.base, + struct amdgpu_device *bo_adev; + struct dma_buf *dmabuf; + int r, fd; + + bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); + r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file, + mem->gem_handle, mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? - DRM_RDWR : 0); - if (IS_ERR(ret)) - return PTR_ERR(ret); - mem->dmabuf = ret; + DRM_RDWR : 0, &fd); + if (r) + return r; + dmabuf = dma_buf_get(fd); + close_fd(fd); + if (WARN_ON_ONCE(IS_ERR(dmabuf))) + return PTR_ERR(dmabuf); + mem->dmabuf = dmabuf; } return 0; @@ -1137,7 +1147,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem, ctx->n_vms = 1; ctx->sync = &mem->sync; - drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); drm_exec_until_all_locked(&ctx->exec) { ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2); drm_exec_retry_on_contention(&ctx->exec); @@ -1176,7 +1186,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, int ret; ctx->sync = &mem->sync; - drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); drm_exec_until_all_locked(&ctx->exec) { ctx->n_vms = 0; list_for_each_entry(entry, &mem->attachments, list) { @@ -1384,7 +1394,6 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, amdgpu_amdkfd_restore_userptr_worker); *process_info = info; - *ef = dma_fence_get(&info->eviction_fence->base); } vm->process_info = *process_info; @@ -1415,6 +1424,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, list_add_tail(&vm->vm_list_node, &(vm->process_info->vm_list_head)); vm->process_info->n_vms++; + + *ef = dma_fence_get(&vm->process_info->eviction_fence->base); mutex_unlock(&vm->process_info->lock); return 0; @@ -1426,10 +1437,7 @@ validate_pd_fail: reserve_pd_fail: vm->process_info = NULL; if (info) { - /* Two fence references: one in info and one in *ef */ dma_fence_put(&info->eviction_fence->base); - dma_fence_put(*ef); - *ef = NULL; *process_info = NULL; put_pid(info->pid); create_evict_fence_fail: @@ -1623,7 +1631,8 @@ int amdgpu_amdkfd_criu_resume(void *p) goto out_unlock; } WRITE_ONCE(pinfo->block_mmu_notifications, false); - schedule_delayed_work(&pinfo->restore_userptr_work, 0); + queue_delayed_work(system_freezable_wq, + &pinfo->restore_userptr_work, 0); out_unlock: mutex_unlock(&pinfo->lock); @@ -1779,6 +1788,9 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( pr_debug("Failed to allow vma node access. 
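
The reworked kfd_mem_export_dmabuf() above routes the export through the KFD DRM client file so PRIME operates on an ordinary GEM handle. Because drm_gem_prime_handle_to_fd() only hands back a file descriptor while the kernel needs the struct dma_buf itself, the fd is converted back with dma_buf_get() and immediately closed. A reduced sketch of the pattern, with ddev, client_file, gem_handle and writable standing in for the patch's real fields:

int fd, r;
struct dma_buf *dmabuf;

r = drm_gem_prime_handle_to_fd(ddev, client_file, gem_handle,
			       writable ? DRM_RDWR : 0, &fd);
if (r)
	return r;

dmabuf = dma_buf_get(fd);	/* takes its own dma_buf reference */
close_fd(fd);			/* the fd is not needed in-kernel */
if (IS_ERR(dmabuf))
	return PTR_ERR(dmabuf);
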
ret %d\n", ret); goto err_node_allow; } + ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle); + if (ret) + goto err_gem_handle_create; bo = gem_to_amdgpu_bo(gobj); if (bo_type == ttm_bo_type_sg) { bo->tbo.sg = sg; @@ -1830,6 +1842,8 @@ allocate_init_user_pages_failed: err_pin_bo: err_validate_bo: remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); + drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle); +err_gem_handle_create: drm_vma_node_revoke(&gobj->vma_node, drm_priv); err_node_allow: /* Don't unreserve system mem limit twice */ @@ -1837,6 +1851,7 @@ err_node_allow: err_bo_create: amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id); err_reserve_limit: + amdgpu_sync_free(&(*mem)->sync); mutex_destroy(&(*mem)->lock); if (gobj) drm_gem_object_put(gobj); @@ -1942,8 +1957,11 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( /* Free the BO*/ drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); - if (mem->dmabuf) + drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle); + if (mem->dmabuf) { dma_buf_put(mem->dmabuf); + mem->dmabuf = NULL; + } mutex_destroy(&mem->lock); /* If this releases the last reference, it will end up calling @@ -2068,21 +2086,35 @@ out: return ret; } -void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv) +int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv) { struct kfd_mem_attachment *entry; struct amdgpu_vm *vm; + int ret; vm = drm_priv_to_vm(drm_priv); mutex_lock(&mem->lock); + ret = amdgpu_bo_reserve(mem->bo, true); + if (ret) + goto out; + list_for_each_entry(entry, &mem->attachments, list) { - if (entry->bo_va->base.vm == vm) - kfd_mem_dmaunmap_attachment(mem, entry); + if (entry->bo_va->base.vm != vm) + continue; + if (entry->bo_va->base.bo->tbo.ttm && + !entry->bo_va->base.bo->tbo.ttm->sg) + continue; + + kfd_mem_dmaunmap_attachment(mem, entry); } + amdgpu_bo_unreserve(mem->bo); +out: mutex_unlock(&mem->lock); + + return ret; } int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( @@ -2295,34 +2327,26 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, return 0; } -int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, - struct dma_buf *dma_buf, - uint64_t va, void *drm_priv, - struct kgd_mem **mem, uint64_t *size, - uint64_t *mmap_offset) +static int import_obj_create(struct amdgpu_device *adev, + struct dma_buf *dma_buf, + struct drm_gem_object *obj, + uint64_t va, void *drm_priv, + struct kgd_mem **mem, uint64_t *size, + uint64_t *mmap_offset) { struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); - struct drm_gem_object *obj; struct amdgpu_bo *bo; int ret; - obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf); - if (IS_ERR(obj)) - return PTR_ERR(obj); - bo = gem_to_amdgpu_bo(obj); if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT))) { + AMDGPU_GEM_DOMAIN_GTT))) /* Only VRAM and GTT BOs are supported */ - ret = -EINVAL; - goto err_put_obj; - } + return -EINVAL; *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL); - if (!*mem) { - ret = -ENOMEM; - goto err_put_obj; - } + if (!*mem) + return -ENOMEM; ret = drm_vma_node_allow(&obj->vma_node, drm_priv); if (ret) @@ -2372,8 +2396,41 @@ err_remove_mem: drm_vma_node_revoke(&obj->vma_node, drm_priv); err_free_mem: kfree(*mem); + return ret; +} + +int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd, + uint64_t va, void *drm_priv, + struct kgd_mem **mem, uint64_t *size, + uint64_t *mmap_offset) +{ + struct drm_gem_object *obj; 
+ uint32_t handle; + int ret; + + ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd, + &handle); + if (ret) + return ret; + obj = drm_gem_object_lookup(adev->kfd.client.file, handle); + if (!obj) { + ret = -EINVAL; + goto err_release_handle; + } + + ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size, + mmap_offset); + if (ret) + goto err_put_obj; + + (*mem)->gem_handle = handle; + + return 0; + err_put_obj: drm_gem_object_put(obj); +err_release_handle: + drm_gem_handle_delete(adev->kfd.client.file, handle); return ret; } @@ -2426,7 +2483,8 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni, KFD_QUEUE_EVICTION_TRIGGER_USERPTR); if (r) pr_err("Failed to quiesce KFD\n"); - schedule_delayed_work(&process_info->restore_userptr_work, + queue_delayed_work(system_freezable_wq, + &process_info->restore_userptr_work, msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); } mutex_unlock(&process_info->notifier_lock); @@ -2552,7 +2610,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) amdgpu_sync_create(&sync); - drm_exec_init(&exec, 0); + drm_exec_init(&exec, 0, 0); /* Reserve all BOs and page tables for validation */ drm_exec_until_all_locked(&exec) { /* Reserve all the page directories */ @@ -2749,7 +2807,8 @@ unlock_out: /* If validation failed, reschedule another attempt */ if (evicted_bos) { - schedule_delayed_work(&process_info->restore_userptr_work, + queue_delayed_work(system_freezable_wq, + &process_info->restore_userptr_work, msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); kfd_smi_event_queue_restore_rescheduled(mm); @@ -2758,6 +2817,23 @@ unlock_out: put_task_struct(usertask); } +static void replace_eviction_fence(struct dma_fence __rcu **ef, + struct dma_fence *new_ef) +{ + struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true + /* protected by process_info->lock */); + + /* If we're replacing an unsignaled eviction fence, that fence will + * never be signaled, and if anyone is still waiting on that fence, + * they will hang forever. This should never happen. We should only + * replace the fence in restore_work that only gets scheduled after + * eviction work signaled the fence. + */ + WARN_ONCE(!dma_fence_is_signaled(old_ef), + "Replacing unsignaled eviction fence"); + dma_fence_put(old_ef); +} + /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given * KFD process identified by process_info * @@ -2776,12 +2852,11 @@ unlock_out: * 7. Add fence to all PD and PT BOs. * 8. Unreserve all BOs */ -int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) +int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef) { struct amdkfd_process_info *process_info = info; struct amdgpu_vm *peer_vm; struct kgd_mem *mem; - struct amdgpu_amdkfd_fence *new_fence; struct list_head duplicate_save; struct amdgpu_sync sync_obj; unsigned long failed_size = 0; @@ -2793,7 +2868,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) mutex_lock(&process_info->lock); - drm_exec_init(&exec, 0); + drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { @@ -2825,12 +2900,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) if (ret) goto validate_map_fail; - ret = process_sync_pds_resv(process_info, &sync_obj); - if (ret) { - pr_debug("Memory eviction: Failed to sync to PD BO moving fence. 
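
The schedule_delayed_work() to queue_delayed_work(system_freezable_wq, ...) conversions in this file move the userptr-restore work onto a freezable workqueue, so it is parked together with user space across suspend/resume rather than touching GPU state mid-transition. A self-contained sketch of the same pattern, with a placeholder work body and delay:

#include <linux/workqueue.h>

static void userptr_restore_fn(struct work_struct *work)
{
	/* placeholder: revalidate invalidated userptr BOs */
}
static DECLARE_DELAYED_WORK(userptr_restore_work, userptr_restore_fn);

static void kick_restore(void)
{
	/* runs like schedule_delayed_work(), but the freezer holds it
	 * back while the system suspends and releases it on resume
	 */
	queue_delayed_work(system_freezable_wq, &userptr_restore_work,
			   msecs_to_jiffies(100));
}
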
Try again\n"); - goto validate_map_fail; - } - /* Validate BOs and map them to GPUVM (update VM page tables). */ list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { @@ -2881,6 +2950,19 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) if (failed_size) pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); + /* Update mappings not managed by KFD */ + list_for_each_entry(peer_vm, &process_info->vm_list_head, + vm_list_node) { + struct amdgpu_device *adev = amdgpu_ttm_adev( + peer_vm->root.bo->tbo.bdev); + + ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket); + if (ret) { + pr_debug("Memory eviction: handle moved failed. Try again\n"); + goto validate_map_fail; + } + } + /* Update page directories */ ret = process_update_pds(process_info, &sync_obj); if (ret) { @@ -2888,25 +2970,47 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) goto validate_map_fail; } + /* Sync with fences on all the page tables. They implicitly depend on any + * move fences from amdgpu_vm_handle_moved above. + */ + ret = process_sync_pds_resv(process_info, &sync_obj); + if (ret) { + pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n"); + goto validate_map_fail; + } + /* Wait for validate and PT updates to finish */ amdgpu_sync_wait(&sync_obj, false); - /* Release old eviction fence and create new one, because fence only - * goes from unsignaled to signaled, fence cannot be reused. - * Use context and mm from the old fence. + /* The old eviction fence may be unsignaled if restore happens + * after a GPU reset or suspend/resume. Keep the old fence in that + * case. Otherwise release the old eviction fence and create new + * one, because fence only goes from unsignaled to signaled once + * and cannot be reused. Use context and mm from the old fence. + * + * If an old eviction fence signals after this check, that's OK. + * Anyone signaling an eviction fence must stop the queues first + * and schedule another restore worker. 
*/ - new_fence = amdgpu_amdkfd_fence_create( + if (dma_fence_is_signaled(&process_info->eviction_fence->base)) { + struct amdgpu_amdkfd_fence *new_fence = + amdgpu_amdkfd_fence_create( process_info->eviction_fence->base.context, process_info->eviction_fence->mm, NULL); - if (!new_fence) { - pr_err("Failed to create eviction fence\n"); - ret = -ENOMEM; - goto validate_map_fail; + + if (!new_fence) { + pr_err("Failed to create eviction fence\n"); + ret = -ENOMEM; + goto validate_map_fail; + } + dma_fence_put(&process_info->eviction_fence->base); + process_info->eviction_fence = new_fence; + replace_eviction_fence(ef, dma_fence_get(&new_fence->base)); + } else { + WARN_ONCE(*ef != &process_info->eviction_fence->base, + "KFD eviction fence doesn't match KGD process_info"); } - dma_fence_put(&process_info->eviction_fence->base); - process_info->eviction_fence = new_fence; - *ef = dma_fence_get(&new_fence->base); /* Attach new eviction fence to all BOs except pinned ones */ list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 7473a42f7d..9caba10315 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -103,7 +103,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector) struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct amdgpu_connector_atom_dig *dig_connector; int bpc = 8; - unsigned mode_clock, max_tmds_clock; + unsigned int mode_clock, max_tmds_clock; switch (connector->connector_type) { case DRM_MODE_CONNECTOR_DVII: @@ -255,6 +255,7 @@ struct edid *amdgpu_connector_edid(struct drm_connector *connector) return amdgpu_connector->edid; } else if (edid_blob) { struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL); + if (edid) amdgpu_connector->edid = edid; } @@ -581,6 +582,7 @@ static int amdgpu_connector_set_property(struct drm_connector *connector, amdgpu_encoder = to_amdgpu_encoder(connector->encoder); } else { const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; + amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector)); } @@ -797,6 +799,7 @@ static int amdgpu_connector_set_lcd_property(struct drm_connector *connector, amdgpu_encoder = to_amdgpu_encoder(connector->encoder); else { const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; + amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector)); } @@ -979,6 +982,41 @@ amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector) return false; } +static void amdgpu_connector_shared_ddc(enum drm_connector_status *status, + struct drm_connector *connector, + struct amdgpu_connector *amdgpu_connector) +{ + struct drm_connector *list_connector; + struct drm_connector_list_iter iter; + struct amdgpu_connector *list_amdgpu_connector; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + + if (amdgpu_connector->shared_ddc && *status == connector_status_connected) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(list_connector, + &iter) { + if (connector == list_connector) + continue; + list_amdgpu_connector = to_amdgpu_connector(list_connector); + if (list_amdgpu_connector->shared_ddc && + list_amdgpu_connector->ddc_bus->rec.i2c_id == + amdgpu_connector->ddc_bus->rec.i2c_id) { + /* cases where both 
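
The restore path above now only mints a fresh eviction fence once the old one has actually signaled, and publishes it through an RCU-annotated pointer so concurrent readers never observe a torn update. The core of the pattern, reduced to its essentials (types simplified; the patch passes a literal `true` as the rcu_replace_pointer() condition since process_info->lock serializes writers, shown here with the equivalent lockdep form):

struct dma_fence __rcu *ef;		/* pointer shared with readers */
struct dma_fence *fence;

/* writer side, under process_info->lock */
if (dma_fence_is_signaled(old_fence)) {
	struct dma_fence *prev;

	prev = rcu_replace_pointer(ef, dma_fence_get(new_fence),
				   lockdep_is_held(&process_info->lock));
	dma_fence_put(prev);	/* drop the reference the pointer held */
}

/* reader side */
rcu_read_lock();
fence = dma_fence_get_rcu_safe(&ef);
rcu_read_unlock();
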
connectors are digital */ + if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) { + /* hpd is our only option in this case */ + if (!amdgpu_display_hpd_sense(adev, + amdgpu_connector->hpd.hpd)) { + amdgpu_connector_free_edid(connector); + *status = connector_status_disconnected; + } + } + } + } + drm_connector_list_iter_end(&iter); + } +} + /* * DVI is complicated * Do a DDC probe, if DDC probe passes, get the full EDID so @@ -1065,32 +1103,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) * DDC line. The latter is more complex because with DVI<->HDMI adapters * you don't really know what's connected to which port as both are digital. */ - if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) { - struct drm_connector *list_connector; - struct drm_connector_list_iter iter; - struct amdgpu_connector *list_amdgpu_connector; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(list_connector, - &iter) { - if (connector == list_connector) - continue; - list_amdgpu_connector = to_amdgpu_connector(list_connector); - if (list_amdgpu_connector->shared_ddc && - (list_amdgpu_connector->ddc_bus->rec.i2c_id == - amdgpu_connector->ddc_bus->rec.i2c_id)) { - /* cases where both connectors are digital */ - if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) { - /* hpd is our only option in this case */ - if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { - amdgpu_connector_free_edid(connector); - ret = connector_status_disconnected; - } - } - } - } - drm_connector_list_iter_end(&iter); - } + amdgpu_connector_shared_ddc(&ret, connector, amdgpu_connector); } } @@ -1192,6 +1205,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector) static void amdgpu_connector_dvi_force(struct drm_connector *connector) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); + if (connector->force == DRM_FORCE_ON) amdgpu_connector->use_digital = false; if (connector->force == DRM_FORCE_ON_DIGITAL) @@ -1426,6 +1440,7 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) ret = connector_status_connected; else if (amdgpu_connector->dac_load_detect) { /* try load detection */ const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; + ret = encoder_funcs->detect(encoder, connector); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index e50be65000..116d3756c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -66,7 +66,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, amdgpu_sync_create(&p->sync); drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT | - DRM_EXEC_IGNORE_DUPLICATES); + DRM_EXEC_IGNORE_DUPLICATES, 0); return 0; } @@ -819,7 +819,7 @@ retry: p->bytes_moved += ctx.bytes_moved; if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && - amdgpu_bo_in_cpu_visible_vram(bo)) + amdgpu_res_cpu_visible(adev, bo->tbo.resource)) p->bytes_moved_vis += ctx.bytes_moved; if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { @@ -870,9 +870,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, struct amdgpu_bo *bo = e->bo; int i; - e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages, - sizeof(struct page *), - GFP_KERNEL | __GFP_ZERO); + e->user_pages = kvcalloc(bo->tbo.ttm->num_pages, + sizeof(struct page *), + GFP_KERNEL); if (!e->user_pages) { DRM_ERROR("kvmalloc_array failure\n"); r = -ENOMEM; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 7200110197..796fa6f142 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -70,7 +70,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct drm_exec exec; int r; - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); drm_exec_until_all_locked(&exec) { r = amdgpu_vm_lock_pd(vm, &exec, 0); if (likely(!r)) @@ -110,7 +110,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct drm_exec exec; int r; - drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); drm_exec_until_all_locked(&exec) { r = amdgpu_vm_lock_pd(vm, &exec, 0); if (likely(!r)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index e2ae9ba147..5cb33ac99f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -73,10 +73,10 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio) return DRM_SCHED_PRIORITY_NORMAL; case AMDGPU_CTX_PRIORITY_VERY_LOW: - return DRM_SCHED_PRIORITY_MIN; + return DRM_SCHED_PRIORITY_LOW; case AMDGPU_CTX_PRIORITY_LOW: - return DRM_SCHED_PRIORITY_MIN; + return DRM_SCHED_PRIORITY_LOW; case AMDGPU_CTX_PRIORITY_NORMAL: return DRM_SCHED_PRIORITY_NORMAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 8d4a3ff65c..1afbb2e932 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -540,7 +540,11 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, while (size) { uint32_t value; - value = RREG32_PCIE(*pos); + if (upper_32_bits(*pos)) + value = RREG32_PCIE_EXT(*pos); + else + value = RREG32_PCIE(*pos); + r = put_user(value, (uint32_t *)buf); if (r) goto out; @@ -600,7 +604,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user if (r) goto out; - WREG32_PCIE(*pos, value); + if (upper_32_bits(*pos)) + WREG32_PCIE_EXT(*pos, value); + else + WREG32_PCIE(*pos, value); result += 4; buf += 4; @@ -1671,9 +1678,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!amdgpu_ring_sched_ready(ring)) continue; - kthread_park(ring->sched.thread); + drm_sched_wqueue_stop(&ring->sched); } seq_puts(m, "run ib test:\n"); @@ -1687,9 +1694,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!amdgpu_ring_sched_ready(ring)) continue; - kthread_unpark(ring->sched.thread); + drm_sched_wqueue_start(&ring->sched); } up_write(&adev->reset_domain->sem); @@ -1909,7 +1916,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) ring = adev->rings[val]; - if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread) + if (!amdgpu_ring_sched_ready(ring) || + !ring->funcs->preempt_ib) return -EINVAL; /* the last preemption failed */ @@ -1927,7 +1935,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) goto pro_end; /* stop the scheduler */ - kthread_park(ring->sched.thread); + drm_sched_wqueue_stop(&ring->sched); /* preempt the IB */ r = amdgpu_ring_preempt_ib(ring); @@ -1961,7 
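
Every drm_exec_init() call site in this patch gains a trailing 0, matching the drm_exec API in this kernel where the extra parameter pre-sizes the internal object array. The prototype assumed from the converted call sites, plus typical usage mirroring the CSA hunks above:

/* @nr hints the initial number of objects to track; 0 lets drm_exec
 * pick a default and grow on demand.
 */
void drm_exec_init(struct drm_exec *exec, u32 flags, unsigned int nr);

drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
	r = drm_exec_lock_obj(&exec, &bo->tbo.base);
	drm_exec_retry_on_contention(&exec);	/* restart loop on contention */
	if (unlikely(r))
		goto out;
}
out:
drm_exec_fini(&exec);
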
+1969,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) failure: /* restart the scheduler */ - kthread_unpark(ring->sched.thread); + drm_sched_wqueue_start(&ring->sched); up_read(&adev->reset_domain->sem); @@ -2146,6 +2154,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) amdgpu_debugfs_firmware_init(adev); amdgpu_ta_if_debugfs_init(adev); + amdgpu_debugfs_mes_event_log_init(adev); + #if defined(CONFIG_DRM_AMD_DC) if (adev->dc_enabled) dtn_debugfs_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h index 371a6f0deb..0425432d86 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h @@ -32,3 +32,5 @@ void amdgpu_debugfs_fini(struct amdgpu_device *adev); void amdgpu_debugfs_fence_init(struct amdgpu_device *adev); void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); void amdgpu_debugfs_gem_init(struct amdgpu_device *adev); +void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev); + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7f48c7ec41..d0afb9ba37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -162,6 +162,65 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev, static DEVICE_ATTR(pcie_replay_count, 0444, amdgpu_device_get_pcie_replay_count, NULL); +static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t ppos, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + ssize_t bytes_read; + + switch (ppos) { + case AMDGPU_SYS_REG_STATE_XGMI: + bytes_read = amdgpu_asic_get_reg_state( + adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count); + break; + case AMDGPU_SYS_REG_STATE_WAFL: + bytes_read = amdgpu_asic_get_reg_state( + adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count); + break; + case AMDGPU_SYS_REG_STATE_PCIE: + bytes_read = amdgpu_asic_get_reg_state( + adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count); + break; + case AMDGPU_SYS_REG_STATE_USR: + bytes_read = amdgpu_asic_get_reg_state( + adev, AMDGPU_REG_STATE_TYPE_USR, buf, count); + break; + case AMDGPU_SYS_REG_STATE_USR_1: + bytes_read = amdgpu_asic_get_reg_state( + adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count); + break; + default: + return -EINVAL; + } + + return bytes_read; +} + +BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL, + AMDGPU_SYS_REG_STATE_END); + +int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev) +{ + int ret; + + if (!amdgpu_asic_get_reg_state_supported(adev)) + return 0; + + ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state); + + return ret; +} + +void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev) +{ + if (!amdgpu_asic_get_reg_state_supported(adev)) + return; + sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state); +} + /** * DOC: board_info * @@ -1541,7 +1600,7 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev) if (adev->mman.keep_stolen_vga_memory) return false; - return adev->ip_versions[DCE_HWIP][0] >= IP_VERSION(3, 0, 0); + return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0); } /* @@ -1552,11 +1611,15 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev) * 
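
A notable quirk of the new reg_state binary attribute above: the file offset, not the read size, selects which register-state snapshot is returned. A hypothetical userspace sketch; the PCI device path and the assumption that AMDGPU_SYS_REG_STATE_XGMI is offset 0 are illustrative, the real values live in amdgpu_reg_state.h:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/bus/pci/devices/0000:03:00.0/reg_state", O_RDONLY);

	if (fd < 0)
		return 1;
	/* offset selects the snapshot type; 0 assumed to be XGMI here */
	n = pread(fd, buf, sizeof(buf), 0);
	if (n > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}
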
https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/ * https://gitlab.freedesktop.org/drm/amd/-/issues/2663 */ -static bool amdgpu_device_pcie_dynamic_switching_supported(void) +static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev) { #if IS_ENABLED(CONFIG_X86) struct cpuinfo_x86 *c = &cpu_data(0); + /* eGPU change speeds based on USB4 fabric conditions */ + if (dev_is_removable(adev->dev)) + return true; + if (c->x86_vendor == X86_VENDOR_INTEL) return false; #endif @@ -2389,7 +2452,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; - if (!amdgpu_device_pcie_dynamic_switching_supported()) + if (!amdgpu_device_pcie_dynamic_switching_supported(adev)) adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK; total = true; @@ -2567,7 +2630,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) break; } - r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, + r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, ring->num_hw_submission, 0, timeout, adev->reset_domain->wq, @@ -2670,6 +2733,12 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) goto init_failed; } } + + r = amdgpu_seq64_init(adev); + if (r) { + DRM_ERROR("allocate seq64 failed %d\n", r); + goto init_failed; + } } } @@ -3132,6 +3201,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) amdgpu_device_wb_fini(adev); amdgpu_device_mem_scratch_fini(adev); amdgpu_ib_pool_fini(adev); + amdgpu_seq64_fini(adev); } r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); @@ -4202,6 +4272,7 @@ fence_driver_init: "Could not create amdgpu board attributes\n"); amdgpu_fru_sysfs_init(adev); + amdgpu_reg_state_sysfs_init(adev); if (IS_ENABLED(CONFIG_PERF_EVENTS)) r = amdgpu_pmu_init(adev); @@ -4324,6 +4395,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); amdgpu_fru_sysfs_fini(adev); + amdgpu_reg_state_sysfs_fini(adev); + /* disable ras feature must before hw fini */ amdgpu_ras_pre_fini(adev); @@ -4451,6 +4524,8 @@ int amdgpu_device_prepare(struct drm_device *dev) if (r) goto unprepare; + flush_delayed_work(&adev->gfx.gfx_off_delay_work); + for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) continue; @@ -4954,7 +5029,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!amdgpu_ring_sched_ready(ring)) continue; spin_lock(&ring->sched.job_list_lock); @@ -5093,7 +5168,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!amdgpu_ring_sched_ready(ring)) continue; /* Clear job fence from fence drv to avoid force_completion @@ -5560,7 +5635,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!amdgpu_ring_sched_ready(ring)) continue; drm_sched_stop(&ring->sched, job ? 
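
The drm_sched_init() call above picks up two new arguments: a NULL submit workqueue, which makes the scheduler allocate its own ordered workqueue, and DRM_SCHED_PRIORITY_COUNT run-queues. The prototype this implies, assumed from the call site (ring->num_hw_submission feeds the credit limit):

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   struct workqueue_struct *submit_wq,
		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev);
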
&job->base : NULL); @@ -5629,7 +5704,7 @@ skip_hw_reset: for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!amdgpu_ring_sched_ready(ring)) continue; drm_sched_start(&ring->sched, true); @@ -5690,6 +5765,39 @@ skip_sched_resume: return r; } +/** + * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner + * + * @adev: amdgpu_device pointer + * @speed: pointer to the speed of the link + * @width: pointer to the width of the link + * + * Evaluate the hierarchy to find the speed and bandwidth capabilities of the + * first physical partner to an AMD dGPU. + * This will exclude any virtual switches and links. + */ +static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + struct pci_dev *parent = adev->pdev; + + if (!speed || !width) + return; + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while ((parent = pci_upstream_bridge(parent))) { + /* skip upstream/downstream switches internal to dGPU*/ + if (parent->vendor == PCI_VENDOR_ID_ATI) + continue; + *speed = pcie_get_speed_cap(parent); + *width = pcie_get_width_cap(parent); + break; + } +} + /** * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot * @@ -5723,8 +5831,8 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask) return; - pcie_bandwidth_available(adev->pdev, NULL, - &platform_speed_cap, &platform_link_width); + amdgpu_device_partner_bandwidth(adev, &platform_speed_cap, + &platform_link_width); if (adev->pm.pcie_gen_mask == 0) { /* asic caps */ @@ -5951,7 +6059,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!amdgpu_ring_sched_ready(ring)) continue; drm_sched_stop(&ring->sched, NULL); @@ -6001,6 +6109,20 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) struct amdgpu_reset_context reset_context; u32 memsize; struct list_head device_list; + struct amdgpu_hive_info *hive; + int hive_ras_recovery = 0; + struct amdgpu_ras *ras; + + /* PCI error slot reset should be skipped During RAS recovery */ + hive = amdgpu_get_xgmi_hive(adev); + if (hive) { + hive_ras_recovery = atomic_read(&hive->ras_recovery); + amdgpu_put_xgmi_hive(hive); + } + ras = amdgpu_ras_get_context(adev); + if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) && + ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) + return PCI_ERS_RESULT_RECOVERED; DRM_INFO("PCI error: slot reset callback!!\n"); @@ -6079,7 +6201,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!amdgpu_ring_sched_ready(ring)) continue; drm_sched_start(&ring->sched, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index e7e87a3b26..decbbe3d4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -42,6 +42,7 @@ #include #include #include +#include "amdgpu_trace.h" /** * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation @@ -63,6 +64,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, attach->peer2peer = false; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + 
trace_amdgpu_runpm_reference_dumps(1, __func__); if (r < 0) goto out; @@ -70,6 +72,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, out: pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + trace_amdgpu_runpm_reference_dumps(0, __func__); return r; } @@ -90,6 +93,7 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf, pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + trace_amdgpu_runpm_reference_dumps(0, __func__); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 855ab59632..64b1bb2404 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -128,6 +128,7 @@ enum AMDGPU_DEBUG_MASK { AMDGPU_DEBUG_VM = BIT(0), AMDGPU_DEBUG_LARGEBAR = BIT(1), AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2), + AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3), }; unsigned int amdgpu_vram_limit = UINT_MAX; @@ -209,6 +210,7 @@ int amdgpu_umsch_mm; int amdgpu_seamless = -1; /* auto */ uint amdgpu_debug_mask; int amdgpu_agp = -1; /* auto */ +int amdgpu_wbrf = -1; int amdgpu_damage_clips = -1; /* auto */ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); @@ -985,6 +987,22 @@ module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444); MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)"); module_param_named(agp, amdgpu_agp, int, 0444); +/** + * DOC: wbrf (int) + * Enable Wifi RFI interference mitigation feature. + * Due to electrical and mechanical constraints there may be likely interference of + * relatively high-powered harmonics of the (G-)DDR memory clocks with local radio + * module frequency bands used by Wifi 6/6e/7. To mitigate the possible RFI interference, + * with this feature enabled, PMFW will use either “shadowed P-State” or “P-State” based + * on active list of frequencies in-use (to be avoided) as part of initial setting or + * P-state transition. However, there may be potential performance impact with this + * feature enabled. + * (0 = disabled, 1 = enabled, -1 = auto (default setting, will be enabled if supported)) + */ +MODULE_PARM_DESC(wbrf, + "Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)"); +module_param_named(wbrf, amdgpu_wbrf, int, 0444); + /* These devices are not supported by amdgpu. * They are supported by the mach64, r128, radeon drivers */ @@ -2113,6 +2131,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev) pr_info("debug: soft reset for GPU recovery disabled\n"); adev->debug_disable_soft_recovery = true; } + + if (amdgpu_debug_mask & AMDGPU_DEBUG_USE_VRAM_FW_BUF) { + pr_info("debug: place fw in vram for frontdoor loading\n"); + adev->debug_use_vram_fw_buf = true; + } } static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags) @@ -2224,6 +2247,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, ddev); + amdgpu_init_debug_options(adev); + ret = amdgpu_driver_load_kms(adev, flags); if (ret) goto err_pci; @@ -2243,6 +2268,10 @@ retry_init: if (ret) goto err_pci; + ret = amdgpu_amdkfd_drm_client_create(adev); + if (ret) + goto err_pci; + /* * 1. don't init fbdev on hw without DCE * 2. 
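
The wbrf parameter documented above is the usual amdgpu tri-state: -1 auto-detect, 0/1 force off/on (e.g. booting with amdgpu.wbrf=0 to rule the mitigation out when chasing a performance regression). A hypothetical sketch of how such a tri-state is typically resolved; the capability-check helper is a placeholder, not from this patch:

static bool wbrf_enabled(struct amdgpu_device *adev)
{
	if (amdgpu_wbrf != -1)
		return amdgpu_wbrf == 1;	/* explicit user choice */
	return smu_wbrf_supported(adev);	/* placeholder capability probe */
}
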
don't init fbdev if there are no connectors @@ -2304,8 +2333,6 @@ retry_init: amdgpu_get_secondary_funcs(adev); } - amdgpu_init_debug_options(adev); - return 0; err_pci: @@ -2424,8 +2451,11 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) } for (i = 0; i < mgpu_info.num_dgpu; i++) { adev = mgpu_info.gpu_ins[i].adev; - if (!adev->kfd.init_complete) + if (!adev->kfd.init_complete) { + kgd2kfd_init_zone_device(adev); amdgpu_amdkfd_device_init(adev); + amdgpu_amdkfd_drm_client_create(adev); + } amdgpu_ttm_set_buffer_funcs_status(adev, true); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index 5706b282a0..c7df7fa345 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -97,6 +97,10 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) stats.requested_visible_vram/1024UL); drm_printf(p, "amd-requested-gtt:\t%llu KiB\n", stats.requested_gtt/1024UL); + drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL); + drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL); + drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL); + for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) { if (!usage[hw_ip]) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index dc23021274..70bff8cecf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -183,6 +183,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, flags | AMDGPU_FENCE_FLAG_INT); pm_runtime_get_noresume(adev_to_drm(adev)->dev); + trace_amdgpu_runpm_reference_dumps(1, __func__); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; if (unlikely(rcu_dereference_protected(*ptr, 1))) { struct dma_fence *old; @@ -310,6 +311,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) dma_fence_put(fence); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + trace_amdgpu_runpm_reference_dumps(0, __func__); } while (last_seq != seq); return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 84beeaa4d2..49a5f1c73b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -203,7 +203,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj, struct drm_exec exec; long r; - drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES); + drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); drm_exec_until_all_locked(&exec) { r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1); drm_exec_retry_on_contention(&exec); @@ -739,7 +739,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, } drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | - DRM_EXEC_IGNORE_DUPLICATES); + DRM_EXEC_IGNORE_DUPLICATES, 0); drm_exec_until_all_locked(&exec) { if (gobj) { r = drm_exec_lock_obj(&exec, gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c index 57516a8c5d..431ec72655 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c @@ -202,8 +202,8 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, pr_debug("hmm range: start = 0x%lx, end = 0x%lx", hmm_range->start, hmm_range->end); - /* Assuming 128MB takes 
maximum 1 second to fault page address */ - timeout = max((hmm_range->end - hmm_range->start) >> 27, 1UL); + /* Assuming 64MB takes maximum 1 second to fault page address */ + timeout = max((hmm_range->end - hmm_range->start) >> 26, 1UL); timeout *= HMM_RANGE_DEFAULT_TIMEOUT; timeout = jiffies + msecs_to_jiffies(timeout); @@ -211,6 +211,7 @@ retry: hmm_range->notifier_seq = mmu_interval_read_begin(notifier); r = hmm_range_fault(hmm_range); if (unlikely(r)) { + schedule(); /* * FIXME: This timeout should encompass the retry from * mmu_interval_read_retry() as well. @@ -224,7 +225,6 @@ retry: break; hmm_range->hmm_pfns += MAX_WALK_BYTE >> PAGE_SHIFT; hmm_range->start = hmm_range->end; - schedule(); } while (hmm_range->end < end); hmm_range->start = start; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 82608df433..d79cb13e1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -175,7 +175,6 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, i2c->rec = *rec; i2c->adapter.owner = THIS_MODULE; - i2c->adapter.class = I2C_CLASS_DDC; i2c->adapter.dev.parent = dev->dev; i2c->dev = dev; i2c_set_adapdata(&i2c->adapter, i2c); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 1f35719853..71a5cf37b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -115,7 +115,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (!entity) return 0; - return drm_sched_job_init(&(*job)->base, entity, owner); + return drm_sched_job_init(&(*job)->base, entity, 1, owner); } int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, @@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) int i; /* Signal all jobs not yet scheduled */ - for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { struct drm_sched_rq *rq = sched->sched_rq[i]; spin_lock(&rq->lock); list_for_each_entry(s_entity, &rq->entities, list) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 598867919c..bf4f48fe43 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1433,6 +1433,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, fpriv->csa_va = NULL; } + amdgpu_seq64_unmap(adev, fpriv); + pasid = fpriv->vm.pasid; pd = amdgpu_bo_ref(fpriv->vm.root.bo); if (!WARN_ON(amdgpu_bo_reserve(pd, true))) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 061d88f448..59fafb8392 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -218,6 +218,7 @@ static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, st int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data) { struct amdgpu_smuio_mcm_config_info mcm_info; + struct ras_err_addr err_addr = {0}; struct mca_bank_set mca_set; struct mca_bank_node *node; struct mca_bank_entry *entry; @@ -246,10 +247,18 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo mcm_info.socket_id = entry->info.socket_id; mcm_info.die_id = entry->info.aid; + if (blk == AMDGPU_RAS_BLOCK__UMC) { + err_addr.err_status = 
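
The userptr fault timeout change above halves the assumed fault bandwidth from 128 MB per second (shift by 27) to 64 MB per second (shift by 26), i.e. it doubles the allowed time per range. Worked through for a 1 GiB range, assuming the stock HMM_RANGE_DEFAULT_TIMEOUT of 1000 ms:

/*
 * timeout = max((end - start) >> 26, 1) * HMM_RANGE_DEFAULT_TIMEOUT
 *
 * For a 1 GiB range: (1 << 30) >> 26 = 16 units, so the driver now waits
 * up to 16 * 1000 ms = 16 s for hmm_range_fault(), twice the 8 s the
 * previous >> 27 allowed.
 */
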
entry->regs[MCA_REG_IDX_STATUS]; + err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID]; + err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR]; + } + if (type == AMDGPU_MCA_ERROR_TYPE_UE) - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, (uint64_t)count); + amdgpu_ras_error_statistic_ue_count(err_data, + &mcm_info, &err_addr, (uint64_t)count); else - amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, (uint64_t)count); + amdgpu_ras_error_statistic_ce_count(err_data, + &mcm_info, &err_addr, (uint64_t)count); } out_mca_release: @@ -377,7 +386,7 @@ static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val) struct amdgpu_device *adev = (struct amdgpu_device *)data; int ret; - ret = amdgpu_mca_smu_set_debug_mode(adev, val ? true : false); + ret = amdgpu_ras_set_mca_debug_mode(adev, val ? true : false); if (ret) return ret; @@ -485,7 +494,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_se void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root) { #if defined(CONFIG_DEBUG_FS) - if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6)) + if (!root || amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6)) return; debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h index e51e8918e6..b399f1b628 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -46,6 +46,8 @@ #define MCA_REG__STATUS__ERRORCODEEXT(x) MCA_REG_FIELD(x, 21, 16) #define MCA_REG__STATUS__ERRORCODE(x) MCA_REG_FIELD(x, 15, 0) +#define MCA_REG__MISC0__ERRCNT(x) MCA_REG_FIELD(x, 43, 32) + #define MCA_REG__SYND__ERRORINFORMATION(x) MCA_REG_FIELD(x, 17, 0) enum amdgpu_mca_ip { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 30c0108366..da48b6da01 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -98,6 +98,26 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev) return 0; } +static int amdgpu_mes_event_log_init(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->mes.event_log_gpu_obj, + &adev->mes.event_log_gpu_addr, + &adev->mes.event_log_cpu_addr); + if (r) { + dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r); + return r; + } + + memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE); + + return 0; + +} + static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev) { bitmap_free(adev->mes.doorbell_bitmap); @@ -182,8 +202,14 @@ int amdgpu_mes_init(struct amdgpu_device *adev) if (r) goto error; + r = amdgpu_mes_event_log_init(adev); + if (r) + goto error_doorbell; + return 0; +error_doorbell: + amdgpu_mes_doorbell_free(adev); error: amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs); amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs); @@ -199,6 +225,10 @@ error_ids: void amdgpu_mes_fini(struct amdgpu_device *adev) { + amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj, + &adev->mes.event_log_gpu_addr, + &adev->mes.event_log_cpu_addr); + amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs); amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs); amdgpu_device_wb_free(adev, adev->mes.read_val_offs); @@ -1153,7 +1183,7 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev, amdgpu_sync_create(&sync); - drm_exec_init(&exec, 0); + 
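
MCA_REG_FIELD(x, h, l) extracts bits h..l, so the new MISC0 ERRCNT accessor isolates the 12-bit error counter at bits [43:32] of the MISC0 bank register. Spelled out by hand (the MISC0 register-index name is assumed from the same header):

u64 misc0 = entry->regs[MCA_REG_IDX_MISC0];
u32 errcnt = (misc0 >> 32) & GENMASK(11, 0);	/* bits 43..32, 12 bits */
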
drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { r = drm_exec_lock_obj(&exec, &ctx_data->meta_data_obj->tbo.base); @@ -1224,7 +1254,7 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev, struct drm_exec exec; long r; - drm_exec_init(&exec, 0); + drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { r = drm_exec_lock_obj(&exec, &ctx_data->meta_data_obj->tbo.base); @@ -1510,3 +1540,34 @@ out: amdgpu_ucode_release(&adev->mes.fw[pipe]); return r; } + +#if defined(CONFIG_DEBUG_FS) + +static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused) +{ + struct amdgpu_device *adev = m->private; + uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr); + + seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4, + mem, PAGE_SIZE, false); + + return 0; +} + + +DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log); + +#endif + +void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev) +{ + +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + + debugfs_create_file("amdgpu_mes_event_log", 0444, root, + adev, &amdgpu_debugfs_mes_event_log_fops); + +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index c2c88b7723..7d4f93fea9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -133,6 +133,11 @@ struct amdgpu_mes { uint32_t num_mes_dbs; unsigned long *doorbell_bitmap; + /* MES event log buffer */ + struct amdgpu_bo *event_log_gpu_obj; + uint64_t event_log_gpu_addr; + void *event_log_cpu_addr; + /* ip specific functions */ const struct amdgpu_mes_funcs *funcs; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 32fe05c810..2e4911050c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -32,7 +32,6 @@ #include #include -#include #include #include #include @@ -51,6 +50,7 @@ struct amdgpu_device; struct amdgpu_encoder; struct amdgpu_router; struct amdgpu_hpd; +struct edid; #define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base) #define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base) @@ -343,6 +343,97 @@ struct amdgpu_mode_info { int disp_priority; const struct amdgpu_display_funcs *funcs; const enum drm_plane_type *plane_type; + + /* Driver-private color mgmt props */ + + /* @plane_degamma_lut_property: Plane property to set a degamma LUT to + * convert encoded values to light linear values before sampling or + * blending. + */ + struct drm_property *plane_degamma_lut_property; + /* @plane_degamma_lut_size_property: Plane property to define the max + * size of degamma LUT as supported by the driver (read-only). + */ + struct drm_property *plane_degamma_lut_size_property; + /** + * @plane_degamma_tf_property: Plane pre-defined transfer function to + * to go from scanout/encoded values to linear values. + */ + struct drm_property *plane_degamma_tf_property; + /** + * @plane_hdr_mult_property: + */ + struct drm_property *plane_hdr_mult_property; + + struct drm_property *plane_ctm_property; + /** + * @shaper_lut_property: Plane property to set pre-blending shaper LUT + * that converts color content before 3D LUT. If + * plane_shaper_tf_property != Identity TF, AMD color module will + * combine the user LUT values with pre-defined TF into the LUT + * parameters to be programmed. 
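+ * For example, userspace may program a shaper TF/LUT that re-encodes + * linear pixel values into a perceptual space, so that the fixed-size + * 3D LUT that follows spends its entries where precision matters most.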
+ */ + struct drm_property *plane_shaper_lut_property; + /** + * @shaper_lut_size_property: Plane property for the size of + * pre-blending shaper LUT as supported by the driver (read-only). + */ + struct drm_property *plane_shaper_lut_size_property; + /** + * @plane_shaper_tf_property: Plane property to set a predefined + * transfer function for pre-blending shaper (before applying 3D LUT) + * with or without LUT. There is no shaper ROM, but we can use AMD + * color modules to program LUT parameters from predefined TF (or + * from a combination of pre-defined TF and the custom 1D LUT). + */ + struct drm_property *plane_shaper_tf_property; + /** + * @plane_lut3d_property: Plane property for color transformation using + * a 3D LUT (pre-blending), a three-dimensional array where each + * element is an RGB triplet. Each dimension has the size of + * lut3d_size. The array contains samples from the approximated + * function. On AMD, values between samples are estimated by + * tetrahedral interpolation. The array is accessed with three indices, + * one for each input dimension (color channel), blue being the + * outermost dimension, red the innermost. + */ + struct drm_property *plane_lut3d_property; + /** + * @plane_degamma_lut_size_property: Plane property to define the max + * size of 3D LUT as supported by the driver (read-only). The max size + * is the max size of one dimension and, therefore, the max number of + * entries for 3D LUT array is the 3D LUT size cubed; + */ + struct drm_property *plane_lut3d_size_property; + /** + * @plane_blend_lut_property: Plane property for output gamma before + * blending. Userspace set a blend LUT to convert colors after 3D LUT + * conversion. It works as a post-3DLUT 1D LUT. With shaper LUT, they + * are sandwiching 3D LUT with two 1D LUT. If plane_blend_tf_property + * != Identity TF, AMD color module will combine the user LUT values + * with pre-defined TF into the LUT parameters to be programmed. + */ + struct drm_property *plane_blend_lut_property; + /** + * @plane_blend_lut_size_property: Plane property to define the max + * size of blend LUT as supported by the driver (read-only). + */ + struct drm_property *plane_blend_lut_size_property; + /** + * @plane_blend_tf_property: Plane property to set a predefined + * transfer function for pre-blending blend/out_gamma (after applying + * 3D LUT) with or without LUT. There is no blend ROM, but we can use + * AMD color modules to program LUT parameters from predefined TF (or + * from a combination of pre-defined TF and the custom 1D LUT). + */ + struct drm_property *plane_blend_tf_property; + /* @regamma_tf_property: Transfer function for CRTC regamma + * (post-blending). Possible values are defined by `enum + * amdgpu_transfer_function`. There is no regamma ROM, but we can use + * AMD color modules to program LUT parameters from predefined TF (or + * from a combination of pre-defined TF and the custom 1D LUT). 
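+ * For example, with a non-Identity TF here, a GAMMA_LUT supplied by + * userspace is combined with the pre-defined curve by the AMD color + * module instead of being programmed verbatim.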
+ */ + struct drm_property *regamma_tf_property; }; #define AMDGPU_MAX_BL_LEVEL 0xFF @@ -416,6 +507,10 @@ struct amdgpu_crtc { int otg_inst; struct drm_pending_vblank_event *event; + + bool wb_pending; + bool wb_enabled; + struct drm_writeback_connector *wb_conn; }; struct amdgpu_encoder_atom_dig { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 425cebcc5c..866bfde1ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -620,8 +620,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, return r; if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && - bo->tbo.resource->mem_type == TTM_PL_VRAM && - amdgpu_bo_in_cpu_visible_vram(bo)) + amdgpu_res_cpu_visible(adev, bo->tbo.resource)) amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, ctx.bytes_moved); else @@ -1275,26 +1274,39 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict) void amdgpu_bo_get_memory(struct amdgpu_bo *bo, struct amdgpu_mem_stats *stats) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct ttm_resource *res = bo->tbo.resource; uint64_t size = amdgpu_bo_size(bo); + struct drm_gem_object *obj; unsigned int domain; + bool shared; /* Abort if the BO doesn't currently have a backing store */ - if (!bo->tbo.resource) + if (!res) return; - domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); + obj = &bo->tbo.base; + shared = drm_gem_object_is_shared_for_memory_stats(obj); + + domain = amdgpu_mem_type_to_domain(res->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: stats->vram += size; - if (amdgpu_bo_in_cpu_visible_vram(bo)) + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) stats->visible_vram += size; + if (shared) + stats->vram_shared += size; break; case AMDGPU_GEM_DOMAIN_GTT: stats->gtt += size; + if (shared) + stats->gtt_shared += size; break; case AMDGPU_GEM_DOMAIN_CPU: default: stats->cpu += size; + if (shared) + stats->cpu_shared += size; break; } @@ -1381,10 +1393,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* Remember that this BO was accessed by the CPU */ abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - if (bo->resource->mem_type != TTM_PL_VRAM) - return 0; - - if (amdgpu_bo_in_cpu_visible_vram(abo)) + if (amdgpu_res_cpu_visible(adev, bo->resource)) return 0; /* Can't move a pinned BO to visible VRAM */ @@ -1408,7 +1417,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* this should never happen */ if (bo->resource->mem_type == TTM_PL_VRAM && - !amdgpu_bo_in_cpu_visible_vram(abo)) + !amdgpu_res_cpu_visible(adev, bo->resource)) return VM_FAULT_SIGBUS; ttm_bo_move_to_lru_tail_unlocked(bo); @@ -1572,6 +1581,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, */ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct dma_buf_attachment *attachment; struct dma_buf *dma_buf; const char *placement; @@ -1580,10 +1590,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) if (dma_resv_trylock(bo->tbo.base.resv)) { unsigned int domain; + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: - if (amdgpu_bo_in_cpu_visible_vram(bo)) + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) placement = "VRAM VISIBLE"; else placement = "VRAM"; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index a3ea8a82db..fa03d9e487 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -138,12 +138,18 @@ struct amdgpu_bo_vm { struct amdgpu_mem_stats { /* current VRAM usage, includes visible VRAM */ uint64_t vram; + /* current shared VRAM usage, includes visible VRAM */ + uint64_t vram_shared; /* current visible VRAM usage */ uint64_t visible_vram; /* current GTT usage */ uint64_t gtt; + /* current shared GTT usage */ + uint64_t gtt_shared; /* current system memory usage */ uint64_t cpu; + /* current shared system memory usage */ + uint64_t cpu_shared; /* sum of evicted buffers, includes visible VRAM */ uint64_t evicted_vram; /* sum of evicted buffers due to CPU access */ @@ -244,28 +250,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); } -/** - * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM - */ -static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct amdgpu_res_cursor cursor; - - if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM) - return false; - - amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor); - while (cursor.remaining) { - if (cursor.start < adev->gmc.visible_vram_size) - return true; - - amdgpu_res_next(&cursor, cursor.size); - } - - return false; -} - /** * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index a21045d018..0328616473 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -466,7 +466,7 @@ static int psp_sw_init(void *handle) } ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, - amdgpu_sriov_vf(adev) ? + (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, &psp->fw_pri_bo, &psp->fw_pri_mc_addr, @@ -1433,8 +1433,8 @@ int psp_xgmi_get_topology_info(struct psp_context *psp, get_extended_data) || amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6); - bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & - EXTEND_PEER_LINK_INFO_CMD_FLAG; + bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 : + psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG; /* popluate the shared output buffer rather than the cmd input buffer * with node_ids as the input for GET_PEER_LINKS command execution. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 4a3726bb6d..31823a30de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -305,11 +305,13 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, return -EINVAL; data->head.block = block_id; - /* only ue and ce errors are supported */ + /* only ue, ce and poison errors are supported */ if (!memcmp("ue", err, 2)) data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; else if (!memcmp("ce", err, 2)) data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE; + else if (!memcmp("poison", err, 6)) + data->head.type = AMDGPU_RAS_ERROR__POISON; else return -EINVAL; @@ -431,9 +433,10 @@ static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev, * The block is one of: umc, sdma, gfx, etc. 
* see ras_block_string[] for details * - * The error type is one of: ue, ce, where, + * The error type is one of: ue, ce and poison where, * ue is multi-uncorrectable * ce is single-correctable + * poison is poison * * The sub-block is a the sub-block index, pass 0 if there is no sub-block. * The address and value are hexadecimal numbers, leading 0x is optional. @@ -1067,8 +1070,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, mcm_info = &err_info->mcm_info; if (err_info->ce_count) { dev_info(adev->dev, "socket: %d, die: %d, " - "%lld new correctable hardware errors detected in %s block, " - "no user action is needed\n", + "%lld new correctable hardware errors detected in %s block\n", mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, @@ -1080,8 +1082,7 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; dev_info(adev->dev, "socket: %d, die: %d, " - "%lld correctable hardware errors detected in total in %s block, " - "no user action is needed\n", + "%lld correctable hardware errors detected in total in %s block\n", mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name); } } @@ -1108,16 +1109,14 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, adev->smuio.funcs->get_die_id) { dev_info(adev->dev, "socket: %d, die: %d " "%ld correctable hardware errors " - "detected in %s block, no user " - "action is needed.\n", + "detected in %s block\n", adev->smuio.funcs->get_socket_id(adev), adev->smuio.funcs->get_die_id(adev), ras_mgr->err_data.ce_count, blk_name); } else { dev_info(adev->dev, "%ld correctable hardware errors " - "detected in %s block, no user " - "action is needed.\n", + "detected in %s block\n", ras_mgr->err_data.ce_count, blk_name); } @@ -1156,8 +1155,10 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; - amdgpu_ras_error_statistic_ce_count(&obj->err_data, &err_info->mcm_info, err_info->ce_count); - amdgpu_ras_error_statistic_ue_count(&obj->err_data, &err_info->mcm_info, err_info->ue_count); + amdgpu_ras_error_statistic_ce_count(&obj->err_data, + &err_info->mcm_info, NULL, err_info->ce_count); + amdgpu_ras_error_statistic_ue_count(&obj->err_data, + &err_info->mcm_info, NULL, err_info->ue_count); } } else { /* for legacy asic path which doesn't has error source info */ @@ -1918,7 +1919,7 @@ static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj struct amdgpu_iv_entry *entry) { dev_info(obj->adev->dev, - "Poison is created, no user action is needed.\n"); + "Poison is created\n"); } static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj, @@ -2541,7 +2542,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) return 0; data = &con->eh_data; - *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO); + *data = kzalloc(sizeof(**data), GFP_KERNEL); if (!*data) { ret = -ENOMEM; goto out; @@ -2828,10 +2829,10 @@ int amdgpu_ras_init(struct amdgpu_device *adev) if (con) return 0; - con = kmalloc(sizeof(struct amdgpu_ras) + + con = kzalloc(sizeof(*con) + sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT + sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT, - GFP_KERNEL|__GFP_ZERO); + GFP_KERNEL); if (!con) return -ENOMEM; @@ -2918,6 +2919,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev) amdgpu_ras_query_poison_mode(adev); + /* Packed socket_id to ras feature mask 
bits[31:29] */ + if (adev->smuio.funcs && + adev->smuio.funcs->get_socket_id) + con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 29); + /* Get RAS schema for particular SOC */ con->schema = amdgpu_get_ras_schema(adev); @@ -3136,6 +3142,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return 0; + amdgpu_ras_set_mca_debug_mode(adev, false); + list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { if (!node->ras_obj) { dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); @@ -3409,12 +3417,18 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) return 0; } -void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable) +int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int ret = 0; - if (con) - con->is_mca_debug_mode = enable; + if (con) { + ret = amdgpu_mca_smu_set_debug_mode(adev, enable); + if (!ret) + con->is_mca_debug_mode = enable; + } + + return ret; } bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev) @@ -3685,7 +3699,8 @@ static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct } static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info) + struct amdgpu_smuio_mcm_config_info *mcm_info, + struct ras_err_addr *err_addr) { struct ras_err_node *err_node; @@ -3699,6 +3714,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info)); + if (err_addr) + memcpy(&err_node->err_info.err_addr, err_addr, sizeof(*err_addr)); + err_data->err_list_count++; list_add_tail(&err_node->node, &err_data->err_node_list); list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp); @@ -3707,7 +3725,8 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d } int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count) + struct amdgpu_smuio_mcm_config_info *mcm_info, + struct ras_err_addr *err_addr, u64 count) { struct ras_err_info *err_info; @@ -3717,7 +3736,7 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, if (!count) return 0; - err_info = amdgpu_ras_error_get_info(err_data, mcm_info); + err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr); if (!err_info) return -EINVAL; @@ -3728,7 +3747,8 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, } int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count) + struct amdgpu_smuio_mcm_config_info *mcm_info, + struct ras_err_addr *err_addr, u64 count) { struct ras_err_info *err_info; @@ -3738,7 +3758,7 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, if (!count) return 0; - err_info = amdgpu_ras_error_get_info(err_data, mcm_info); + err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr); if (!err_info) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 19161916ac..76fb856287 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -452,10 +452,17 @@ struct ras_fs_data { char debugfs_name[32]; }; +struct ras_err_addr { + uint64_t err_status; + uint64_t err_ipid; + uint64_t err_addr; +}; + struct ras_err_info { struct 
amdgpu_smuio_mcm_config_info mcm_info; u64 ce_count; u64 ue_count; + struct ras_err_addr err_addr; }; struct ras_err_node { @@ -773,7 +780,7 @@ struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev); int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con); -void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable); +int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable); bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev); bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, unsigned int *mode); @@ -806,8 +813,10 @@ void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev, int amdgpu_ras_error_data_init(struct ras_err_data *err_data); void amdgpu_ras_error_data_fini(struct ras_err_data *err_data); int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count); + struct amdgpu_smuio_mcm_config_info *mcm_info, + struct ras_err_addr *err_addr, u64 count); int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, - struct amdgpu_smuio_mcm_config_info *mcm_info, u64 count); + struct amdgpu_smuio_mcm_config_info *mcm_info, + struct ras_err_addr *err_addr, u64 count); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 4e526d7730..06f0a6534a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -647,6 +647,7 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring) ring->name); ring->sched.ready = !r; + return r; } @@ -729,3 +730,14 @@ void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring) if (ring->is_sw_ring) amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE); } + +bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring) +{ + if (!ring) + return false; + + if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched)) + return false; + + return true; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index bbb53720a0..fe1a61eb6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -450,5 +450,5 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, int amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); - +bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index 35e0ae9aca..2c3675d916 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c @@ -531,13 +531,12 @@ int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev, if (version_major == 2 && version_minor == 1) adev->gfx.rlc.is_rlc_v2_1 = true; - if (version_minor >= 0) { - err = amdgpu_gfx_rlc_init_microcode_v2_0(adev); - if (err) { - dev_err(adev->dev, "fail to init rlc v2_0 microcode\n"); - return err; - } + err = amdgpu_gfx_rlc_init_microcode_v2_0(adev); + if (err) { + dev_err(adev->dev, "fail to init rlc v2_0 microcode\n"); + return err; } + if (version_minor >= 1) amdgpu_gfx_rlc_init_microcode_v2_1(adev); if (version_minor >= 2) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c new file mode 100644 index 0000000000..7a6a672754 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c @@ -0,0 +1,247 @@ +// 
SPDX-License-Identifier: MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_seq64.h" + +#include + +/** + * DOC: amdgpu_seq64 + * + * amdgpu_seq64 allocates a 64bit memory on each request in sequence order. + * seq64 driver is required for user queue fence memory allocation, TLB + * counters and VM updates. It has maximum count of 32768 64 bit slots. + */ + +/** + * amdgpu_seq64_map - Map the seq64 memory to VM + * + * @adev: amdgpu_device pointer + * @vm: vm pointer + * @bo_va: bo_va pointer + * @seq64_addr: seq64 vaddr start address + * @size: seq64 pool size + * + * Map the seq64 memory to the given VM. + * + * Returns: + * 0 on success or a negative error code on failure + */ +int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo_va **bo_va, u64 seq64_addr, + uint32_t size) +{ + struct amdgpu_bo *bo; + struct drm_exec exec; + int r; + + bo = adev->seq64.sbo; + if (!bo) + return -EINVAL; + + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_until_all_locked(&exec) { + r = amdgpu_vm_lock_pd(vm, &exec, 0); + if (likely(!r)) + r = drm_exec_lock_obj(&exec, &bo->tbo.base); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error; + } + + *bo_va = amdgpu_vm_bo_add(adev, vm, bo); + if (!*bo_va) { + r = -ENOMEM; + goto error; + } + + r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, size, + AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | + AMDGPU_PTE_EXECUTABLE); + if (r) { + DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r); + amdgpu_vm_bo_del(adev, *bo_va); + goto error; + } + + r = amdgpu_vm_bo_update(adev, *bo_va, false); + if (r) { + DRM_ERROR("failed to do vm_bo_update on userq sem\n"); + amdgpu_vm_bo_del(adev, *bo_va); + goto error; + } + +error: + drm_exec_fini(&exec); + return r; +} + +/** + * amdgpu_seq64_unmap - Unmap the seq64 memory + * + * @adev: amdgpu_device pointer + * @fpriv: DRM file private + * + * Unmap the seq64 memory from the given VM. 
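+ * For example, amdgpu_driver_postclose_kms() calls this when a DRM file + * is closed, dropping the per-process mapping before the VM is torn down.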
+ */ +void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv) +{ + struct amdgpu_vm *vm; + struct amdgpu_bo *bo; + struct drm_exec exec; + int r; + + if (!fpriv->seq64_va) + return; + + bo = adev->seq64.sbo; + if (!bo) + return; + + vm = &fpriv->vm; + + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); + drm_exec_until_all_locked(&exec) { + r = amdgpu_vm_lock_pd(vm, &exec, 0); + if (likely(!r)) + r = drm_exec_lock_obj(&exec, &bo->tbo.base); + drm_exec_retry_on_contention(&exec); + if (unlikely(r)) + goto error; + } + + amdgpu_vm_bo_del(adev, fpriv->seq64_va); + + fpriv->seq64_va = NULL; + +error: + drm_exec_fini(&exec); +} + +/** + * amdgpu_seq64_alloc - Allocate a 64 bit memory + * + * @adev: amdgpu_device pointer + * @gpu_addr: allocated gpu VA start address + * @cpu_addr: allocated cpu VA start address + * + * Alloc a 64 bit memory from seq64 pool. + * + * Returns: + * 0 on success or a negative error code on failure + */ +int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, + u64 **cpu_addr) +{ + unsigned long bit_pos; + u32 offset; + + bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem); + + if (bit_pos < adev->seq64.num_sem) { + __set_bit(bit_pos, adev->seq64.used); + offset = bit_pos << 6; /* convert to qw offset */ + } else { + return -EINVAL; + } + + *gpu_addr = offset + AMDGPU_SEQ64_VADDR_START; + *cpu_addr = offset + adev->seq64.cpu_base_addr; + + return 0; +} + +/** + * amdgpu_seq64_free - Free the given 64 bit memory + * + * @adev: amdgpu_device pointer + * @gpu_addr: gpu start address to be freed + * + * Free the given 64 bit memory from seq64 pool. + * + */ +void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr) +{ + u32 offset; + + offset = gpu_addr - AMDGPU_SEQ64_VADDR_START; + + offset >>= 6; + if (offset < adev->seq64.num_sem) + __clear_bit(offset, adev->seq64.used); +} + +/** + * amdgpu_seq64_fini - Cleanup seq64 driver + * + * @adev: amdgpu_device pointer + * + * Free the memory space allocated for seq64. + * + */ +void amdgpu_seq64_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_free_kernel(&adev->seq64.sbo, + NULL, + (void **)&adev->seq64.cpu_base_addr); +} + +/** + * amdgpu_seq64_init - Initialize seq64 driver + * + * @adev: amdgpu_device pointer + * + * Allocate the required memory space for seq64. + * + * Returns: + * 0 on success or a negative error code on failure + */ +int amdgpu_seq64_init(struct amdgpu_device *adev) +{ + int r; + + if (adev->seq64.sbo) + return 0; + + /* + * AMDGPU_MAX_SEQ64_SLOTS * sizeof(u64) * 8 = AMDGPU_MAX_SEQ64_SLOTS + * 64bit slots + */ + r = amdgpu_bo_create_kernel(adev, AMDGPU_SEQ64_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->seq64.sbo, NULL, + (void **)&adev->seq64.cpu_base_addr); + if (r) { + dev_warn(adev->dev, "(%d) create seq64 failed\n", r); + return r; + } + + memset(adev->seq64.cpu_base_addr, 0, AMDGPU_SEQ64_SIZE); + + adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS; + memset(&adev->seq64.used, 0, sizeof(adev->seq64.used)); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h new file mode 100644 index 0000000000..2196e72be5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_SEQ64_H__ +#define __AMDGPU_SEQ64_H__ + +#define AMDGPU_SEQ64_SIZE (2ULL << 20) +#define AMDGPU_MAX_SEQ64_SLOTS (AMDGPU_SEQ64_SIZE / (sizeof(u64) * 8)) +#define AMDGPU_SEQ64_VADDR_OFFSET 0x50000 +#define AMDGPU_SEQ64_VADDR_START (AMDGPU_VA_RESERVED_SIZE + AMDGPU_SEQ64_VADDR_OFFSET) + +struct amdgpu_seq64 { + struct amdgpu_bo *sbo; + u32 num_sem; + u64 *cpu_base_addr; + DECLARE_BITMAP(used, AMDGPU_MAX_SEQ64_SLOTS); +}; + +void amdgpu_seq64_fini(struct amdgpu_device *adev); +int amdgpu_seq64_init(struct amdgpu_device *adev); +int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr); +void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr); +int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct amdgpu_bo_va **bo_va, u64 seq64_addr, uint32_t size); +void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv); + +#endif + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 2fd1bfb359..f539b1d002 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -554,6 +554,21 @@ TRACE_EVENT(amdgpu_reset_reg_dumps, __entry->value) ); +TRACE_EVENT(amdgpu_runpm_reference_dumps, + TP_PROTO(uint32_t index, const char *func), + TP_ARGS(index, func), + TP_STRUCT__entry( + __field(uint32_t, index) + __string(func, func) + ), + TP_fast_assign( + __entry->index = index; + __assign_str(func, func); + ), + TP_printk("amdgpu runpm reference dump 0x%x: 0x%s\n", + __entry->index, + __get_str(func)) +); #undef AMDGPU_JOB_GET_TIMELINE_NAME #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b0ed10f4de..851509c6e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -137,7 +137,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && - amdgpu_bo_in_cpu_visible_vram(abo)) { + amdgpu_res_cpu_visible(adev, bo->resource)) { /* Try evicting to the CPU inaccessible part of VRAM * first, but only set GTT as busy placement, so this @@ -408,40 +408,55 @@ error: return r; } -/* - * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy +/** + * 
amdgpu_res_cpu_visible - Check that resource can be accessed by CPU + * @adev: amdgpu device + * @res: the resource to check * - * Called by amdgpu_bo_move() + * Returns: true if the full resource is CPU visible, false otherwise. */ -static bool amdgpu_mem_visible(struct amdgpu_device *adev, - struct ttm_resource *mem) +bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, + struct ttm_resource *res) { - u64 mem_size = (u64)mem->size; struct amdgpu_res_cursor cursor; - u64 end; - if (mem->mem_type == TTM_PL_SYSTEM || - mem->mem_type == TTM_PL_TT) + if (!res) + return false; + + if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT || + res->mem_type == AMDGPU_PL_PREEMPT) return true; - if (mem->mem_type != TTM_PL_VRAM) + + if (res->mem_type != TTM_PL_VRAM) return false; - amdgpu_res_first(mem, 0, mem_size, &cursor); - end = cursor.start + cursor.size; + amdgpu_res_first(res, 0, res->size, &cursor); while (cursor.remaining) { + if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size) + return false; amdgpu_res_next(&cursor, cursor.size); + } - if (!cursor.remaining) - break; + return true; +} - /* ttm_resource_ioremap only supports contiguous memory */ - if (end != cursor.start) - return false; +/* + * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy + * + * Called by amdgpu_bo_move() + */ +static bool amdgpu_res_copyable(struct amdgpu_device *adev, + struct ttm_resource *mem) +{ + if (!amdgpu_res_cpu_visible(adev, mem)) + return false; - end = cursor.start + cursor.size; - } + /* ttm_resource_ioremap only supports contiguous memory */ + if (mem->mem_type == TTM_PL_VRAM && + !(mem->placement & TTM_PL_FLAG_CONTIGUOUS)) + return false; - return end <= adev->gmc.visible_vram_size; + return true; } /* @@ -534,8 +549,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, if (r) { /* Check that all memory is CPU accessible */ - if (!amdgpu_mem_visible(adev, old_mem) || - !amdgpu_mem_visible(adev, new_mem)) { + if (!amdgpu_res_copyable(adev, old_mem) || + !amdgpu_res_copyable(adev, new_mem)) { pr_err("Move buffer fallback to memcpy unavailable\n"); return r; } @@ -562,7 +577,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - size_t bus_size = (size_t)mem->size; switch (mem->mem_type) { case TTM_PL_SYSTEM: @@ -573,9 +587,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, break; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; - /* check if it's visible */ - if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size) - return -EINVAL; if (adev->mman.aper_base_kaddr && mem->placement & TTM_PL_FLAG_CONTIGUOUS) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 65ec82141a..32cf6b6f6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start); +bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, + struct ttm_resource *res); + int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_fini(struct amdgpu_device *adev); void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 0efb2568cb..3e12763e47 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -1062,7 +1062,8 @@ int amdgpu_ucode_create_bo(struct amdgpu_device *adev) { if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) { amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE, - amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, + (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ? + AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, &adev->firmware.fw_buf, &adev->firmware.fw_buf_mc, &adev->firmware.fw_buf_ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c index ca45ba8ac1..1b5ef32108 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -86,7 +86,7 @@ static int map_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm, amdgpu_sync_create(&sync); - drm_exec_init(&exec, 0); + drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { r = drm_exec_lock_obj(&exec, &bo->tbo.base); drm_exec_retry_on_contention(&exec); @@ -149,7 +149,7 @@ static int unmap_ring_data(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct drm_exec exec; long r; - drm_exec_init(&exec, 0); + drm_exec_init(&exec, 0, 0); drm_exec_until_all_locked(&exec) { r = drm_exec_lock_obj(&exec, &bo->tbo.base); drm_exec_retry_on_contention(&exec); @@ -766,6 +766,9 @@ static int umsch_mm_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend) + return 0; + return umsch_mm_test(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 3a632c3b1a..0dcff2889e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -1099,7 +1099,8 @@ bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev) { bool xnack_mode = true; - if (amdgpu_sriov_vf(adev) && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) + if (amdgpu_sriov_vf(adev) && + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) xnack_mode = false; return xnack_mode; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index db6fc0cb18..453a4b786c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ #include +#include #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 5baefb548a..5b50f73591 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1438,6 +1438,51 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, return 0; } +/** + * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM + * + * @adev: amdgpu_device pointer + * @vm: requested vm + * @flush_type: flush type + * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush. + * + * Flush TLB if needed for a compute VM. + * + * Returns: + * 0 for success. 
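+ * + * The flush is skipped when tlb_seq has not advanced since the last flush + * recorded in kfd_last_flushed_seq, so redundant calls are cheap.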
+ */ +int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + uint32_t flush_type, + uint32_t xcc_mask) +{ + uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); + bool all_hub = false; + int xcc = 0, r = 0; + + WARN_ON_ONCE(!vm->is_compute_context); + + /* + * It can be that we race and lose here, but that is extremely unlikely + * and the worst thing which could happen is that we flush the changes + * into the TLB once more which is harmless. + */ + if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq) + return 0; + + if (adev->family == AMDGPU_FAMILY_AI || + adev->family == AMDGPU_FAMILY_RV) + all_hub = true; + + for_each_inst(xcc, xcc_mask) { + r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type, + all_hub, xcc); + if (r) + break; + } + return r; +} + /** * amdgpu_vm_bo_add - add a bo to a specific vm * @@ -1514,6 +1559,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, trace_amdgpu_vm_bo_map(bo_va, mapping); } +/* Validate operation parameters to prevent potential abuse */ +static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev, + struct amdgpu_bo *bo, + uint64_t saddr, + uint64_t offset, + uint64_t size) +{ + uint64_t tmp, lpfn; + + if (saddr & AMDGPU_GPU_PAGE_MASK + || offset & AMDGPU_GPU_PAGE_MASK + || size & AMDGPU_GPU_PAGE_MASK) + return -EINVAL; + + if (check_add_overflow(saddr, size, &tmp) + || check_add_overflow(offset, size, &tmp) + || size == 0 /* which also leads to end < begin */) + return -EINVAL; + + /* make sure object fit at this offset */ + if (bo && offset + size > amdgpu_bo_size(bo)) + return -EINVAL; + + /* Ensure last pfn not exceed max_pfn */ + lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT; + if (lpfn >= adev->vm_manager.max_pfn) + return -EINVAL; + + return 0; +} + /** * amdgpu_vm_bo_map - map bo inside a vm * @@ -1540,21 +1616,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; + int r; - /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) - return -EINVAL; - if (saddr + size <= saddr || offset + size <= offset) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; - if ((bo && offset + size > amdgpu_bo_size(bo)) || - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) - return -EINVAL; + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); + if (r) + return r; saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); if (tmp) { @@ -1607,17 +1676,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, uint64_t eaddr; int r; - /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) - return -EINVAL; - if (saddr + size <= saddr || offset + size <= offset) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; - if ((bo && offset + size > amdgpu_bo_size(bo)) || - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) - return -EINVAL; + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); + if (r) + return r; /* Allocate all the needed memory */ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); @@ -1631,7 +1692,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, } saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / 
AMDGPU_GPU_PAGE_SIZE; mapping->start = saddr; mapping->last = eaddr; @@ -1718,10 +1779,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; LIST_HEAD(removed); uint64_t eaddr; + int r; + + r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size); + if (r) + return r; - eaddr = saddr + size - 1; saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; /* Allocate all the needed memory */ before = kzalloc(sizeof(*before), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 2cd86d2bf7..4740dd65b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -116,7 +116,7 @@ struct amdgpu_mem_stats; #define AMDGPU_VM_FAULT_STOP_FIRST 1 #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 -/* Reserve 4MB VRAM for page tables */ +/* How much VRAM be reserved for page tables */ #define AMDGPU_VM_RESERVED_VRAM (8ULL << 20) /* @@ -324,6 +324,7 @@ struct amdgpu_vm { /* Last finished delayed update */ atomic64_t tlb_seq; struct dma_fence *last_tlb_flush; + atomic64_t kfd_last_flushed_seq; /* How many times we had to re-generate the page tables */ uint64_t generation; @@ -445,6 +446,10 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, int amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); +int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + uint32_t flush_type, + uint32_t xcc_mask); void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, struct amdgpu_vm *vm, struct amdgpu_bo *bo); int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index e81579708e..ad44012cc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "amdgpu_ucode.h" #include "amdgpu_vpe.h" +#include "amdgpu_smu.h" #include "soc15_common.h" #include "vpe_v6_1.h" @@ -33,8 +34,180 @@ /* VPE CSA resides in the 4th page of CSA */ #define AMDGPU_CSA_VPE_OFFSET (4096 * 3) +/* 1 second timeout */ +#define VPE_IDLE_TIMEOUT msecs_to_jiffies(1000) + +#define VPE_MAX_DPM_LEVEL 4 +#define FIXED1_8_BITS_PER_FRACTIONAL_PART 8 +#define GET_PRATIO_INTEGER_PART(x) ((x) >> FIXED1_8_BITS_PER_FRACTIONAL_PART) + static void vpe_set_ring_funcs(struct amdgpu_device *adev); +static inline uint16_t div16_u16_rem(uint16_t dividend, uint16_t divisor, uint16_t *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} + +static inline uint16_t complete_integer_division_u16( + uint16_t dividend, + uint16_t divisor, + uint16_t *remainder) +{ + return div16_u16_rem(dividend, divisor, (uint16_t *)remainder); +} + +static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator) +{ + u16 arg1_value = numerator; + u16 arg2_value = denominator; + + uint16_t remainder; + + /* determine integer part */ + uint16_t res_value = complete_integer_division_u16( + arg1_value, arg2_value, &remainder); + + if (res_value > 127 /* CHAR_MAX */) + return 0; + + /* determine fractional part */ + { + unsigned int i = FIXED1_8_BITS_PER_FRACTIONAL_PART; + + do { + remainder <<= 1; + + res_value <<= 1; + + if (remainder >= arg2_value) { + res_value |= 1; + remainder -= arg2_value; + } + } while (--i != 0); + } + + /* round up LSB */ + 
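/* round to nearest: carry in one LSB when the dropped remainder is at least half a unit, and return 0 if the carry would overflow */ + 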
{ + uint16_t summand = (remainder << 1) >= arg2_value; + + if ((res_value + summand) > 32767 /* SHRT_MAX */) + return 0; + + res_value += summand; + } + + return res_value; +} + +static uint16_t vpe_internal_get_pratio(uint16_t from_frequency, uint16_t to_frequency) +{ + uint16_t pratio = vpe_u1_8_from_fraction(from_frequency, to_frequency); + + if (GET_PRATIO_INTEGER_PART(pratio) > 1) + pratio = 0; + + return pratio; +} + +/* + * VPE has 4 DPM levels from level 0 (lowerest) to 3 (highest), + * VPE FW will dynamically decide which level should be used according to current loading. + * + * Get VPE and SOC clocks from PM, and select the appropriate four clock values, + * calculate the ratios of adjusting from one clock to another. + * The VPE FW can then request the appropriate frequency from the PMFW. + */ +int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe) +{ + struct amdgpu_device *adev = vpe->ring.adev; + uint32_t dpm_ctl; + + if (adev->pm.dpm_enabled) { + struct dpm_clocks clock_table = { 0 }; + struct dpm_clock *VPEClks; + struct dpm_clock *SOCClks; + uint32_t idx; + uint32_t pratio_vmax_vnorm = 0, pratio_vnorm_vmid = 0, pratio_vmid_vmin = 0; + uint16_t pratio_vmin_freq = 0, pratio_vmid_freq = 0, pratio_vnorm_freq = 0, pratio_vmax_freq = 0; + + dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); + dpm_ctl |= 1; /* DPM enablement */ + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); + + /* Get VPECLK and SOCCLK */ + if (amdgpu_dpm_get_dpm_clock_table(adev, &clock_table)) { + dev_dbg(adev->dev, "%s: get clock failed!\n", __func__); + goto disable_dpm; + } + + SOCClks = clock_table.SocClocks; + VPEClks = clock_table.VPEClocks; + + /* vpe dpm only cares 4 levels. */ + for (idx = 0; idx < VPE_MAX_DPM_LEVEL; idx++) { + uint32_t soc_dpm_level; + uint32_t min_freq; + + if (idx == 0) + soc_dpm_level = 0; + else + soc_dpm_level = (idx * 2) + 1; + + /* clamp the max level */ + if (soc_dpm_level > PP_SMU_NUM_VPECLK_DPM_LEVELS - 1) + soc_dpm_level = PP_SMU_NUM_VPECLK_DPM_LEVELS - 1; + + min_freq = (SOCClks[soc_dpm_level].Freq < VPEClks[soc_dpm_level].Freq) ? 
+ SOCClks[soc_dpm_level].Freq : VPEClks[soc_dpm_level].Freq; + + switch (idx) { + case 0: + pratio_vmin_freq = min_freq; + break; + case 1: + pratio_vmid_freq = min_freq; + break; + case 2: + pratio_vnorm_freq = min_freq; + break; + case 3: + pratio_vmax_freq = min_freq; + break; + default: + break; + } + } + + if (pratio_vmin_freq && pratio_vmid_freq && pratio_vnorm_freq && pratio_vmax_freq) { + uint32_t pratio_ctl; + + pratio_vmax_vnorm = (uint32_t)vpe_internal_get_pratio(pratio_vmax_freq, pratio_vnorm_freq); + pratio_vnorm_vmid = (uint32_t)vpe_internal_get_pratio(pratio_vnorm_freq, pratio_vmid_freq); + pratio_vmid_vmin = (uint32_t)vpe_internal_get_pratio(pratio_vmid_freq, pratio_vmin_freq); + + pratio_ctl = pratio_vmax_vnorm | (pratio_vnorm_vmid << 9) | (pratio_vmid_vmin << 18); + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl); /* PRatio */ + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000); /* 1ms, unit=1/24MHz */ + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000); /* 50ms */ + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */ + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */ + dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__); + } else { + dev_dbg(adev->dev, "%s: invalid pratio parameters!\n", __func__); + goto disable_dpm; + } + } + return 0; + +disable_dpm: + dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable)); + dpm_ctl &= 0xfffffffe; /* Disable DPM */ + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl); + dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__); + return 0; +} + int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev) { struct amdgpu_firmware_info ucode = { @@ -134,6 +307,19 @@ static int vpe_early_init(void *handle) return 0; } +static void vpe_idle_work_handler(struct work_struct *work) +{ + struct amdgpu_device *adev = + container_of(work, struct amdgpu_device, vpe.idle_work.work); + unsigned int fences = 0; + + fences += amdgpu_fence_count_emitted(&adev->vpe.ring); + + if (fences == 0) + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); + else + schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); +} static int vpe_common_init(struct amdgpu_vpe *vpe) { @@ -150,6 +336,9 @@ static int vpe_common_init(struct amdgpu_vpe *vpe) return r; } + vpe->context_started = false; + INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler); + return 0; } @@ -201,6 +390,12 @@ static int vpe_hw_init(void *handle) struct amdgpu_vpe *vpe = &adev->vpe; int ret; + /* Power on VPE */ + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, + AMD_PG_STATE_UNGATE); + if (ret) + return ret; + ret = vpe_load_microcode(vpe); if (ret) return ret; @@ -219,6 +414,9 @@ static int vpe_hw_fini(void *handle) vpe_ring_stop(vpe); + /* Power off VPE */ + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); + return 0; } @@ -226,6 +424,8 @@ static int vpe_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->vpe.idle_work); + return vpe_hw_fini(adev); } @@ -430,6 +630,21 @@ static int vpe_set_clockgating_state(void *handle, static int vpe_set_powergating_state(void *handle, enum amd_powergating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_vpe *vpe = &adev->vpe; + + if 
(!adev->pm.dpm_enabled) + dev_err(adev->dev, "Without PM, cannot support powergating\n"); + + dev_dbg(adev->dev, "%s: %s!\n", __func__, (state == AMD_PG_STATE_GATE) ? "GATE":"UNGATE"); + + if (state == AMD_PG_STATE_GATE) { + amdgpu_dpm_enable_vpe(adev, false); + vpe->context_started = false; + } else { + amdgpu_dpm_enable_vpe(adev, true); + } + return 0; } @@ -595,6 +810,38 @@ err0: return ret; } +static void vpe_ring_begin_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_vpe *vpe = &adev->vpe; + + cancel_delayed_work_sync(&adev->vpe.idle_work); + + /* Power on VPE and notify VPE of new context */ + if (!vpe->context_started) { + uint32_t context_notify; + + /* Power on VPE */ + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_UNGATE); + + /* Indicates that a job from a new context has been submitted. */ + context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator)); + if ((context_notify & 0x1) == 0) + context_notify |= 0x1; + else + context_notify &= ~(0x1); + WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify); + vpe->context_started = true; + } +} + +static void vpe_ring_end_use(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); +} + static const struct amdgpu_ring_funcs vpe_ring_funcs = { .type = AMDGPU_RING_TYPE_VPE, .align_mask = 0xf, @@ -625,6 +872,8 @@ static const struct amdgpu_ring_funcs vpe_ring_funcs = { .init_cond_exec = vpe_ring_init_cond_exec, .patch_cond_exec = vpe_ring_patch_cond_exec, .preempt_ib = vpe_ring_preempt_ib, + .begin_use = vpe_ring_begin_use, + .end_use = vpe_ring_end_use, }; static void vpe_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h index 29d56f7ae4..1153ddaea6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h @@ -47,6 +47,15 @@ struct vpe_regs { uint32_t queue0_rb_wptr_lo; uint32_t queue0_rb_wptr_hi; uint32_t queue0_preempt; + + uint32_t dpm_enable; + uint32_t dpm_pratio; + uint32_t dpm_request_interval; + uint32_t dpm_decision_threshold; + uint32_t dpm_busy_clamp_threshold; + uint32_t dpm_idle_clamp_threshold; + uint32_t dpm_request_lv; + uint32_t context_indicator; }; struct amdgpu_vpe { @@ -63,12 +72,15 @@ struct amdgpu_vpe { struct amdgpu_bo *cmdbuf_obj; uint64_t cmdbuf_gpu_addr; uint32_t *cmdbuf_cpu_addr; + struct delayed_work idle_work; + bool context_started; }; int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev); int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe); int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe); int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe); +int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe); #define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0) #define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? 
(vpe)->funcs->ring_start((vpe)) : 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index bd20cb3b98..a6c88f2fe6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -413,6 +413,38 @@ static ssize_t amdgpu_xgmi_show_num_links(struct device *dev, return sysfs_emit(buf, "%s\n", buf); } +static ssize_t amdgpu_xgmi_show_connected_port_num(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info; + int i, j, size = 0; + int current_node; + /* + * get the node id in the sysfs for the current socket and show + * it in the port num info output in the sysfs for easy reading. + * it is NOT the one retrieved from xgmi ta. + */ + for (i = 0; i < top->num_nodes; i++) { + if (top->nodes[i].node_id == adev->gmc.xgmi.node_id) { + current_node = i; + break; + } + } + + for (i = 0; i < top->num_nodes; i++) { + for (j = 0; j < top->nodes[i].num_links; j++) + /* node id in sysfs starts from 1 rather than 0 so +1 here */ + size += sysfs_emit_at(buf, size, "%02x:%02x -> %02x:%02x\n", current_node + 1, + top->nodes[i].port_num[j].src_xgmi_port_num, i + 1, + top->nodes[i].port_num[j].dst_xgmi_port_num); + } + + return size; +} + #define AMDGPU_XGMI_SET_FICAA(o) ((o) | 0x456801) static ssize_t amdgpu_xgmi_show_error(struct device *dev, struct device_attribute *attr, @@ -452,6 +484,7 @@ static DEVICE_ATTR(xgmi_physical_id, 0444, amdgpu_xgmi_show_physical_id, NULL); static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL); static DEVICE_ATTR(xgmi_num_hops, S_IRUGO, amdgpu_xgmi_show_num_hops, NULL); static DEVICE_ATTR(xgmi_num_links, S_IRUGO, amdgpu_xgmi_show_num_links, NULL); +static DEVICE_ATTR(xgmi_port_num, S_IRUGO, amdgpu_xgmi_show_connected_port_num, NULL); static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, struct amdgpu_hive_info *hive) @@ -487,6 +520,13 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, if (ret) pr_err("failed to create xgmi_num_links\n"); + /* Create xgmi port num file if supported */ + if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) { + ret = device_create_file(adev->dev, &dev_attr_xgmi_port_num); + if (ret) + dev_err(adev->dev, "failed to create xgmi_port_num\n"); + } + /* Create sysfs link to hive info folder on the first device */ if (hive->kobj.parent != (&adev->dev->kobj)) { ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj, @@ -517,6 +557,8 @@ remove_file: device_remove_file(adev->dev, &dev_attr_xgmi_error); device_remove_file(adev->dev, &dev_attr_xgmi_num_hops); device_remove_file(adev->dev, &dev_attr_xgmi_num_links); + if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) + device_remove_file(adev->dev, &dev_attr_xgmi_port_num); success: return ret; @@ -533,6 +575,8 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev, device_remove_file(adev->dev, &dev_attr_xgmi_error); device_remove_file(adev->dev, &dev_attr_xgmi_num_hops); device_remove_file(adev->dev, &dev_attr_xgmi_num_links); + if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) + device_remove_file(adev->dev, &dev_attr_xgmi_port_num); if (hive->kobj.parent != (&adev->dev->kobj)) sysfs_remove_link(&adev->dev->kobj,"xgmi_hive_info"); @@ -779,6 +823,28 @@ static int 
amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_inf return 0; } +static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev, + struct amdgpu_device *peer_adev) +{ + struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info; + struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info; + + for (int i = 0; i < peer_info->num_nodes; i++) { + if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) { + for (int j = 0; j < top_info->num_nodes; j++) { + if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) { + peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops; + peer_info->nodes[i].is_sharing_enabled = + top_info->nodes[j].is_sharing_enabled; + peer_info->nodes[i].num_links = + top_info->nodes[j].num_links; + return; + } + } + } + } +} + int amdgpu_xgmi_add_device(struct amdgpu_device *adev) { struct psp_xgmi_topology_info *top_info; @@ -853,18 +919,38 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) goto exit_unlock; } - /* get latest topology info for each device from psp */ - list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, - &tmp_adev->psp.xgmi_context.top_info, false); + if (amdgpu_sriov_vf(adev) && + adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) { + /* only get topology for VF being init if it can support full duplex */ + ret = psp_xgmi_get_topology_info(&adev->psp, count, + &adev->psp.xgmi_context.top_info, false); if (ret) { - dev_err(tmp_adev->dev, + dev_err(adev->dev, "XGMI: Get topology failure on device %llx, hive %llx, ret %d", - tmp_adev->gmc.xgmi.node_id, - tmp_adev->gmc.xgmi.hive_id, ret); - /* To do : continue with some node failed or disable the whole hive */ + adev->gmc.xgmi.node_id, + adev->gmc.xgmi.hive_id, ret); + /* To do: continue with some node failed or disable the whole hive*/ goto exit_unlock; } + + /* fill the topology info for peers instead of getting from PSP */ + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + amdgpu_xgmi_fill_topology_info(adev, tmp_adev); + } + } else { + /* get latest topology info for each device from psp */ + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, + &tmp_adev->psp.xgmi_context.top_info, false); + if (ret) { + dev_err(tmp_adev->dev, + "XGMI: Get topology failure on device %llx, hive %llx, ret %d", + tmp_adev->gmc.xgmi.node_id, + tmp_adev->gmc.xgmi.hive_id, ret); + /* To do : continue with some node failed or disable the whole hive */ + goto exit_unlock; + } + } } /* get topology again for hives that support extended data */ @@ -1227,10 +1313,10 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) { case AMDGPU_MCA_ERROR_TYPE_UE: - amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL); + amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL); break; case AMDGPU_MCA_ERROR_TYPE_CE: - amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL); + amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 3f715e7fe1..d6f808acfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -24,6 +24,7 @@ #include "soc15.h" #include "soc15_common.h" +#include 
"amdgpu_reg_state.h" #include "amdgpu_xcp.h" #include "gfx_v9_4_3.h" #include "gfxhub_v1_2.h" @@ -656,3 +657,416 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) return 0; } + +static void aqua_read_smn(struct amdgpu_device *adev, + struct amdgpu_smn_reg_data *regdata, + uint64_t smn_addr) +{ + regdata->addr = smn_addr; + regdata->value = RREG32_PCIE(smn_addr); +} + +struct aqua_reg_list { + uint64_t start_addr; + uint32_t num_regs; + uint32_t incrx; +}; + +#define DW_ADDR_INCR 4 + +static void aqua_read_smn_ext(struct amdgpu_device *adev, + struct amdgpu_smn_reg_data *regdata, + uint64_t smn_addr, int i) +{ + regdata->addr = + smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i); + regdata->value = RREG32_PCIE_EXT(regdata->addr); +} + +#define smnreg_0x1A340218 0x1A340218 +#define smnreg_0x1A3402E4 0x1A3402E4 +#define smnreg_0x1A340294 0x1A340294 +#define smreg_0x1A380088 0x1A380088 + +#define NUM_PCIE_SMN_REGS 14 + +static struct aqua_reg_list pcie_reg_addrs[] = { + { smnreg_0x1A340218, 1, 0 }, + { smnreg_0x1A3402E4, 1, 0 }, + { smnreg_0x1A340294, 6, DW_ADDR_INCR }, + { smreg_0x1A380088, 6, DW_ADDR_INCR }, +}; + +static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev, + void *buf, size_t max_size) +{ + struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state; + uint32_t start_addr, incrx, num_regs, szbuf; + struct amdgpu_regs_pcie_v1_0 *pcie_regs; + struct amdgpu_smn_reg_data *reg_data; + struct pci_dev *us_pdev, *ds_pdev; + int aer_cap, r, n; + + if (!buf || !max_size) + return -EINVAL; + + pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf; + + szbuf = sizeof(*pcie_reg_state) + + amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS); + /* Only one instance of pcie regs */ + if (max_size < szbuf) + return -EOVERFLOW; + + pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf + + sizeof(*pcie_reg_state)); + pcie_regs->inst_header.instance = 0; + pcie_regs->inst_header.state = AMDGPU_INST_S_OK; + pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS; + + reg_data = pcie_regs->smn_reg_values; + + for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) { + start_addr = pcie_reg_addrs[r].start_addr; + incrx = pcie_reg_addrs[r].incrx; + num_regs = pcie_reg_addrs[r].num_regs; + for (n = 0; n < num_regs; n++) { + aqua_read_smn(adev, reg_data, start_addr + n * incrx); + ++reg_data; + } + } + + ds_pdev = pci_upstream_bridge(adev->pdev); + us_pdev = pci_upstream_bridge(ds_pdev); + + pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA, + &pcie_regs->device_status); + pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA, + &pcie_regs->link_status); + + aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR); + if (aer_cap) { + pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS, + &pcie_regs->pcie_corr_err_status); + pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS, + &pcie_regs->pcie_uncorr_err_status); + } + + pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS, + &pcie_regs->sub_bus_number_latency); + + pcie_reg_state->common_header.structure_size = szbuf; + pcie_reg_state->common_header.format_revision = 1; + pcie_reg_state->common_header.content_revision = 0; + pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE; + pcie_reg_state->common_header.num_instances = 1; + + return pcie_reg_state->common_header.structure_size; +} + +#define smnreg_0x11A00050 0x11A00050 +#define smnreg_0x11A00180 0x11A00180 +#define smnreg_0x11A00070 0x11A00070 +#define smnreg_0x11A00200 0x11A00200 +#define smnreg_0x11A0020C 0x11A0020C 
+#define smnreg_0x11A00210 0x11A00210 +#define smnreg_0x11A00108 0x11A00108 + +#define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20)) + +#define NUM_XGMI_SMN_REGS 25 + +static struct aqua_reg_list xgmi_reg_addrs[] = { + { smnreg_0x11A00050, 1, 0 }, + { smnreg_0x11A00180, 16, DW_ADDR_INCR }, + { smnreg_0x11A00070, 4, DW_ADDR_INCR }, + { smnreg_0x11A00200, 1, 0 }, + { smnreg_0x11A0020C, 1, 0 }, + { smnreg_0x11A00210, 1, 0 }, + { smnreg_0x11A00108, 1, 0 }, +}; + +static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev, + void *buf, size_t max_size) +{ + struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state; + uint32_t start_addr, incrx, num_regs, szbuf; + struct amdgpu_regs_xgmi_v1_0 *xgmi_regs; + struct amdgpu_smn_reg_data *reg_data; + const int max_xgmi_instances = 8; + int inst = 0, i, j, r, n; + const int xgmi_inst = 2; + void *p; + + if (!buf || !max_size) + return -EINVAL; + + xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf; + + szbuf = sizeof(*xgmi_reg_state) + + amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs), + NUM_XGMI_SMN_REGS); + /* Buffer must hold data for all xgmi instances */ + if (max_size < szbuf) + return -EOVERFLOW; + + p = &xgmi_reg_state->xgmi_state_regs[0]; + for_each_inst(i, adev->aid_mask) { + for (j = 0; j < xgmi_inst; ++j) { + xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p; + xgmi_regs->inst_header.instance = inst++; + + xgmi_regs->inst_header.state = AMDGPU_INST_S_OK; + xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS; + + reg_data = xgmi_regs->smn_reg_values; + + for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) { + start_addr = xgmi_reg_addrs[r].start_addr; + incrx = xgmi_reg_addrs[r].incrx; + num_regs = xgmi_reg_addrs[r].num_regs; + + for (n = 0; n < num_regs; n++) { + aqua_read_smn_ext( + adev, reg_data, + XGMI_LINK_REG(start_addr, j) + + n * incrx, + i); + ++reg_data; + } + } + p = reg_data; + } + } + + xgmi_reg_state->common_header.structure_size = szbuf; + xgmi_reg_state->common_header.format_revision = 1; + xgmi_reg_state->common_header.content_revision = 0; + xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI; + xgmi_reg_state->common_header.num_instances = max_xgmi_instances; + + return xgmi_reg_state->common_header.structure_size; +} + +#define smnreg_0x11C00070 0x11C00070 +#define smnreg_0x11C00210 0x11C00210 + +static struct aqua_reg_list wafl_reg_addrs[] = { + { smnreg_0x11C00070, 4, DW_ADDR_INCR }, + { smnreg_0x11C00210, 1, 0 }, +}; + +#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20)) + +#define NUM_WAFL_SMN_REGS 5 + +static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev, + void *buf, size_t max_size) +{ + struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state; + uint32_t start_addr, incrx, num_regs, szbuf; + struct amdgpu_regs_wafl_v1_0 *wafl_regs; + struct amdgpu_smn_reg_data *reg_data; + const int max_wafl_instances = 8; + int inst = 0, i, j, r, n; + const int wafl_inst = 2; + void *p; + + if (!buf || !max_size) + return -EINVAL; + + wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf; + + szbuf = sizeof(*wafl_reg_state) + + amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs), + NUM_WAFL_SMN_REGS); + + if (max_size < szbuf) + return -EOVERFLOW; + + p = &wafl_reg_state->wafl_state_regs[0]; + for_each_inst(i, adev->aid_mask) { + for (j = 0; j < wafl_inst; ++j) { + wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p; + wafl_regs->inst_header.instance = inst++; + + wafl_regs->inst_header.state = AMDGPU_INST_S_OK; + wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS; + + 
reg_data = wafl_regs->smn_reg_values; + + for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) { + start_addr = wafl_reg_addrs[r].start_addr; + incrx = wafl_reg_addrs[r].incrx; + num_regs = wafl_reg_addrs[r].num_regs; + for (n = 0; n < num_regs; n++) { + aqua_read_smn_ext( + adev, reg_data, + WAFL_LINK_REG(start_addr, j) + + n * incrx, + i); + ++reg_data; + } + } + p = reg_data; + } + } + + wafl_reg_state->common_header.structure_size = szbuf; + wafl_reg_state->common_header.format_revision = 1; + wafl_reg_state->common_header.content_revision = 0; + wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL; + wafl_reg_state->common_header.num_instances = max_wafl_instances; + + return wafl_reg_state->common_header.structure_size; +} + +#define smnreg_0x1B311060 0x1B311060 +#define smnreg_0x1B411060 0x1B411060 +#define smnreg_0x1B511060 0x1B511060 +#define smnreg_0x1B611060 0x1B611060 + +#define smnreg_0x1C307120 0x1C307120 +#define smnreg_0x1C317120 0x1C317120 + +#define smnreg_0x1C320830 0x1C320830 +#define smnreg_0x1C380830 0x1C380830 +#define smnreg_0x1C3D0830 0x1C3D0830 +#define smnreg_0x1C420830 0x1C420830 + +#define smnreg_0x1C320100 0x1C320100 +#define smnreg_0x1C380100 0x1C380100 +#define smnreg_0x1C3D0100 0x1C3D0100 +#define smnreg_0x1C420100 0x1C420100 + +#define smnreg_0x1B310500 0x1B310500 +#define smnreg_0x1C300400 0x1C300400 + +#define USR_CAKE_INCR 0x11000 +#define USR_LINK_INCR 0x100000 +#define USR_CP_INCR 0x10000 + +#define NUM_USR_SMN_REGS 20 + +struct aqua_reg_list usr_reg_addrs[] = { + { smnreg_0x1B311060, 4, DW_ADDR_INCR }, + { smnreg_0x1B411060, 4, DW_ADDR_INCR }, + { smnreg_0x1B511060, 4, DW_ADDR_INCR }, + { smnreg_0x1B611060, 4, DW_ADDR_INCR }, + { smnreg_0x1C307120, 2, DW_ADDR_INCR }, + { smnreg_0x1C317120, 2, DW_ADDR_INCR }, +}; + +#define NUM_USR1_SMN_REGS 46 +struct aqua_reg_list usr1_reg_addrs[] = { + { smnreg_0x1C320830, 6, USR_CAKE_INCR }, + { smnreg_0x1C380830, 5, USR_CAKE_INCR }, + { smnreg_0x1C3D0830, 5, USR_CAKE_INCR }, + { smnreg_0x1C420830, 4, USR_CAKE_INCR }, + { smnreg_0x1C320100, 6, USR_CAKE_INCR }, + { smnreg_0x1C380100, 5, USR_CAKE_INCR }, + { smnreg_0x1C3D0100, 5, USR_CAKE_INCR }, + { smnreg_0x1C420100, 4, USR_CAKE_INCR }, + { smnreg_0x1B310500, 4, USR_LINK_INCR }, + { smnreg_0x1C300400, 2, USR_CP_INCR }, +}; + +static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev, + void *buf, size_t max_size, + int reg_state) +{ + uint32_t start_addr, incrx, num_regs, szbuf, num_smn; + struct amdgpu_reg_state_usr_v1_0 *usr_reg_state; + struct amdgpu_regs_usr_v1_0 *usr_regs; + struct amdgpu_smn_reg_data *reg_data; + const int max_usr_instances = 4; + struct aqua_reg_list *reg_addrs; + int inst = 0, i, n, r, arr_size; + void *p; + + if (!buf || !max_size) + return -EINVAL; + + switch (reg_state) { + case AMDGPU_REG_STATE_TYPE_USR: + arr_size = ARRAY_SIZE(usr_reg_addrs); + reg_addrs = usr_reg_addrs; + num_smn = NUM_USR_SMN_REGS; + break; + case AMDGPU_REG_STATE_TYPE_USR_1: + arr_size = ARRAY_SIZE(usr1_reg_addrs); + reg_addrs = usr1_reg_addrs; + num_smn = NUM_USR1_SMN_REGS; + break; + default: + return -EINVAL; + } + + usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf; + + szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances, + sizeof(*usr_regs), + num_smn); + if (max_size < szbuf) + return -EOVERFLOW; + + p = &usr_reg_state->usr_state_regs[0]; + for_each_inst(i, adev->aid_mask) { + usr_regs = (struct amdgpu_regs_usr_v1_0 *)p; + usr_regs->inst_header.instance = inst++; + usr_regs->inst_header.state = 
AMDGPU_INST_S_OK; + usr_regs->inst_header.num_smn_regs = num_smn; + reg_data = usr_regs->smn_reg_values; + + for (r = 0; r < arr_size; r++) { + start_addr = reg_addrs[r].start_addr; + incrx = reg_addrs[r].incrx; + num_regs = reg_addrs[r].num_regs; + for (n = 0; n < num_regs; n++) { + aqua_read_smn_ext(adev, reg_data, + start_addr + n * incrx, i); + reg_data++; + } + } + p = reg_data; + } + + usr_reg_state->common_header.structure_size = szbuf; + usr_reg_state->common_header.format_revision = 1; + usr_reg_state->common_header.content_revision = 0; + usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR; + usr_reg_state->common_header.num_instances = max_usr_instances; + + return usr_reg_state->common_header.structure_size; +} + +ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev, + enum amdgpu_reg_state reg_state, void *buf, + size_t max_size) +{ + ssize_t size; + + switch (reg_state) { + case AMDGPU_REG_STATE_TYPE_PCIE: + size = aqua_vanjaram_read_pcie_state(adev, buf, max_size); + break; + case AMDGPU_REG_STATE_TYPE_XGMI: + size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size); + break; + case AMDGPU_REG_STATE_TYPE_WAFL: + size = aqua_vanjaram_read_wafl_state(adev, buf, max_size); + break; + case AMDGPU_REG_STATE_TYPE_USR: + size = aqua_vanjaram_read_usr_state(adev, buf, max_size, + AMDGPU_REG_STATE_TYPE_USR); + break; + case AMDGPU_REG_STATE_TYPE_USR_1: + size = aqua_vanjaram_read_usr_state( + adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1); + break; + default: + return -EINVAL; + } + + return size; +} diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c index f0737fb3a9..d1bba9c64e 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c @@ -30,6 +30,8 @@ #define regATHUB_MISC_CNTL_V3_0_1 0x00d7 #define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0 +#define regATHUB_MISC_CNTL_V3_3_0 0x00d8 +#define regATHUB_MISC_CNTL_V3_3_0_BASE_IDX 0 static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev) @@ -40,6 +42,9 @@ static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev) case IP_VERSION(3, 0, 1): data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1); break; + case IP_VERSION(3, 3, 0): + data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0); + break; default: data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL); break; @@ -53,6 +58,9 @@ static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data) case IP_VERSION(3, 0, 1): WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data); break; + case IP_VERSION(3, 3, 0): + WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0, data); + break; default: WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data); break; diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 1b51be9b5f..ac51d19b51 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -395,7 +395,6 @@ static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr) (*ptr)++; return; } - return; } } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 3ee219aa28..7672abe6c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -28,6 +28,7 @@ #include +#include #include #include "amdgpu.h" #include "amdgpu_connectors.h" diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index bb666cb752..587ee632a3 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -21,6 +21,7 @@ * */ +#include #include #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 7af277f61c..f22ec27365 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -21,6 +21,7 @@ * */ +#include #include #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 143efc37a1..4dbe9b3259 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -23,6 +23,7 @@ #include +#include #include #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index adeddfb7ff..05bcce2338 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -21,6 +21,7 @@ * */ +#include #include #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 806a8cf904..0afe86bcc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -67,6 +67,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin"); @@ -106,23 +107,6 @@ static const struct soc15_reg_golden golden_settings_gc_11_0_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a) }; -static const struct soc15_reg_golden golden_settings_gc_11_5_0[] = { - SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_DEBUG5, 0xffffffff, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL, 0xffffffff, 0xf37fff3f), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xfffffffb, 0x00f40188), - SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL4, 0xf0ffffff, 0x80009007), - SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf1ffffff, 0x00880007), - SOC15_REG_GOLDEN_VALUE(GC, 0, regPC_CONFIG_CNTL_1, 0xffffffff, 0x00010000), - SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000), - SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL2, 0x007f0000, 0x00000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xffcfffff, 0x0000200a), - SOC15_REG_GOLDEN_VALUE(GC, 0, regUTCL1_CTRL_2, 0xffffffff, 0x0000048f) -}; - #define DEFAULT_SH_MEM_CONFIG \ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ @@ -293,6 +277,9 @@ static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) { + if (amdgpu_sriov_vf(adev)) + return; + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): @@ -300,11 +287,6 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_11_0_1, (const u32)ARRAY_SIZE(golden_settings_gc_11_0_1)); break; - case IP_VERSION(11, 5, 0): - 
soc15_program_register_sequence(adev, - golden_settings_gc_11_5_0, - (const u32)ARRAY_SIZE(golden_settings_gc_11_5_0)); - break; default: break; } @@ -564,7 +546,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) } if (!amdgpu_sriov_vf(adev)) { - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 0) && + adev->pdev->revision == 0xCE) + snprintf(fw_name, sizeof(fw_name), "amdgpu/gc_11_0_0_rlc_1.bin"); + else + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name); if (err) goto out; @@ -1644,7 +1630,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev) active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa)); } - active_rb_bitmap |= global_active_rb_bitmap; + active_rb_bitmap &= global_active_rb_bitmap; adev->gfx.config.backend_enable_mask = active_rb_bitmap; adev->gfx.config.num_rbs = hweight32(active_rb_bitmap); } @@ -4466,11 +4452,43 @@ static int gfx_v11_0_wait_for_idle(void *handle) return -ETIMEDOUT; } +static int gfx_v11_0_request_gfx_index_mutex(struct amdgpu_device *adev, + int req) +{ + u32 i, tmp, val; + + for (i = 0; i < adev->usec_timeout; i++) { + /* Request with MeId=2, PipeId=0 */ + tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req); + tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4); + WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp); + + val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX); + if (req) { + if (val == tmp) + break; + } else { + tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, + REQUEST, 1); + + /* unlocked or locked by firmware */ + if (val != tmp) + break; + } + udelay(1); + } + + if (i >= adev->usec_timeout) + return -EINVAL; + + return 0; +} + static int gfx_v11_0_soft_reset(void *handle) { u32 grbm_soft_reset = 0; u32 tmp; - int i, j, k; + int r, i, j, k; struct amdgpu_device *adev = (struct amdgpu_device *)handle; tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL); @@ -4510,6 +4528,13 @@ static int gfx_v11_0_soft_reset(void *handle) } } + /* Try to acquire the gfx mutex before access to CP_VMID_RESET */ + r = gfx_v11_0_request_gfx_index_mutex(adev, 1); + if (r) { + DRM_ERROR("Failed to acquire the gfx mutex during soft reset\n"); + return r; + } + WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe); // Read CP_VMID_RESET register three times. 
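/*
 * Editor's note, illustrative only (not part of the upstream diff): the
 * CP_GFX_INDEX_MUTEX handshake added in this hunk is a write-and-verify
 * lock. The driver writes REQUEST=1 together with its CLIENTID and reads
 * the register back; ownership is confirmed only when the read-back equals
 * the value written, otherwise another client (or firmware) holds the
 * mutex and the loop retries with a 1 us delay for up to adev->usec_timeout
 * tries. A minimal sketch of the resulting bracket around the VMID reset,
 * using only helpers that appear in this patch (error logging elided; the
 * real patch calls DRM_ERROR and returns on failure):
 *
 *	if (!gfx_v11_0_request_gfx_index_mutex(adev, 1)) {	// acquire
 *		WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
 *		gfx_v11_0_request_gfx_index_mutex(adev, 0);	// release
 *	}
 */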
@@ -4518,6 +4543,13 @@ static int gfx_v11_0_soft_reset(void *handle) RREG32_SOC15(GC, 0, regCP_VMID_RESET); RREG32_SOC15(GC, 0, regCP_VMID_RESET); + /* release the gfx mutex */ + r = gfx_v11_0_request_gfx_index_mutex(adev, 0); + if (r) { + DRM_ERROR("Failed to release the gfx mutex during soft reset\n"); + return r; + } + for (i = 0; i < adev->usec_timeout; i++) { if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) && !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE)) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 4a09cc0d8c..131cddbdda 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -3828,8 +3828,8 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, /* the caller should make sure initialize value of * err_data->ue_count and err_data->ce_count */ - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); - amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count); + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count); } static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, @@ -3882,150 +3882,6 @@ static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, mutex_unlock(&adev->grbm_idx_mutex); } -static void gfx_v9_4_3_inst_query_utc_err_status(struct amdgpu_device *adev, - int xcc_id) -{ - uint32_t data; - - data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS); - if (data) { - dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3); - } - - data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS); - if (data) { - dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3); - } - - data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), - regVML2_WALKER_MEM_ECC_STATUS); - if (data) { - dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, - 0x3); - } -} - -static void gfx_v9_4_3_log_cu_timeout_status(struct amdgpu_device *adev, - uint32_t status, int xcc_id) -{ - struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; - uint32_t i, simd, wave; - uint32_t wave_status; - uint32_t wave_pc_lo, wave_pc_hi; - uint32_t wave_exec_lo, wave_exec_hi; - uint32_t wave_inst_dw0, wave_inst_dw1; - uint32_t wave_ib_sts; - - for (i = 0; i < 32; i++) { - if (!((i << 1) & status)) - continue; - - simd = i / cu_info->max_waves_per_simd; - wave = i % cu_info->max_waves_per_simd; - - wave_status = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS); - wave_pc_lo = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO); - wave_pc_hi = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI); - wave_exec_lo = - wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO); - wave_exec_hi = - wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI); - wave_inst_dw0 = - wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0); - wave_inst_dw1 = - wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1); - wave_ib_sts = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS); - - dev_info( - adev->dev, - "\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n", - simd, wave, wave_status, - ((uint64_t)wave_pc_hi << 32 | wave_pc_lo), - 
((uint64_t)wave_exec_hi << 32 | wave_exec_lo), - ((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0), - wave_ib_sts); - } -} - -static void gfx_v9_4_3_inst_query_sq_timeout_status(struct amdgpu_device *adev, - int xcc_id) -{ - uint32_t se_idx, sh_idx, cu_idx; - uint32_t status; - - mutex_lock(&adev->grbm_idx_mutex); - for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) { - for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) { - for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) { - gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx, - cu_idx, xcc_id); - status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), - regSQ_TIMEOUT_STATUS); - if (status != 0) { - dev_info( - adev->dev, - "GFX Watchdog Timeout: SE %d, SH %d, CU %d\n", - se_idx, sh_idx, cu_idx); - gfx_v9_4_3_log_cu_timeout_status( - adev, status, xcc_id); - } - /* clear old status */ - WREG32_SOC15(GC, GET_INST(GC, xcc_id), - regSQ_TIMEOUT_STATUS, 0); - } - } - } - gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, - xcc_id); - mutex_unlock(&adev->grbm_idx_mutex); -} - -static void gfx_v9_4_3_inst_query_ras_err_status(struct amdgpu_device *adev, - void *ras_error_status, int xcc_id) -{ - gfx_v9_4_3_inst_query_utc_err_status(adev, xcc_id); - gfx_v9_4_3_inst_query_sq_timeout_status(adev, xcc_id); -} - -static void gfx_v9_4_3_inst_reset_utc_err_status(struct amdgpu_device *adev, - int xcc_id) -{ - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regUTCL2_MEM_ECC_STATUS, 0x3); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_MEM_ECC_STATUS, 0x3); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), regVML2_WALKER_MEM_ECC_STATUS, 0x3); -} - -static void gfx_v9_4_3_inst_reset_sq_timeout_status(struct amdgpu_device *adev, - int xcc_id) -{ - uint32_t se_idx, sh_idx, cu_idx; - - mutex_lock(&adev->grbm_idx_mutex); - for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines; se_idx++) { - for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se; sh_idx++) { - for (cu_idx = 0; cu_idx < adev->gfx.config.max_cu_per_sh; cu_idx++) { - gfx_v9_4_3_xcc_select_se_sh(adev, se_idx, sh_idx, - cu_idx, xcc_id); - WREG32_SOC15(GC, GET_INST(GC, xcc_id), - regSQ_TIMEOUT_STATUS, 0); - } - } - } - gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, - xcc_id); - mutex_unlock(&adev->grbm_idx_mutex); -} - -static void gfx_v9_4_3_inst_reset_ras_err_status(struct amdgpu_device *adev, - void *ras_error_status, int xcc_id) -{ - gfx_v9_4_3_inst_reset_utc_err_status(adev, xcc_id); - gfx_v9_4_3_inst_reset_sq_timeout_status(adev, xcc_id); -} - static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev, void *ras_error_status, int xcc_id) { @@ -4067,16 +3923,6 @@ static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev) amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count); } -static void gfx_v9_4_3_query_ras_error_status(struct amdgpu_device *adev) -{ - amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_query_ras_err_status); -} - -static void gfx_v9_4_3_reset_ras_error_status(struct amdgpu_device *adev) -{ - amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_status); -} - static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev) { amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer); @@ -4394,8 +4240,6 @@ struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = { struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = { .query_ras_error_count = &gfx_v9_4_3_query_ras_error_count, .reset_ras_error_count = 
&gfx_v9_4_3_reset_ras_error_count, - .query_ras_error_status = &gfx_v9_4_3_query_ras_error_status, - .reset_ras_error_status = &gfx_v9_4_3_reset_ras_error_status, }; struct amdgpu_gfx_ras gfx_v9_4_3_ras = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 95d06da544..49aecdcee0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -456,10 +456,12 @@ static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev, WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp); /* Setup L2 cache */ - tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL); - tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); - WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp); - WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0); + if (!amdgpu_sriov_vf(adev)) { + tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL); + tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); + WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp); + WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0); + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index ced2e1bdcc..e67a62db9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -883,7 +883,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * GRBM interface. */ if ((vmhub == AMDGPU_GFXHUB(0)) && - (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) + (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))) RREG32_NO_KIQ(req); for (j = 0; j < adev->usec_timeout; j++) { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index bc38b90f8c..88ea58d5c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -674,14 +674,6 @@ static int jpeg_v4_0_set_powergating_state(void *handle, return ret; } -static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - return 0; -} - static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, unsigned int type, @@ -765,7 +757,6 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev) } static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = { - .set = jpeg_v4_0_set_interrupt_state, .process = jpeg_v4_0_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 6ede85b28c..78b74daf4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -181,7 +181,6 @@ static int jpeg_v4_0_5_hw_fini(void *handle) RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE); } - amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0); return 0; } @@ -516,14 +515,6 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle, return ret; } -static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - return 0; -} - static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -603,7 +594,6 @@ static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev) } static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = { - .set = 
jpeg_v4_0_5_set_interrupt_state, .process = jpeg_v4_0_5_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 4dfec56e1b..26d71a2239 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -408,6 +408,8 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) mes_set_hw_res_pkt.enable_reg_active_poll = 1; mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; mes_set_hw_res_pkt.oversubscription_timer = 50; + mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; + mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr; return mes_v11_0_submit_pkt_and_poll_completion(mes, &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 9b0146732e..fb53aacdcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -652,8 +652,8 @@ static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev, AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, &ue_count); - amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count); + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count); } static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 6d24c84924..19986ff6a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -401,8 +401,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device if (err_data.ce_count) dev_info(adev->dev, "%ld correctable hardware " - "errors detected in %s block, " - "no user action is needed.\n", + "errors detected in %s block\n", obj->err_data.ce_count, get_ras_block_str(adev->nbio.ras_if)); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index db013f8614..b4723d68ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -603,8 +603,7 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device if (err_data.ce_count) dev_info(adev->dev, "%ld correctable hardware " - "errors detected in %s block, " - "no user action is needed.\n", + "errors detected in %s block\n", obj->err_data.ce_count, get_ras_block_str(adev->nbio.ras_if)); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 8eed9a73d0..1caed0163b 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -366,7 +366,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) u32 ref_and_mask = 0; const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; - ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; + ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 + << (ring->me % adev->sdma.num_inst_per_aid); sdma_v4_4_2_wait_reg_mem(ring, 0, 1, adev->nbio.funcs->get_hdp_flush_done_offset(adev), @@ -2135,7 +2136,7 @@ static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev, AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, &ue_count); - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); + amdgpu_ras_error_statistic_ue_count(err_data, 
&mcm_info, NULL, ue_count); } static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 0058f3f7cf..46f9f58d84 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -292,17 +292,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) u32 ref_and_mask = 0; const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; - ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | - SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | - SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ - amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2); - amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2); - amdgpu_ring_write(ring, ref_and_mask); /* reference */ - amdgpu_ring_write(ring, ref_and_mask); /* mask */ - amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | - SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ + if (ring->me > 1) { + amdgpu_asic_flush_hdp(adev, ring); + } else { + ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2); + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2); + amdgpu_ring_write(ring, ref_and_mask); /* reference */ + amdgpu_ring_write(ring, ref_and_mask); /* mask */ + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ + } } /** diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f9ba180304..1c614451de 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -925,6 +925,7 @@ static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs = .pre_asic_init = &soc15_pre_asic_init, .query_video_codecs = &soc15_query_video_codecs, .encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing, + .get_reg_state = &aqua_vanjaram_get_reg_state, }; static int soc15_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index eac54042c6..1444b7765e 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -27,6 +27,7 @@ #include "nbio_v6_1.h" #include "nbio_v7_0.h" #include "nbio_v7_4.h" +#include "amdgpu_reg_state.h" extern const struct amdgpu_ip_block_version vega10_common_ip_block; @@ -114,6 +115,9 @@ int aldebaran_reg_base_init(struct amdgpu_device *adev); void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev); u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id); int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev); +ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev, + enum amdgpu_reg_state reg_state, void *buf, + size_t max_size); void vega10_doorbell_index_init(struct amdgpu_device *adev); void vega20_doorbell_index_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 4d7188912e..fd4505fa4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -450,10 
+450,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev) { switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): - return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC); case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): - return false; default: return true; } @@ -714,7 +712,10 @@ static int soc21_common_early_init(void *handle) AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_GFX_PG; - adev->external_rev_id = adev->rev_id + 0x1; + if (adev->rev_id == 0) + adev->external_rev_id = 0x1; + else + adev->external_rev_id = adev->rev_id + 0x10; break; default: /* FIXME: not supported yet */ @@ -832,10 +833,35 @@ static int soc21_common_suspend(void *handle) return soc21_common_hw_fini(adev); } +static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) +{ + u32 sol_reg1, sol_reg2; + + /* Will reset for the following suspend abort cases. + * 1) Only reset dGPU side. + * 2) S3 suspend got aborted and TOS is active. + */ + if (!(adev->flags & AMD_IS_APU) && adev->in_s3 && + !adev->suspend_complete) { + sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); + msleep(100); + sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); + + return (sol_reg1 != sol_reg2); + } + + return false; +} + static int soc21_common_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (soc21_need_reset_on_resume(adev)) { + dev_info(adev->dev, "S3 suspend aborted, resetting..."); + soc21_asic_reset(adev); + } + return soc21_common_hw_init(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index e9c2ff74f0..7458a218e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "umc/umc_12_0_0_offset.h" #include "umc/umc_12_0_0_sh_mask.h" +#include "mp/mp_13_0_6_sh_mask.h" const uint32_t umc_v12_0_channel_idx_tbl[] @@ -88,16 +89,26 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev) umc_v12_0_reset_error_count_per_channel, NULL); } -bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status) +bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status) { + if (amdgpu_ras_is_poison_mode_supported(adev) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)) + return true; + return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)); } -bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status) +bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status) { + if (amdgpu_ras_is_poison_mode_supported(adev) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)) + return false; + return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 || (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 && @@ -105,7 +116,7 @@ bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status) /* Identify data parity error in replay mode */ ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) 
== 0x5 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) && - !(umc_v12_0_is_uncorrectable_error(mc_umc_status))))); + !(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status))))); } static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev, @@ -124,7 +135,7 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev, mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); - if (umc_v12_0_is_correctable_error(mc_umc_status)) + if (umc_v12_0_is_correctable_error(adev, mc_umc_status)) *error_count += 1; } @@ -142,7 +153,7 @@ static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); - if (umc_v12_0_is_uncorrectable_error(mc_umc_status)) + if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) *error_count += 1; } @@ -166,8 +177,8 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev, umc_v12_0_query_correctable_error_count(adev, umc_reg_offset, &ce_count); umc_v12_0_query_uncorrectable_error_count(adev, umc_reg_offset, &ue_count); - amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count); - amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count); + amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count); + amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count); return 0; } @@ -360,6 +371,59 @@ static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev, return 0; } +static void umc_v12_0_ecc_info_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + amdgpu_mca_smu_log_ras_error(adev, + AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status); + amdgpu_mca_smu_log_ras_error(adev, + AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status); +} + +static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_node *err_node; + uint64_t mc_umc_status; + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + for_each_ras_error(err_node, err_data) { + mc_umc_status = err_node->err_info.err_addr.err_status; + if (!mc_umc_status) + continue; + + if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) { + uint64_t mca_addr, err_addr, mca_ipid; + uint32_t InstanceIdLo; + struct amdgpu_smuio_mcm_config_info *mcm_info; + + mcm_info = &err_node->err_info.mcm_info; + mca_addr = err_node->err_info.err_addr.err_addr; + mca_ipid = err_node->err_info.err_addr.err_ipid; + + err_addr = REG_GET_FIELD(mca_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); + InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo); + + dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n", + mca_ipid, + mcm_info->die_id, + MCA_IPID_LO_2_UMC_INST(InstanceIdLo), + MCA_IPID_LO_2_UMC_CH(InstanceIdLo), + err_addr); + + umc_v12_0_convert_error_address(adev, + err_data, err_addr, + MCA_IPID_LO_2_UMC_CH(InstanceIdLo), + MCA_IPID_LO_2_UMC_INST(InstanceIdLo), + mcm_info->die_id); + + /* Clear umc error address content */ + memset(&err_node->err_info.err_addr, + 0, sizeof(err_node->err_info.err_addr)); + } + } +} + static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev) { amdgpu_umc_loop_channels(adev, @@ -386,4 +450,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = { }, .err_cnt_init = umc_v12_0_err_cnt_init, .query_ras_poison_mode = umc_v12_0_query_ras_poison_mode, + .ecc_info_query_ras_error_count 
= umc_v12_0_ecc_info_query_ras_error_count, + .ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address, }; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h index b34b1e358f..e8de3a9225 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h @@ -117,8 +117,12 @@ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \ } while (0) -bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status); -bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status); +#define MCA_IPID_LO_2_UMC_CH(_ipid_lo) (((((_ipid_lo) >> 20) & 0x1) * 4) + \ + (((_ipid_lo) >> 12) & 0xF)) +#define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7) + +bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status); +bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status); extern const uint32_t umc_v12_0_channel_idx_tbl[] diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index 530549314c..a3ee3c4c65 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -64,7 +64,7 @@ static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev, uint64_t reg_value; if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1) - dev_info(adev->dev, "Deferred error, no user action is needed.\n"); + dev_info(adev->dev, "Deferred error\n"); if (mc_umc_status) dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset); diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c index 8e7b763cfd..fd23cd3485 100644 --- a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c @@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch) WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size); + ring->wptr = 0; + data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK); WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 48bfcd0d55..8ab01ae919 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -100,6 +100,31 @@ static int vcn_v4_0_early_init(void *handle) return amdgpu_vcn_early_init(adev); } +static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx) +{ + volatile struct amdgpu_vcn4_fw_shared *fw_shared; + + fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); + fw_shared->sq.is_enabled = 1; + + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG); + fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ? 
+ AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU; + + if (amdgpu_ip_version(adev, VCN_HWIP, 0) == + IP_VERSION(4, 0, 2)) { + fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT; + fw_shared->drm_key_wa.method = + AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING; + } + + if (amdgpu_vcnfw_log) + amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]); + + return 0; +} + /** * vcn_v4_0_sw_init - sw init for VCN block * @@ -124,8 +149,6 @@ static int vcn_v4_0_sw_init(void *handle) return r; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - volatile struct amdgpu_vcn4_fw_shared *fw_shared; - if (adev->vcn.harvest_config & (1 << i)) continue; @@ -161,23 +184,7 @@ static int vcn_v4_0_sw_init(void *handle) if (r) return r; - fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; - fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); - fw_shared->sq.is_enabled = 1; - - fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG); - fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ? - AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU; - - if (amdgpu_ip_version(adev, VCN_HWIP, 0) == - IP_VERSION(4, 0, 2)) { - fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT; - fw_shared->drm_key_wa.method = - AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING; - } - - if (amdgpu_vcnfw_log) - amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); + vcn_v4_0_fw_shared_init(adev, i); } if (amdgpu_sriov_vf(adev)) { @@ -1273,6 +1280,9 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev) if (adev->vcn.harvest_config & (1 << i)) continue; + // Must re/init fw_shared at beginning + vcn_v4_0_fw_shared_init(adev, i); + table_size = 0; MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i, @@ -2007,22 +2017,6 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta return ret; } -/** - * vcn_v4_0_set_interrupt_state - set VCN block interrupt state - * - * @adev: amdgpu_device pointer - * @source: interrupt sources - * @type: interrupt types - * @state: interrupt states - * - * Set VCN block interrupt state - */ -static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, - unsigned type, enum amdgpu_interrupt_state state) -{ - return 0; -} - /** * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state * @@ -2087,7 +2081,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_ } static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = { - .set = vcn_v4_0_set_interrupt_state, .process = vcn_v4_0_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 2eda30e78f..49e4c3c09a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -269,8 +269,6 @@ static int vcn_v4_0_5_hw_fini(void *handle) vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE); } } - - amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0); } return 0; @@ -1668,22 +1666,6 @@ static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_s return ret; } -/** - * vcn_v4_0_5_set_interrupt_state - set VCN block interrupt state - * - * @adev: amdgpu_device pointer - * @source: interrupt sources - * @type: interrupt types - * @state: interrupt states - * - * Set VCN block interrupt state - */ -static int vcn_v4_0_5_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source, 
- unsigned type, enum amdgpu_interrupt_state state) -{ - return 0; -} - /** * vcn_v4_0_5_process_interrupt - process VCN block interrupt * @@ -1726,7 +1708,6 @@ static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgp } static const struct amdgpu_irq_src_funcs vcn_v4_0_5_irq_funcs = { - .set = vcn_v4_0_5_set_interrupt_state, .process = vcn_v4_0_5_process_interrupt, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c index 174f13eff5..d20060a51e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c @@ -96,6 +96,10 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe) adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl; amdgpu_vpe_psp_update_sram(adev); + + /* Config DPM */ + amdgpu_vpe_configure_dpm(vpe); + return 0; } @@ -128,6 +132,8 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe) } vpe_v6_1_halt(vpe, false); + /* Config DPM */ + amdgpu_vpe_configure_dpm(vpe); return 0; } @@ -264,6 +270,15 @@ static int vpe_v6_1_set_regs(struct amdgpu_vpe *vpe) vpe->regs.queue0_rb_wptr_hi = regVPEC_QUEUE0_RB_WPTR_HI; vpe->regs.queue0_preempt = regVPEC_QUEUE0_PREEMPT; + vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2; + vpe->regs.dpm_pratio = regVPEC_QUEUE6_DUMMY4; + vpe->regs.dpm_request_interval = regVPEC_QUEUE5_DUMMY3; + vpe->regs.dpm_decision_threshold = regVPEC_QUEUE5_DUMMY4; + vpe->regs.dpm_busy_clamp_threshold = regVPEC_QUEUE7_DUMMY2; + vpe->regs.dpm_idle_clamp_threshold = regVPEC_QUEUE7_DUMMY3; + vpe->regs.dpm_request_lv = regVPEC_QUEUE7_DUMMY1; + vpe->regs.context_indicator = regVPEC_QUEUE6_DUMMY3; + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index d7cd5fa313..d1caaf0e6a 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -674,7 +674,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { 0x86ea6a6a, 0x8f6e837a, 0xb96ee0c2, 0xbf800002, 0xb97a0002, 0xbf8a0000, - 0xbe801f6c, 0xbf810000, + 0xbe801f6c, 0xbf9b0000, }; static const uint32_t cwsr_trap_nv1x_hex[] = { @@ -1091,7 +1091,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = { 0xb9eef807, 0x876dff6d, 0x0000ffff, 0x87fe7e7e, 0x87ea6a6a, 0xb9faf802, - 0xbe80226c, 0xbf810000, + 0xbe80226c, 0xbf9b0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0x00000000, @@ -1574,7 +1574,7 @@ static const uint32_t cwsr_trap_arcturus_hex[] = { 0x86ea6a6a, 0x8f6e837a, 0xb96ee0c2, 0xbf800002, 0xb97a0002, 0xbf8a0000, - 0xbe801f6c, 0xbf810000, + 0xbe801f6c, 0xbf9b0000, }; static const uint32_t cwsr_trap_aldebaran_hex[] = { @@ -2065,11 +2065,11 @@ static const uint32_t cwsr_trap_aldebaran_hex[] = { 0x86ea6a6a, 0x8f6e837a, 0xb96ee0c2, 0xbf800002, 0xb97a0002, 0xbf8a0000, - 0xbe801f6c, 0xbf810000, + 0xbe801f6c, 0xbf9b0000, }; static const uint32_t cwsr_trap_gfx10_hex[] = { - 0xbf820001, 0xbf820220, + 0xbf820001, 0xbf820221, 0xb0804004, 0xb978f802, 0x8a78ff78, 0x00020006, 0xb97bf803, 0x876eff78, @@ -2118,391 +2118,391 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xbf900004, 0xbf8cc07f, 0x877aff7f, 0x04000000, 0x8f7a857a, 0x886d7a6d, - 0xbefa037e, 0x877bff7f, - 0x0000ffff, 0xbefe03c1, - 0xbeff03c1, 0xdc5f8000, - 0x007a0000, 0x7e000280, - 0xbefe037a, 0xbeff037b, - 0xb97b02dc, 0x8f7b997b, - 0xb97a3a05, 0x807a817a, - 0xbf0d997b, 0xbf850002, - 0x8f7a897a, 0xbf820001, - 0x8f7a8a7a, 0xb97b1e06, - 0x8f7b8a7b, 0x807a7b7a, + 0x7e008200, 0xbefa037e, 0x877bff7f, 0x0000ffff, - 0x807aff7a, 0x00000200, - 0x807a7e7a, 
0x827b807b, - 0xd7610000, 0x00010870, - 0xd7610000, 0x00010a71, - 0xd7610000, 0x00010c72, - 0xd7610000, 0x00010e73, - 0xd7610000, 0x00011074, - 0xd7610000, 0x00011275, - 0xd7610000, 0x00011476, - 0xd7610000, 0x00011677, - 0xd7610000, 0x00011a79, - 0xd7610000, 0x00011c7e, - 0xd7610000, 0x00011e7f, - 0xbefe03ff, 0x00003fff, - 0xbeff0380, 0xdc5f8040, - 0x007a0000, 0xd760007a, - 0x00011d00, 0xd760007b, - 0x00011f00, 0xbefe037a, - 0xbeff037b, 0xbef4037e, - 0x8775ff7f, 0x0000ffff, - 0x8875ff75, 0x00040000, - 0xbef60380, 0xbef703ff, - 0x10807fac, 0xbef1037c, - 0xbef00380, 0xb97302dc, - 0x8f739973, 0xbefe03c1, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbf850002, - 0xbeff0380, 0xbf820002, - 0xbeff03c1, 0xbf820009, + 0xbefe03c1, 0xbeff03c1, + 0xdc5f8000, 0x007a0000, + 0x7e000280, 0xbefe037a, + 0xbeff037b, 0xb97b02dc, + 0x8f7b997b, 0xb97a3a05, + 0x807a817a, 0xbf0d997b, + 0xbf850002, 0x8f7a897a, + 0xbf820001, 0x8f7a8a7a, + 0xb97b1e06, 0x8f7b8a7b, + 0x807a7b7a, 0x877bff7f, + 0x0000ffff, 0x807aff7a, + 0x00000200, 0x807a7e7a, + 0x827b807b, 0xd7610000, + 0x00010870, 0xd7610000, + 0x00010a71, 0xd7610000, + 0x00010c72, 0xd7610000, + 0x00010e73, 0xd7610000, + 0x00011074, 0xd7610000, + 0x00011275, 0xd7610000, + 0x00011476, 0xd7610000, + 0x00011677, 0xd7610000, + 0x00011a79, 0xd7610000, + 0x00011c7e, 0xd7610000, + 0x00011e7f, 0xbefe03ff, + 0x00003fff, 0xbeff0380, + 0xdc5f8040, 0x007a0000, + 0xd760007a, 0x00011d00, + 0xd760007b, 0x00011f00, + 0xbefe037a, 0xbeff037b, + 0xbef4037e, 0x8775ff7f, + 0x0000ffff, 0x8875ff75, + 0x00040000, 0xbef60380, + 0xbef703ff, 0x10807fac, + 0xbef1037c, 0xbef00380, + 0xb97302dc, 0x8f739973, + 0xbefe03c1, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850002, 0xbeff0380, + 0xbf820002, 0xbeff03c1, + 0xbf820009, 0xbef603ff, + 0x01000000, 0xe0704080, + 0x705d0100, 0xe0704100, + 0x705d0200, 0xe0704180, + 0x705d0300, 0xbf820008, 0xbef603ff, 0x01000000, - 0xe0704080, 0x705d0100, - 0xe0704100, 0x705d0200, - 0xe0704180, 0x705d0300, - 0xbf820008, 0xbef603ff, - 0x01000000, 0xe0704100, - 0x705d0100, 0xe0704200, - 0x705d0200, 0xe0704300, - 0x705d0300, 0xb9703a05, - 0x80708170, 0xbf0d9973, - 0xbf850002, 0x8f708970, - 0xbf820001, 0x8f708a70, - 0xb97a1e06, 0x8f7a8a7a, - 0x80707a70, 0x8070ff70, - 0x00000200, 0xbef603ff, - 0x01000000, 0x7e000280, - 0x7e020280, 0x7e040280, - 0xbefc0380, 0xd7610002, - 0x0000f871, 0x807c817c, - 0xd7610002, 0x0000f86c, - 0x807c817c, 0x8a7aff6d, - 0x80000000, 0xd7610002, - 0x0000f87a, 0x807c817c, - 0xd7610002, 0x0000f86e, - 0x807c817c, 0xd7610002, - 0x0000f86f, 0x807c817c, - 0xd7610002, 0x0000f878, - 0x807c817c, 0xb97af803, - 0xd7610002, 0x0000f87a, - 0x807c817c, 0xd7610002, - 0x0000f87b, 0x807c817c, - 0xb971f801, 0xd7610002, - 0x0000f871, 0x807c817c, - 0xb971f814, 0xd7610002, - 0x0000f871, 0x807c817c, - 0xb971f815, 0xd7610002, - 0x0000f871, 0x807c817c, - 0xbefe03ff, 0x0000ffff, - 0xbeff0380, 0xe0704000, - 0x705d0200, 0xbefe03c1, + 0xe0704100, 0x705d0100, + 0xe0704200, 0x705d0200, + 0xe0704300, 0x705d0300, 0xb9703a05, 0x80708170, 0xbf0d9973, 0xbf850002, 0x8f708970, 0xbf820001, 0x8f708a70, 0xb97a1e06, 0x8f7a8a7a, 0x80707a70, + 0x8070ff70, 0x00000200, 0xbef603ff, 0x01000000, - 0xbef90380, 0xbefc0380, - 0xbf800000, 0xbe802f00, - 0xbe822f02, 0xbe842f04, - 0xbe862f06, 0xbe882f08, - 0xbe8a2f0a, 0xbe8c2f0c, - 0xbe8e2f0e, 0xd7610002, - 0x0000f200, 0x80798179, - 0xd7610002, 0x0000f201, + 0x7e000280, 0x7e020280, + 0x7e040280, 0xbefc0380, + 0xd7610002, 0x0000f871, + 0x807c817c, 0xd7610002, + 0x0000f86c, 0x807c817c, + 0x8a7aff6d, 0x80000000, + 0xd7610002, 0x0000f87a, + 0x807c817c, 
0xd7610002, + 0x0000f86e, 0x807c817c, + 0xd7610002, 0x0000f86f, + 0x807c817c, 0xd7610002, + 0x0000f878, 0x807c817c, + 0xb97af803, 0xd7610002, + 0x0000f87a, 0x807c817c, + 0xd7610002, 0x0000f87b, + 0x807c817c, 0xb971f801, + 0xd7610002, 0x0000f871, + 0x807c817c, 0xb971f814, + 0xd7610002, 0x0000f871, + 0x807c817c, 0xb971f815, + 0xd7610002, 0x0000f871, + 0x807c817c, 0xbefe03ff, + 0x0000ffff, 0xbeff0380, + 0xe0704000, 0x705d0200, + 0xbefe03c1, 0xb9703a05, + 0x80708170, 0xbf0d9973, + 0xbf850002, 0x8f708970, + 0xbf820001, 0x8f708a70, + 0xb97a1e06, 0x8f7a8a7a, + 0x80707a70, 0xbef603ff, + 0x01000000, 0xbef90380, + 0xbefc0380, 0xbf800000, + 0xbe802f00, 0xbe822f02, + 0xbe842f04, 0xbe862f06, + 0xbe882f08, 0xbe8a2f0a, + 0xbe8c2f0c, 0xbe8e2f0e, + 0xd7610002, 0x0000f200, 0x80798179, 0xd7610002, - 0x0000f202, 0x80798179, - 0xd7610002, 0x0000f203, + 0x0000f201, 0x80798179, + 0xd7610002, 0x0000f202, 0x80798179, 0xd7610002, - 0x0000f204, 0x80798179, - 0xd7610002, 0x0000f205, + 0x0000f203, 0x80798179, + 0xd7610002, 0x0000f204, 0x80798179, 0xd7610002, - 0x0000f206, 0x80798179, - 0xd7610002, 0x0000f207, + 0x0000f205, 0x80798179, + 0xd7610002, 0x0000f206, 0x80798179, 0xd7610002, - 0x0000f208, 0x80798179, - 0xd7610002, 0x0000f209, + 0x0000f207, 0x80798179, + 0xd7610002, 0x0000f208, 0x80798179, 0xd7610002, - 0x0000f20a, 0x80798179, - 0xd7610002, 0x0000f20b, + 0x0000f209, 0x80798179, + 0xd7610002, 0x0000f20a, 0x80798179, 0xd7610002, - 0x0000f20c, 0x80798179, - 0xd7610002, 0x0000f20d, + 0x0000f20b, 0x80798179, + 0xd7610002, 0x0000f20c, 0x80798179, 0xd7610002, - 0x0000f20e, 0x80798179, - 0xd7610002, 0x0000f20f, - 0x80798179, 0xbf06a079, - 0xbf840006, 0xe0704000, - 0x705d0200, 0x8070ff70, - 0x00000080, 0xbef90380, - 0x7e040280, 0x807c907c, - 0xbf0aff7c, 0x00000060, - 0xbf85ffbc, 0xbe802f00, - 0xbe822f02, 0xbe842f04, - 0xbe862f06, 0xbe882f08, - 0xbe8a2f0a, 0xd7610002, - 0x0000f200, 0x80798179, - 0xd7610002, 0x0000f201, + 0x0000f20d, 0x80798179, + 0xd7610002, 0x0000f20e, 0x80798179, 0xd7610002, - 0x0000f202, 0x80798179, - 0xd7610002, 0x0000f203, + 0x0000f20f, 0x80798179, + 0xbf06a079, 0xbf840006, + 0xe0704000, 0x705d0200, + 0x8070ff70, 0x00000080, + 0xbef90380, 0x7e040280, + 0x807c907c, 0xbf0aff7c, + 0x00000060, 0xbf85ffbc, + 0xbe802f00, 0xbe822f02, + 0xbe842f04, 0xbe862f06, + 0xbe882f08, 0xbe8a2f0a, + 0xd7610002, 0x0000f200, 0x80798179, 0xd7610002, - 0x0000f204, 0x80798179, - 0xd7610002, 0x0000f205, + 0x0000f201, 0x80798179, + 0xd7610002, 0x0000f202, 0x80798179, 0xd7610002, - 0x0000f206, 0x80798179, - 0xd7610002, 0x0000f207, + 0x0000f203, 0x80798179, + 0xd7610002, 0x0000f204, 0x80798179, 0xd7610002, - 0x0000f208, 0x80798179, - 0xd7610002, 0x0000f209, + 0x0000f205, 0x80798179, + 0xd7610002, 0x0000f206, 0x80798179, 0xd7610002, - 0x0000f20a, 0x80798179, - 0xd7610002, 0x0000f20b, - 0x80798179, 0xe0704000, - 0x705d0200, 0xbefe03c1, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbf850002, - 0xbeff0380, 0xbf820001, - 0xbeff03c1, 0xb97b4306, - 0x877bc17b, 0xbf840044, - 0xbf8a0000, 0x877aff6d, - 0x80000000, 0xbf840040, - 0x8f7b867b, 0x8f7b827b, - 0xbef6037b, 0xb9703a05, - 0x80708170, 0xbf0d9973, - 0xbf850002, 0x8f708970, - 0xbf820001, 0x8f708a70, - 0xb97a1e06, 0x8f7a8a7a, - 0x80707a70, 0x8070ff70, - 0x00000200, 0x8070ff70, - 0x00000080, 0xbef603ff, - 0x01000000, 0xd7650000, - 0x000100c1, 0xd7660000, - 0x000200c1, 0x16000084, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbefc0380, - 0xbf850012, 0xbe8303ff, - 0x00000080, 0xbf800000, - 0xbf800000, 0xbf800000, - 0xd8d80000, 0x01000000, - 0xbf8c0000, 0xe0704000, - 0x705d0100, 0x807c037c, - 
0x80700370, 0xd5250000, - 0x0001ff00, 0x00000080, - 0xbf0a7b7c, 0xbf85fff4, - 0xbf820011, 0xbe8303ff, - 0x00000100, 0xbf800000, - 0xbf800000, 0xbf800000, - 0xd8d80000, 0x01000000, - 0xbf8c0000, 0xe0704000, - 0x705d0100, 0x807c037c, - 0x80700370, 0xd5250000, - 0x0001ff00, 0x00000100, - 0xbf0a7b7c, 0xbf85fff4, + 0x0000f207, 0x80798179, + 0xd7610002, 0x0000f208, + 0x80798179, 0xd7610002, + 0x0000f209, 0x80798179, + 0xd7610002, 0x0000f20a, + 0x80798179, 0xd7610002, + 0x0000f20b, 0x80798179, + 0xe0704000, 0x705d0200, 0xbefe03c1, 0x907c9973, 0x877c817c, 0xbf06817c, - 0xbf850004, 0xbef003ff, - 0x00000200, 0xbeff0380, - 0xbf820003, 0xbef003ff, - 0x00000400, 0xbeff03c1, - 0xb97b3a05, 0x807b817b, - 0x8f7b827b, 0x907c9973, + 0xbf850002, 0xbeff0380, + 0xbf820001, 0xbeff03c1, + 0xb97b4306, 0x877bc17b, + 0xbf840044, 0xbf8a0000, + 0x877aff6d, 0x80000000, + 0xbf840040, 0x8f7b867b, + 0x8f7b827b, 0xbef6037b, + 0xb9703a05, 0x80708170, + 0xbf0d9973, 0xbf850002, + 0x8f708970, 0xbf820001, + 0x8f708a70, 0xb97a1e06, + 0x8f7a8a7a, 0x80707a70, + 0x8070ff70, 0x00000200, + 0x8070ff70, 0x00000080, + 0xbef603ff, 0x01000000, + 0xd7650000, 0x000100c1, + 0xd7660000, 0x000200c1, + 0x16000084, 0x907c9973, 0x877c817c, 0xbf06817c, - 0xbf850017, 0xbef603ff, - 0x01000000, 0xbefc0384, - 0xbf0a7b7c, 0xbf840037, - 0x7e008700, 0x7e028701, - 0x7e048702, 0x7e068703, - 0xe0704000, 0x705d0000, - 0xe0704080, 0x705d0100, - 0xe0704100, 0x705d0200, - 0xe0704180, 0x705d0300, - 0x807c847c, 0x8070ff70, - 0x00000200, 0xbf0a7b7c, - 0xbf85ffef, 0xbf820025, + 0xbefc0380, 0xbf850012, + 0xbe8303ff, 0x00000080, + 0xbf800000, 0xbf800000, + 0xbf800000, 0xd8d80000, + 0x01000000, 0xbf8c0000, + 0xe0704000, 0x705d0100, + 0x807c037c, 0x80700370, + 0xd5250000, 0x0001ff00, + 0x00000080, 0xbf0a7b7c, + 0xbf85fff4, 0xbf820011, + 0xbe8303ff, 0x00000100, + 0xbf800000, 0xbf800000, + 0xbf800000, 0xd8d80000, + 0x01000000, 0xbf8c0000, + 0xe0704000, 0x705d0100, + 0x807c037c, 0x80700370, + 0xd5250000, 0x0001ff00, + 0x00000100, 0xbf0a7b7c, + 0xbf85fff4, 0xbefe03c1, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850004, + 0xbef003ff, 0x00000200, + 0xbeff0380, 0xbf820003, + 0xbef003ff, 0x00000400, + 0xbeff03c1, 0xb97b3a05, + 0x807b817b, 0x8f7b827b, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850017, 0xbef603ff, 0x01000000, 0xbefc0384, 0xbf0a7b7c, - 0xbf840011, 0x7e008700, + 0xbf840037, 0x7e008700, 0x7e028701, 0x7e048702, 0x7e068703, 0xe0704000, - 0x705d0000, 0xe0704100, - 0x705d0100, 0xe0704200, - 0x705d0200, 0xe0704300, + 0x705d0000, 0xe0704080, + 0x705d0100, 0xe0704100, + 0x705d0200, 0xe0704180, 0x705d0300, 0x807c847c, - 0x8070ff70, 0x00000400, + 0x8070ff70, 0x00000200, 0xbf0a7b7c, 0xbf85ffef, - 0xb97b1e06, 0x877bc17b, - 0xbf84000c, 0x8f7b837b, - 0x807b7c7b, 0xbefe03c1, - 0xbeff0380, 0x7e008700, + 0xbf820025, 0xbef603ff, + 0x01000000, 0xbefc0384, + 0xbf0a7b7c, 0xbf840011, + 0x7e008700, 0x7e028701, + 0x7e048702, 0x7e068703, 0xe0704000, 0x705d0000, - 0x807c817c, 0x8070ff70, - 0x00000080, 0xbf0a7b7c, - 0xbf85fff8, 0xbf82013b, - 0xbef4037e, 0x8775ff7f, - 0x0000ffff, 0x8875ff75, - 0x00040000, 0xbef60380, - 0xbef703ff, 0x10807fac, - 0xb97202dc, 0x8f729972, - 0x876eff7f, 0x04000000, - 0xbf840034, 0xbefe03c1, - 0x907c9972, 0x877c817c, - 0xbf06817c, 0xbf850002, - 0xbeff0380, 0xbf820001, - 0xbeff03c1, 0xb96f4306, - 0x876fc16f, 0xbf840029, - 0x8f6f866f, 0x8f6f826f, - 0xbef6036f, 0xb9783a05, - 0x80788178, 0xbf0d9972, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb96e1e06, 0x8f6e8a6e, - 0x80786e78, 0x8078ff78, - 0x00000200, 0x8078ff78, - 0x00000080, 0xbef603ff, - 0x01000000, 
0x907c9972, - 0x877c817c, 0xbf06817c, - 0xbefc0380, 0xbf850009, - 0xe0310000, 0x781d0000, - 0x807cff7c, 0x00000080, - 0x8078ff78, 0x00000080, - 0xbf0a6f7c, 0xbf85fff8, - 0xbf820008, 0xe0310000, - 0x781d0000, 0x807cff7c, - 0x00000100, 0x8078ff78, - 0x00000100, 0xbf0a6f7c, - 0xbf85fff8, 0xbef80380, + 0xe0704100, 0x705d0100, + 0xe0704200, 0x705d0200, + 0xe0704300, 0x705d0300, + 0x807c847c, 0x8070ff70, + 0x00000400, 0xbf0a7b7c, + 0xbf85ffef, 0xb97b1e06, + 0x877bc17b, 0xbf84000c, + 0x8f7b837b, 0x807b7c7b, + 0xbefe03c1, 0xbeff0380, + 0x7e008700, 0xe0704000, + 0x705d0000, 0x807c817c, + 0x8070ff70, 0x00000080, + 0xbf0a7b7c, 0xbf85fff8, + 0xbf82013b, 0xbef4037e, + 0x8775ff7f, 0x0000ffff, + 0x8875ff75, 0x00040000, + 0xbef60380, 0xbef703ff, + 0x10807fac, 0xb97202dc, + 0x8f729972, 0x876eff7f, + 0x04000000, 0xbf840034, 0xbefe03c1, 0x907c9972, 0x877c817c, 0xbf06817c, 0xbf850002, 0xbeff0380, 0xbf820001, 0xbeff03c1, - 0xb96f3a05, 0x806f816f, - 0x8f6f826f, 0x907c9972, - 0x877c817c, 0xbf06817c, - 0xbf850024, 0xbef603ff, - 0x01000000, 0xbeee0378, + 0xb96f4306, 0x876fc16f, + 0xbf840029, 0x8f6f866f, + 0x8f6f826f, 0xbef6036f, + 0xb9783a05, 0x80788178, + 0xbf0d9972, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb96e1e06, + 0x8f6e8a6e, 0x80786e78, 0x8078ff78, 0x00000200, - 0xbefc0384, 0xbf0a6f7c, - 0xbf840050, 0xe0304000, - 0x785d0000, 0xe0304080, - 0x785d0100, 0xe0304100, - 0x785d0200, 0xe0304180, - 0x785d0300, 0xbf8c3f70, - 0x7e008500, 0x7e028501, - 0x7e048502, 0x7e068503, - 0x807c847c, 0x8078ff78, - 0x00000200, 0xbf0a6f7c, - 0xbf85ffee, 0xe0304000, - 0x6e5d0000, 0xe0304080, - 0x6e5d0100, 0xe0304100, - 0x6e5d0200, 0xe0304180, - 0x6e5d0300, 0xbf8c3f70, - 0xbf820034, 0xbef603ff, - 0x01000000, 0xbeee0378, - 0x8078ff78, 0x00000400, - 0xbefc0384, 0xbf0a6f7c, - 0xbf840012, 0xe0304000, - 0x785d0000, 0xe0304100, - 0x785d0100, 0xe0304200, - 0x785d0200, 0xe0304300, - 0x785d0300, 0xbf8c3f70, - 0x7e008500, 0x7e028501, - 0x7e048502, 0x7e068503, - 0x807c847c, 0x8078ff78, - 0x00000400, 0xbf0a6f7c, - 0xbf85ffee, 0xb96f1e06, - 0x876fc16f, 0xbf84000e, - 0x8f6f836f, 0x806f7c6f, - 0xbefe03c1, 0xbeff0380, + 0x8078ff78, 0x00000080, + 0xbef603ff, 0x01000000, + 0x907c9972, 0x877c817c, + 0xbf06817c, 0xbefc0380, + 0xbf850009, 0xe0310000, + 0x781d0000, 0x807cff7c, + 0x00000080, 0x8078ff78, + 0x00000080, 0xbf0a6f7c, + 0xbf85fff8, 0xbf820008, + 0xe0310000, 0x781d0000, + 0x807cff7c, 0x00000100, + 0x8078ff78, 0x00000100, + 0xbf0a6f7c, 0xbf85fff8, + 0xbef80380, 0xbefe03c1, + 0x907c9972, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0xbeff0380, 0xbf820001, + 0xbeff03c1, 0xb96f3a05, + 0x806f816f, 0x8f6f826f, + 0x907c9972, 0x877c817c, + 0xbf06817c, 0xbf850024, + 0xbef603ff, 0x01000000, + 0xbeee0378, 0x8078ff78, + 0x00000200, 0xbefc0384, + 0xbf0a6f7c, 0xbf840050, 0xe0304000, 0x785d0000, + 0xe0304080, 0x785d0100, + 0xe0304100, 0x785d0200, + 0xe0304180, 0x785d0300, 0xbf8c3f70, 0x7e008500, - 0x807c817c, 0x8078ff78, - 0x00000080, 0xbf0a6f7c, - 0xbf85fff7, 0xbeff03c1, + 0x7e028501, 0x7e048502, + 0x7e068503, 0x807c847c, + 0x8078ff78, 0x00000200, + 0xbf0a6f7c, 0xbf85ffee, 0xe0304000, 0x6e5d0000, - 0xe0304100, 0x6e5d0100, - 0xe0304200, 0x6e5d0200, - 0xe0304300, 0x6e5d0300, - 0xbf8c3f70, 0xb9783a05, - 0x80788178, 0xbf0d9972, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb96e1e06, 0x8f6e8a6e, - 0x80786e78, 0x8078ff78, - 0x00000200, 0x80f8ff78, - 0x00000050, 0xbef603ff, - 0x01000000, 0xbefc03ff, - 0x0000006c, 0x80f89078, - 0xf429003a, 0xf0000000, - 0xbf8cc07f, 0x80fc847c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0x80f8a078, - 0xf42d003a, 
0xf0000000, - 0xbf8cc07f, 0x80fc887c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0x80f8c078, - 0xf431003a, 0xf0000000, - 0xbf8cc07f, 0x80fc907c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0xbe883108, - 0xbe8a310a, 0xbe8c310c, - 0xbe8e310e, 0xbf06807c, - 0xbf84fff0, 0xba80f801, - 0x00000000, 0xbf8a0000, + 0xe0304080, 0x6e5d0100, + 0xe0304100, 0x6e5d0200, + 0xe0304180, 0x6e5d0300, + 0xbf8c3f70, 0xbf820034, + 0xbef603ff, 0x01000000, + 0xbeee0378, 0x8078ff78, + 0x00000400, 0xbefc0384, + 0xbf0a6f7c, 0xbf840012, + 0xe0304000, 0x785d0000, + 0xe0304100, 0x785d0100, + 0xe0304200, 0x785d0200, + 0xe0304300, 0x785d0300, + 0xbf8c3f70, 0x7e008500, + 0x7e028501, 0x7e048502, + 0x7e068503, 0x807c847c, + 0x8078ff78, 0x00000400, + 0xbf0a6f7c, 0xbf85ffee, + 0xb96f1e06, 0x876fc16f, + 0xbf84000e, 0x8f6f836f, + 0x806f7c6f, 0xbefe03c1, + 0xbeff0380, 0xe0304000, + 0x785d0000, 0xbf8c3f70, + 0x7e008500, 0x807c817c, + 0x8078ff78, 0x00000080, + 0xbf0a6f7c, 0xbf85fff7, + 0xbeff03c1, 0xe0304000, + 0x6e5d0000, 0xe0304100, + 0x6e5d0100, 0xe0304200, + 0x6e5d0200, 0xe0304300, + 0x6e5d0300, 0xbf8c3f70, 0xb9783a05, 0x80788178, 0xbf0d9972, 0xbf850002, 0x8f788978, 0xbf820001, 0x8f788a78, 0xb96e1e06, 0x8f6e8a6e, 0x80786e78, 0x8078ff78, 0x00000200, + 0x80f8ff78, 0x00000050, 0xbef603ff, 0x01000000, - 0xf4211bfa, 0xf0000000, - 0x80788478, 0xf4211b3a, + 0xbefc03ff, 0x0000006c, + 0x80f89078, 0xf429003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc847c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0x80f8a078, 0xf42d003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc887c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0x80f8c078, 0xf431003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc907c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0xbe883108, 0xbe8a310a, + 0xbe8c310c, 0xbe8e310e, + 0xbf06807c, 0xbf84fff0, + 0xba80f801, 0x00000000, + 0xbf8a0000, 0xb9783a05, + 0x80788178, 0xbf0d9972, + 0xbf850002, 0x8f788978, + 0xbf820001, 0x8f788a78, + 0xb96e1e06, 0x8f6e8a6e, + 0x80786e78, 0x8078ff78, + 0x00000200, 0xbef603ff, + 0x01000000, 0xf4211bfa, 0xf0000000, 0x80788478, - 0xf4211b7a, 0xf0000000, - 0x80788478, 0xf4211c3a, + 0xf4211b3a, 0xf0000000, + 0x80788478, 0xf4211b7a, 0xf0000000, 0x80788478, - 0xf4211c7a, 0xf0000000, - 0x80788478, 0xf4211eba, + 0xf4211c3a, 0xf0000000, + 0x80788478, 0xf4211c7a, 0xf0000000, 0x80788478, - 0xf4211efa, 0xf0000000, - 0x80788478, 0xf4211e7a, + 0xf4211eba, 0xf0000000, + 0x80788478, 0xf4211efa, 0xf0000000, 0x80788478, - 0xf4211cfa, 0xf0000000, - 0x80788478, 0xf4211bba, + 0xf4211e7a, 0xf0000000, + 0x80788478, 0xf4211cfa, 0xf0000000, 0x80788478, - 0xbf8cc07f, 0xb9eef814, 0xf4211bba, 0xf0000000, 0x80788478, 0xbf8cc07f, - 0xb9eef815, 0xbefc036f, - 0xbefe0370, 0xbeff0371, - 0x876f7bff, 0x000003ff, - 0xb9ef4803, 0x876f7bff, - 0xfffff800, 0x906f8b6f, - 0xb9efa2c3, 0xb9f3f801, - 0xb96e3a05, 0x806e816e, - 0xbf0d9972, 0xbf850002, - 0x8f6e896e, 0xbf820001, - 0x8f6e8a6e, 0xb96f1e06, - 0x8f6f8a6f, 0x806e6f6e, - 0x806eff6e, 0x00000200, - 0x806e746e, 0x826f8075, - 0x876fff6f, 0x0000ffff, - 0xf4091c37, 0xfa000050, - 0xf4091d37, 0xfa000060, - 0xf4011e77, 0xfa000074, - 0xbf8cc07f, 0x876dff6d, - 0x0000ffff, 0x87fe7e7e, - 0x87ea6a6a, 0xb9faf802, - 0xbe80226c, 0xbf810000, + 0xb9eef814, 0xf4211bba, + 0xf0000000, 0x80788478, + 0xbf8cc07f, 0xb9eef815, + 0xbefc036f, 0xbefe0370, + 0xbeff0371, 0x876f7bff, + 0x000003ff, 0xb9ef4803, + 0x876f7bff, 0xfffff800, + 0x906f8b6f, 0xb9efa2c3, + 0xb9f3f801, 0xb96e3a05, + 0x806e816e, 0xbf0d9972, + 0xbf850002, 0x8f6e896e, + 0xbf820001, 0x8f6e8a6e, + 
0xb96f1e06, 0x8f6f8a6f, + 0x806e6f6e, 0x806eff6e, + 0x00000200, 0x806e746e, + 0x826f8075, 0x876fff6f, + 0x0000ffff, 0xf4091c37, + 0xfa000050, 0xf4091d37, + 0xfa000060, 0xf4011e77, + 0xfa000074, 0xbf8cc07f, + 0x876dff6d, 0x0000ffff, + 0x87fe7e7e, 0x87ea6a6a, + 0xb9faf802, 0xbe80226c, + 0xbf9b0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, - 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_gfx11_hex[] = { @@ -2944,7 +2944,7 @@ static const uint32_t cwsr_trap_gfx11_hex[] = { 0xb8eef802, 0xbf0d866e, 0xbfa20002, 0xb97af802, 0xbe80486c, 0xb97af802, - 0xbe804a6c, 0xbfb00000, + 0xbe804a6c, 0xbfb10000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0x00000000, @@ -3436,5 +3436,5 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = { 0x86ea6a6a, 0x8f6e837a, 0xb96ee0c2, 0xbf800002, 0xb97a0002, 0xbf8a0000, - 0xbe801f6c, 0xbf810000, + 0xbe801f6c, 0xbf9b0000, }; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index fdab646244..71b3dc0c73 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -369,6 +369,12 @@ L_SLEEP: s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp #if NO_SQC_STORE +#if ASIC_FAMILY <= CHIP_SIENNA_CICHLID + // gfx10: If there was a VALU exception, the exception state must be + // cleared before executing the VALU instructions below. + v_clrexcp +#endif + // Trap temporaries must be saved via VGPR but all VGPRs are in use. // There is no ttmp space to hold the resource constant for VGPR save. // Save v0 by itself since it requires only two SGPRs. @@ -1098,7 +1104,7 @@ L_RETURN_WITHOUT_PRIV: s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution L_END_PGM: - s_endpgm + s_endpgm_saved end function write_hwreg_to_mem(s, s_rsrc, s_mem_offset) diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm index e506411ad2..bb26338204 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm @@ -921,7 +921,7 @@ L_RESTORE: /* the END */ /**************************************************************************/ L_END_PGM: - s_endpgm + s_endpgm_saved end diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index f6d4748c19..08da993df1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -778,8 +778,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp, * nodes, but not more than args->num_of_nodes as that is * the amount of memory allocated by user */ - pa = kzalloc((sizeof(struct kfd_process_device_apertures) * - args->num_of_nodes), GFP_KERNEL); + pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures), + GFP_KERNEL); if (!pa) return -ENOMEM; @@ -1442,7 +1442,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT); /* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */ - amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv); + err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv); + if (err) + goto sync_memory_failed; } mutex_unlock(&p->mutex); @@ -1564,16 +1566,11 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, { struct kfd_ioctl_import_dmabuf_args *args = data; struct kfd_process_device *pdd; - struct dma_buf *dmabuf; int 
idr_handle; uint64_t size; void *mem; int r; - dmabuf = dma_buf_get(args->dmabuf_fd); - if (IS_ERR(dmabuf)) - return PTR_ERR(dmabuf); - mutex_lock(&p->mutex); pdd = kfd_process_device_data_by_id(p, args->gpu_id); if (!pdd) { @@ -1587,10 +1584,10 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, goto err_unlock; } - r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf, - args->va_addr, pdd->drm_priv, - (struct kgd_mem **)&mem, &size, - NULL); + r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd, + args->va_addr, pdd->drm_priv, + (struct kgd_mem **)&mem, &size, + NULL); if (r) goto err_unlock; @@ -1601,7 +1598,6 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, } mutex_unlock(&p->mutex); - dma_buf_put(dmabuf); args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); @@ -1612,7 +1608,6 @@ err_free: pdd->drm_priv, NULL); err_unlock: mutex_unlock(&p->mutex); - dma_buf_put(dmabuf); return r; } @@ -1855,8 +1850,8 @@ static uint32_t get_process_num_bos(struct kfd_process *p) return num_of_bos; } -static int criu_get_prime_handle(struct kgd_mem *mem, int flags, - u32 *shared_fd) +static int criu_get_prime_handle(struct kgd_mem *mem, + int flags, u32 *shared_fd) { struct dma_buf *dmabuf; int ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index c0e7154338..c0ae1a9749 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1997,6 +1997,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n"); while (halt_if_hws_hang) schedule(); + kfd_hws_hang(dqm); return -ETIME; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 0f58be6513..739721254a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -880,6 +880,10 @@ static int copy_signaled_event_data(uint32_t num_events, dst = &data[i].memory_exception_data; src = &event->memory_exception_data; size = sizeof(struct kfd_hsa_memory_exception_data); + } else if (event->type == KFD_EVENT_TYPE_HW_EXCEPTION) { + dst = &data[i].memory_exception_data; + src = &event->hw_exception_data; + size = sizeof(struct kfd_hsa_hw_exception_data); } else if (event->type == KFD_EVENT_TYPE_SIGNAL && waiter->event_age_enabled) { dst = &data[i].signal_event_data.last_event_age; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index b8680e0753..bdc01ca960 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -260,19 +260,6 @@ static void svm_migrate_put_sys_page(unsigned long addr) put_page(page); } -static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate) -{ - unsigned long cpages = 0; - unsigned long i; - - for (i = 0; i < migrate->npages; i++) { - if (migrate->src[i] & MIGRATE_PFN_VALID && - migrate->src[i] & MIGRATE_PFN_MIGRATE) - cpages++; - } - return cpages; -} - static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate) { unsigned long upages = 0; @@ -402,6 +389,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, struct dma_fence *mfence = NULL; struct migrate_vma migrate = { 0 }; unsigned long cpages = 0; + unsigned long mpages = 0; dma_addr_t *scratch; void *buf; int r = -ENOMEM; @@ -442,20 +430,21 @@ 
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, goto out_free; } if (cpages != npages) - pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n", + pr_debug("partial migration, 0x%lx/0x%llx pages collected\n", cpages, npages); else - pr_debug("0x%lx pages migrated\n", cpages); + pr_debug("0x%lx pages collected\n", cpages); r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset); migrate_vma_pages(&migrate); - pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", - svm_migrate_successful_pages(&migrate), cpages, migrate.npages); - svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); + mpages = cpages - svm_migrate_unsuccessful_pages(&migrate); + pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", + mpages, cpages, migrate.npages); + kfd_smi_event_migration_end(node, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, 0, node->id, trigger); @@ -465,12 +454,12 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, out_free: kvfree(buf); out: - if (!r && cpages) { + if (!r && mpages) { pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) - WRITE_ONCE(pdd->page_in, pdd->page_in + cpages); + WRITE_ONCE(pdd->page_in, pdd->page_in + mpages); - return cpages; + return mpages; } return r; } @@ -479,6 +468,8 @@ out: * svm_migrate_ram_to_vram - migrate svm range from system to device * @prange: range structure * @best_loc: the device to migrate to + * @start_mgr: start page to migrate + * @last_mgr: last page to migrate * @mm: the process mm structure * @trigger: reason of migration * @@ -489,19 +480,20 @@ out: */ static int svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, + unsigned long start_mgr, unsigned long last_mgr, struct mm_struct *mm, uint32_t trigger) { unsigned long addr, start, end; struct vm_area_struct *vma; uint64_t ttm_res_offset; struct kfd_node *node; - unsigned long cpages = 0; + unsigned long mpages = 0; long r = 0; - if (prange->actual_loc == best_loc) { - pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n", - prange->svms, prange->start, prange->last, best_loc); - return 0; + if (start_mgr < prange->start || last_mgr > prange->last) { + pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n", + start_mgr, last_mgr, prange->start, prange->last); + return -EFAULT; } node = svm_range_get_node_by_id(prange, best_loc); @@ -510,18 +502,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, return -ENODEV; } - pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms, - prange->start, prange->last, best_loc); + pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n", + prange->svms, start_mgr, last_mgr, prange->start, prange->last, + best_loc); - start = prange->start << PAGE_SHIFT; - end = (prange->last + 1) << PAGE_SHIFT; + start = start_mgr << PAGE_SHIFT; + end = (last_mgr + 1) << PAGE_SHIFT; r = svm_range_vram_node_new(node, prange, true); if (r) { dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r); return r; } - ttm_res_offset = prange->offset << PAGE_SHIFT; + ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT; for (addr = start; addr < end;) { unsigned long next; @@ -536,16 +529,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, pr_debug("failed %ld to migrate\n", r); break; } else { - cpages += r; + mpages += r; } ttm_res_offset += next - addr; addr = next; } - if (cpages) { + if (mpages) { prange->actual_loc = best_loc; - 
svm_range_dma_unmap(prange);
-	} else {
+		prange->vram_pages += mpages;
+	} else if (!prange->actual_loc) {
+		/* if no pages were migrated and all pages in prange are in
+		 * system ram, drop the svm_bo obtained from
+		 * svm_range_vram_node_new
+		 */
 		svm_range_vram_node_free(prange);
 	}
 
@@ -578,7 +574,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
 		 prange->last);
 
-	addr = prange->start << PAGE_SHIFT;
+	addr = migrate->start;
 
 	src = (uint64_t *)(scratch + npages);
 	dst = scratch;
@@ -663,9 +659,8 @@ out_oom:
 * Context: Process context, caller hold mmap read lock, prange->migrate_mutex
 *
 * Return:
- *   0 - success with all pages migrated
 *   negative values - indicate error
- *   positive values - partial migration, number of pages not migrated
+ *   positive values or zero - number of pages migrated
 */
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
@@ -676,6 +671,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
 	uint64_t npages = (end - start) >> PAGE_SHIFT;
 	unsigned long upages = npages;
 	unsigned long cpages = 0;
+	unsigned long mpages = 0;
 	struct amdgpu_device *adev = node->adev;
 	struct kfd_process_device *pdd;
 	struct dma_fence *mfence = NULL;
@@ -725,10 +721,10 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
 		goto out_free;
 	}
 	if (cpages != npages)
-		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
 			 cpages, npages);
 	else
-		pr_debug("0x%lx pages migrated\n", cpages);
+		pr_debug("0x%lx pages collected\n", cpages);
 
 	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
 				    scratch, npages);
@@ -751,17 +747,21 @@ out_free:
 	kvfree(buf);
 out:
 	if (!r && cpages) {
+		mpages = cpages - upages;
 		pdd = svm_range_get_pdd_by_node(prange, node);
 		if (pdd)
-			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
+			WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
 	}
-	return r ? r : upages;
+
+	return r ? r : mpages;
 }
 
 /**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
+ * @start_mgr: first page that needs to be migrated to system ram
+ * @last_mgr: last page that needs to be migrated to system ram
 * @trigger: reason of migration
 * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
 *
@@ -771,6 +771,7 @@ out:
 *   0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
+			unsigned long start_mgr, unsigned long last_mgr,
 			uint32_t trigger, struct page *fault_page)
 {
 	struct kfd_node *node;
@@ -778,26 +779,33 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
 	unsigned long addr;
 	unsigned long start;
 	unsigned long end;
-	unsigned long upages = 0;
+	unsigned long mpages = 0;
 	long r = 0;
 
+	/* this prange has no vram pages to migrate to system ram */
 	if (!prange->actual_loc) {
 		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
 			 prange->start, prange->last);
 		return 0;
 	}
 
+	if (start_mgr < prange->start || last_mgr > prange->last) {
+		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
+			 start_mgr, last_mgr, prange->start, prange->last);
+		return -EFAULT;
+	}
+
 	node = svm_range_get_node_by_id(prange, prange->actual_loc);
 	if (!node) {
 		pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
 		return -ENODEV;
 	}
 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
-		 prange->svms, prange, prange->start, prange->last,
+		 prange->svms, prange, start_mgr, last_mgr,
 		 prange->actual_loc);
 
-	start = prange->start << PAGE_SHIFT;
-	end = (prange->last + 1) << PAGE_SHIFT;
+	start = start_mgr << PAGE_SHIFT;
+	end = (last_mgr + 1) << PAGE_SHIFT;
 
 	for (addr = start; addr < end;) {
 		unsigned long next;
@@ -816,14 +824,21 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
 			pr_debug("failed %ld to migrate prange %p\n", r, prange);
 			break;
 		} else {
-			upages += r;
+			mpages += r;
 		}
 		addr = next;
 	}
 
-	if (r >= 0 && !upages) {
-		svm_range_vram_node_free(prange);
-		prange->actual_loc = 0;
+	if (r >= 0) {
+		prange->vram_pages -= mpages;
+
+		/* prange has no vram pages left: set its actual_loc to system
+		 * and drop its svm_bo ref
+		 */
+		if (prange->vram_pages == 0 && prange->ttm_res) {
+			prange->actual_loc = 0;
+			svm_range_vram_node_free(prange);
+		}
 	}
 
 	return r < 0 ? r : 0;
@@ -833,17 +848,23 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
+ * @start: first page of the range to migrate to gpu node best_loc
+ * @last: last page of the range to migrate to gpu node best_loc
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 *
 * Context: Process context, caller hold mmap read lock, svms lock, prange lock
 *
+ * Migrate all vram pages in prange to system ram first, then migrate the
+ * [start, last] pages from system ram to gpu node best_loc. 
+ * * Return: * 0 - OK, otherwise error code */ static int svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, - struct mm_struct *mm, uint32_t trigger) + unsigned long start, unsigned long last, + struct mm_struct *mm, uint32_t trigger) { int r, retries = 3; @@ -855,7 +876,8 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc); do { - r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL); + r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last, + trigger, NULL); if (r) return r; } while (prange->actual_loc && --retries); @@ -863,17 +885,21 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, if (prange->actual_loc) return -EDEADLK; - return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger); + return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger); } int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, + unsigned long start, unsigned long last, struct mm_struct *mm, uint32_t trigger) { - if (!prange->actual_loc) - return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger); + if (!prange->actual_loc || prange->actual_loc == best_loc) + return svm_migrate_ram_to_vram(prange, best_loc, start, last, + mm, trigger); + else - return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger); + return svm_migrate_vram_to_vram(prange, best_loc, start, last, + mm, trigger); } @@ -889,10 +915,9 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, */ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) { + unsigned long start, last, size; unsigned long addr = vmf->address; struct svm_range_bo *svm_bo; - enum svm_work_list_ops op; - struct svm_range *parent; struct svm_range *prange; struct kfd_process *p; struct mm_struct *mm; @@ -929,51 +954,31 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) mutex_lock(&p->svms.lock); - prange = svm_range_from_addr(&p->svms, addr, &parent); + prange = svm_range_from_addr(&p->svms, addr, NULL); if (!prange) { pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr); r = -EFAULT; goto out_unlock_svms; } - mutex_lock(&parent->migrate_mutex); - if (prange != parent) - mutex_lock_nested(&prange->migrate_mutex, 1); + mutex_lock(&prange->migrate_mutex); if (!prange->actual_loc) goto out_unlock_prange; - svm_range_lock(parent); - if (prange != parent) - mutex_lock_nested(&prange->lock, 1); - r = svm_range_split_by_granularity(p, mm, addr, parent, prange); - if (prange != parent) - mutex_unlock(&prange->lock); - svm_range_unlock(parent); - if (r) { - pr_debug("failed %d to split range by granularity\n", r); - goto out_unlock_prange; - } + /* Align migration range start and size to granularity size */ + size = 1UL << prange->granularity; + start = max(ALIGN_DOWN(addr, size), prange->start); + last = min(ALIGN(addr + 1, size) - 1, prange->last); - r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, - KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, - vmf->page); + r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last, + KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page); if (r) pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n", - r, prange->svms, prange, prange->start, prange->last); - - /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */ - if (p->xnack_enabled && parent == prange) - op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP; - else - op = SVM_OP_UPDATE_RANGE_NOTIFIER; - svm_range_add_list_work(&p->svms, parent, mm, op); - 
schedule_deferred_list_work(&p->svms); + r, prange->svms, prange, start, last); out_unlock_prange: - if (prange != parent) - mutex_unlock(&prange->migrate_mutex); - mutex_unlock(&parent->migrate_mutex); + mutex_unlock(&prange->migrate_mutex); out_unlock_svms: mutex_unlock(&p->svms.lock); out_unref_process: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index 487f263681..2eebf67f9c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -41,9 +41,13 @@ enum MIGRATION_COPY_DIR { }; int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, + unsigned long start, unsigned long last, struct mm_struct *mm, uint32_t trigger); + int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, + unsigned long start, unsigned long last, uint32_t trigger, struct page *fault_page); + unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index d722cbd317..826bc4f6c8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -55,8 +55,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); if (has_wa_flag) { - uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ? - 0xffff : 0xffffffff; + uint32_t wa_mask = + (minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 0xffff : 0xffffffff; m->compute_static_thread_mgmt_se0 = wa_mask; m->compute_static_thread_mgmt_se1 = wa_mask; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 42d881809d..697b6d530d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -303,6 +303,15 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, update_cu_mask(mm, mqd, minfo, 0); set_priority(m, q); + if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) { + if (minfo->update_flag & UPDATE_FLAG_IS_GWS) + m->compute_resource_limits |= + COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK; + else + m->compute_resource_limits &= + ~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK; + } + q->is_active = QUEUE_IS_ACTIVE(*q); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 02d1d9afbf..fedeba3e12 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -532,6 +532,7 @@ struct queue_properties { enum mqd_update_flag { UPDATE_FLAG_DBG_WA_ENABLE = 1, UPDATE_FLAG_DBG_WA_DISABLE = 2, + UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */ }; struct mqd_update_info { @@ -748,7 +749,6 @@ struct kfd_process_device { /* VM context for GPUVM allocations */ struct file *drm_file; void *drm_priv; - atomic64_t tlb_seq; /* GPUVM allocations storage */ struct idr alloc_idr; @@ -918,7 +918,7 @@ struct kfd_process { * fence will be triggered during eviction and new one will be created * during restore */ - struct dma_fence *ef; + struct dma_fence __rcu *ef; /* Work items for evicting and restoring BOs */ struct delayed_work eviction_work; @@ -1462,7 +1462,14 @@ void kfd_signal_reset_event(struct kfd_node *dev); void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid); -void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type); +static inline void kfd_flush_tlb(struct kfd_process_device *pdd, + enum 
TLB_FLUSH_TYPE type) +{ + struct amdgpu_device *adev = pdd->dev->adev; + struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); + + amdgpu_vm_flush_compute_tlb(adev, vm, type, pdd->dev->xcc_mask); +} static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 7a33e06f5c..58c1fe5421 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -664,7 +664,8 @@ int kfd_process_create_wq(void) if (!kfd_process_wq) kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0); if (!kfd_restore_wq) - kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0); + kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", + WQ_FREEZABLE); if (!kfd_process_wq || !kfd_restore_wq) { kfd_process_destroy_wq(); @@ -818,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) mutex_lock(&kfd_processes_mutex); if (kfd_is_locked()) { - mutex_unlock(&kfd_processes_mutex); pr_debug("KFD is locked! Cannot create process"); - return ERR_PTR(-EINVAL); + process = ERR_PTR(-EINVAL); + goto out; } /* A prior open of /dev/kfd could have already created the process. */ @@ -1109,6 +1110,7 @@ static void kfd_process_wq_release(struct work_struct *work) { struct kfd_process *p = container_of(work, struct kfd_process, release_work); + struct dma_fence *ef; kfd_process_dequeue_from_all_devices(p); pqm_uninit(&p->pqm); @@ -1117,7 +1119,9 @@ static void kfd_process_wq_release(struct work_struct *work) * destroyed. This allows any BOs to be freed without * triggering pointless evictions or waiting for fences. */ - dma_fence_signal(p->ef); + synchronize_rcu(); + ef = rcu_access_pointer(p->ef); + dma_fence_signal(ef); kfd_process_remove_sysfs(p); @@ -1126,7 +1130,7 @@ static void kfd_process_wq_release(struct work_struct *work) svm_range_list_fini(p); kfd_process_destroy_pdds(p); - dma_fence_put(p->ef); + dma_fence_put(ef); kfd_event_free_process(p); @@ -1642,6 +1646,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, struct amdgpu_fpriv *drv_priv; struct amdgpu_vm *avm; struct kfd_process *p; + struct dma_fence *ef; struct kfd_node *dev; int ret; @@ -1661,13 +1666,13 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm, &p->kgd_process_info, - &p->ef); + &ef); if (ret) { pr_err("Failed to create process VM object\n"); return ret; } + RCU_INIT_POINTER(p->ef, ef); pdd->drm_priv = drm_file->private_data; - atomic64_set(&pdd->tlb_seq, 0); ret = kfd_process_device_reserve_ib_mem(pdd); if (ret) @@ -1909,6 +1914,23 @@ kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node, return -EINVAL; } +static int signal_eviction_fence(struct kfd_process *p) +{ + struct dma_fence *ef; + int ret; + + rcu_read_lock(); + ef = dma_fence_get_rcu_safe(&p->ef); + rcu_read_unlock(); + if (!ef) + return -EINVAL; + + ret = dma_fence_signal(ef); + dma_fence_put(ef); + + return ret; +} + static void evict_process_worker(struct work_struct *work) { int ret; @@ -1921,31 +1943,45 @@ static void evict_process_worker(struct work_struct *work) * lifetime of this thread, kfd_process p will be valid */ p = container_of(dwork, struct kfd_process, eviction_work); - WARN_ONCE(p->last_eviction_seqno != p->ef->seqno, - "Eviction fence mismatch\n"); - - /* Narrow window of overlap between restore and evict work - * item is possible. 
Once amdgpu_amdkfd_gpuvm_restore_process_bos
-	 * unreserves KFD BOs, it is possible to evicted again. But
-	 * restore has few more steps of finish. So lets wait for any
-	 * previous restore work to complete
-	 */
-	flush_delayed_work(&p->restore_work);
 
 	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
 	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
 	if (!ret) {
-		dma_fence_signal(p->ef);
-		dma_fence_put(p->ef);
-		p->ef = NULL;
-		queue_delayed_work(kfd_restore_wq, &p->restore_work,
-				msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
+		/* If another thread already signaled the eviction fence,
+		 * they are responsible for stopping the queues and scheduling
+		 * the restore work.
+		 */
+		if (signal_eviction_fence(p) ||
+		    mod_delayed_work(kfd_restore_wq, &p->restore_work,
+				     msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
+			kfd_process_restore_queues(p);
 
 		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
 	} else
 		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
 }
 
+static int restore_process_helper(struct kfd_process *p)
+{
+	int ret = 0;
+
+	/* VMs may not have been acquired yet during debugging. */
+	if (p->kgd_process_info) {
+		ret = amdgpu_amdkfd_gpuvm_restore_process_bos(
+			p->kgd_process_info, &p->ef);
+		if (ret)
+			return ret;
+	}
+
+	ret = kfd_process_restore_queues(p);
+	if (!ret)
+		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
+	else
+		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
+
+	return ret;
+}
+
 static void restore_process_worker(struct work_struct *work)
 {
 	struct delayed_work *dwork;
@@ -1971,24 +2007,15 @@ static void restore_process_worker(struct work_struct *work)
 	 */
 	p->last_restore_timestamp = get_jiffies_64();
 
-	/* VMs may not have been acquired yet during debugging. */
-	if (p->kgd_process_info)
-		ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
-							     &p->ef);
+
+	ret = restore_process_helper(p);
 	if (ret) {
 		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
 			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
-		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
-				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
-		WARN(!ret, "reschedule restore work failed\n");
-		return;
+		if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
+				     msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
+			kfd_process_restore_queues(p);
 	}
-
-	ret = kfd_process_restore_queues(p);
-	if (!ret)
-		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
-	else
-		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
 }
 
 void kfd_suspend_all_processes(void)
@@ -1999,14 +2026,9 @@ void kfd_suspend_all_processes(void)
 
 	WARN(debug_evictions, "Evicting all processes");
 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
-		cancel_delayed_work_sync(&p->eviction_work);
-		flush_delayed_work(&p->restore_work);
-
 		if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
 			pr_err("Failed to suspend process 0x%x\n", p->pasid);
-		dma_fence_signal(p->ef);
-		dma_fence_put(p->ef);
-		p->ef = NULL;
+		signal_eviction_fence(p);
 	}
 	srcu_read_unlock(&kfd_processes_srcu, idx);
 }
@@ -2018,7 +2040,7 @@ int kfd_resume_all_processes(void)
 	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
 
 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
-		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
+		if (restore_process_helper(p)) {
 			pr_err("Restore process %d failed during resume\n",
 			       p->pasid);
 			ret = -EFAULT;
@@ -2059,36 +2081,6 @@ int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
 			       KFD_CWSR_TBA_TMA_SIZE, 
vma->vm_page_prot); } -void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) -{ - struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); - uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); - struct kfd_node *dev = pdd->dev; - uint32_t xcc_mask = dev->xcc_mask; - int xcc = 0; - - /* - * It can be that we race and lose here, but that is extremely unlikely - * and the worst thing which could happen is that we flush the changes - * into the TLB once more which is harmless. - */ - if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq) - return; - - if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { - /* Nothing to flush until a VMID is assigned, which - * only happens when the first queue is created. - */ - if (pdd->qpd.vmid) - amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev, - pdd->qpd.vmid); - } else { - for_each_inst(xcc, xcc_mask) - amdgpu_amdkfd_flush_gpu_tlb_pasid( - dev->adev, pdd->process->pasid, type, xcc); - } -} - /* assumes caller holds process lock. */ int kfd_process_drain_interrupts(struct kfd_process_device *pdd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 43eff221ea..4858112f9a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -95,6 +95,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd) int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, void *gws) { + struct mqd_update_info minfo = {0}; struct kfd_node *dev = NULL; struct process_queue_node *pqn; struct kfd_process_device *pdd; @@ -146,9 +147,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, } pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0; + minfo.update_flag = gws ? 
UPDATE_FLAG_IS_GWS : 0;
 
 	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
-							pqn->q, NULL);
+							pqn->q, &minfo);
 }
 
 void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9af1d09438..c50a0dc9c9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -198,6 +198,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
 		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
 				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
 	}
+
 	return 0;
 }
 
@@ -349,6 +350,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
 	INIT_LIST_HEAD(&prange->child_list);
 	atomic_set(&prange->invalid, 0);
 	prange->validate_timestamp = 0;
+	prange->vram_pages = 0;
 	mutex_init(&prange->migrate_mutex);
 	mutex_init(&prange->lock);
 
@@ -395,6 +397,8 @@ static void svm_range_bo_release(struct kref *kref)
 			 prange->start, prange->last);
 		mutex_lock(&prange->lock);
 		prange->svm_bo = NULL;
+		/* prange must not hold any vram pages at this point */
+		WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
 		mutex_unlock(&prange->lock);
 
 		spin_lock(&svm_bo->list_lock);
@@ -873,14 +877,29 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
 
 static void *
 svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
-		     uint64_t offset)
+		     uint64_t offset, uint64_t *vram_pages)
 {
+	unsigned char *src = (unsigned char *)psrc + offset;
 	unsigned char *dst;
+	uint64_t i;
 
 	dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
 	if (!dst)
 		return NULL;
-	memcpy(dst, (unsigned char *)psrc + offset, num_elements * size);
+
+	if (!vram_pages) {
+		memcpy(dst, src, num_elements * size);
+		return (void *)dst;
+	}
+
+	*vram_pages = 0;
+	for (i = 0; i < num_elements; i++) {
+		dma_addr_t *temp;
+		temp = (dma_addr_t *)dst + i;
+		*temp = *((dma_addr_t *)src + i);
+		if (*temp & SVM_RANGE_VRAM_DOMAIN)
+			(*vram_pages)++;
+	}
 
 	return (void *)dst;
 }
@@ -894,7 +913,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
 		if (!src->dma_addr[i])
 			continue;
 		dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
-					sizeof(*src->dma_addr[i]), src->npages, 0);
+					sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
 		if (!dst->dma_addr[i])
 			return -ENOMEM;
 	}
@@ -905,7 +924,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
 static int
 svm_range_split_array(void *ppnew, void *ppold, size_t size,
 		      uint64_t old_start, uint64_t old_n,
-		      uint64_t new_start, uint64_t new_n)
+		      uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
 {
 	unsigned char *new, *old, *pold;
 	uint64_t d;
@@ -917,11 +936,12 @@ svm_range_split_array(void *ppnew, void *ppold, size_t size,
 		return 0;
 
 	d = (new_start - old_start) * size;
-	new = svm_range_copy_array(pold, size, new_n, d);
+	/* get dma addr array for new range and calculate its vram page number */
+	new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
 	if (!new)
 		return -ENOMEM;
 	d = (new_start == old_start) ? 
new_n * size : 0; - old = svm_range_copy_array(pold, size, old_n, d); + old = svm_range_copy_array(pold, size, old_n, d, NULL); if (!old) { kvfree(new); return -ENOMEM; @@ -943,10 +963,13 @@ svm_range_split_pages(struct svm_range *new, struct svm_range *old, for (i = 0; i < MAX_GPU_INSTANCE; i++) { r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i], sizeof(*old->dma_addr[i]), old->start, - npages, new->start, new->npages); + npages, new->start, new->npages, + old->actual_loc ? &new->vram_pages : NULL); if (r) return r; } + if (old->actual_loc) + old->vram_pages -= new->vram_pages; return 0; } @@ -1092,7 +1115,7 @@ static int svm_range_split_tail(struct svm_range *prange, uint64_t new_last, struct list_head *insert_list, struct list_head *remap_list) { - struct svm_range *tail; + struct svm_range *tail = NULL; int r = svm_range_split(prange, prange->start, new_last, &tail); if (!r) { @@ -1107,7 +1130,7 @@ static int svm_range_split_head(struct svm_range *prange, uint64_t new_start, struct list_head *insert_list, struct list_head *remap_list) { - struct svm_range *head; + struct svm_range *head = NULL; int r = svm_range_split(prange, new_start, prange->last, &head); if (!r) { @@ -1130,66 +1153,6 @@ svm_range_add_child(struct svm_range *prange, struct mm_struct *mm, list_add_tail(&pchild->child_list, &prange->child_list); } -/** - * svm_range_split_by_granularity - collect ranges within granularity boundary - * - * @p: the process with svms list - * @mm: mm structure - * @addr: the vm fault address in pages, to split the prange - * @parent: parent range if prange is from child list - * @prange: prange to split - * - * Trims @prange to be a single aligned block of prange->granularity if - * possible. The head and tail are added to the child_list in @parent. - * - * Context: caller must hold mmap_read_lock and prange->lock - * - * Return: - * 0 - OK, otherwise error code - */ -int -svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, - unsigned long addr, struct svm_range *parent, - struct svm_range *prange) -{ - struct svm_range *head, *tail; - unsigned long start, last, size; - int r; - - /* Align splited range start and size to granularity size, then a single - * PTE will be used for whole range, this reduces the number of PTE - * updated and the L1 TLB space used for translation. - */ - size = 1UL << prange->granularity; - start = ALIGN_DOWN(addr, size); - last = ALIGN(addr + 1, size) - 1; - - pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n", - prange->svms, prange->start, prange->last, start, last, size); - - if (start > prange->start) { - r = svm_range_split(prange, start, prange->last, &head); - if (r) - return r; - svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE); - } - - if (last < prange->last) { - r = svm_range_split(prange, prange->start, last, &tail); - if (r) - return r; - svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); - } - - /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */ - if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) { - prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP; - pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n", - prange, prange->start, prange->last, - SVM_OP_ADD_RANGE_AND_MAP); - } - return 0; -} static bool svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b) { @@ -1524,7 +1487,7 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr) uint32_t gpuidx; int r; - drm_exec_init(&ctx->exec, intr ? 
DRM_EXEC_INTERRUPTIBLE_WAIT: 0); + drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0, 0); drm_exec_until_all_locked(&ctx->exec) { for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx); @@ -1609,6 +1572,7 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx) * 5. Release page table (and SVM BO) reservation */ static int svm_range_validate_and_map(struct mm_struct *mm, + unsigned long map_start, unsigned long map_last, struct svm_range *prange, int32_t gpuidx, bool intr, bool wait, bool flush_tlb) { @@ -1689,10 +1653,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } } - start = prange->start << PAGE_SHIFT; - end = (prange->last + 1) << PAGE_SHIFT; + start = map_start << PAGE_SHIFT; + end = (map_last + 1) << PAGE_SHIFT; for (addr = start; !r && addr < end; ) { struct hmm_range *hmm_range; + unsigned long map_start_vma; + unsigned long map_last_vma; struct vm_area_struct *vma; unsigned long next = 0; unsigned long offset; @@ -1720,7 +1686,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } if (!r) { - offset = (addr - start) >> PAGE_SHIFT; + offset = (addr >> PAGE_SHIFT) - prange->start; r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, hmm_range->hmm_pfns); if (r) @@ -1738,9 +1704,16 @@ static int svm_range_validate_and_map(struct mm_struct *mm, r = -EAGAIN; } - if (!r) - r = svm_range_map_to_gpus(prange, offset, npages, readonly, - ctx->bitmap, wait, flush_tlb); + if (!r) { + map_start_vma = max(map_start, prange->start + offset); + map_last_vma = min(map_last, prange->start + offset + npages - 1); + if (map_start_vma <= map_last_vma) { + offset = map_start_vma - prange->start; + npages = map_last_vma - map_start_vma + 1; + r = svm_range_map_to_gpus(prange, offset, npages, readonly, + ctx->bitmap, wait, flush_tlb); + } + } if (!r && next == end) prange->mapped_to_gpu = true; @@ -1833,8 +1806,8 @@ static void svm_range_restore_work(struct work_struct *work) */ mutex_lock(&prange->migrate_mutex); - r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE, - false, true, false); + r = svm_range_validate_and_map(mm, prange->start, prange->last, prange, + MAX_GPU_INSTANCE, false, true, false); if (r) pr_debug("failed %d to map 0x%lx to gpus\n", r, prange->start); @@ -1871,7 +1844,7 @@ out_reschedule: /* If validation failed, reschedule another attempt */ if (evicted_ranges) { pr_debug("reschedule to restore svm range\n"); - schedule_delayed_work(&svms->restore_work, + queue_delayed_work(system_freezable_wq, &svms->restore_work, msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS)); kfd_smi_event_queue_restore_rescheduled(mm); @@ -1947,7 +1920,7 @@ svm_range_evict(struct svm_range *prange, struct mm_struct *mm, pr_debug("failed to quiesce KFD\n"); pr_debug("schedule to restore svm %p ranges\n", svms); - schedule_delayed_work(&svms->restore_work, + queue_delayed_work(system_freezable_wq, &svms->restore_work, msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS)); } else { unsigned long s, l; @@ -2002,6 +1975,7 @@ static struct svm_range *svm_range_clone(struct svm_range *old) new->actual_loc = old->actual_loc; new->granularity = old->granularity; new->mapped_to_gpu = old->mapped_to_gpu; + new->vram_pages = old->vram_pages; bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE); bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE); @@ -2911,6 +2885,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, uint32_t 
vmid, uint32_t node_id, uint64_t addr, bool write_fault) { + unsigned long start, last, size; struct mm_struct *mm = NULL; struct svm_range_list *svms; struct svm_range *prange; @@ -3046,40 +3021,44 @@ retry_write_locked: kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr, write_fault, timestamp); - if (prange->actual_loc != best_loc) { + /* Align migration range start and size to granularity size */ + size = 1UL << prange->granularity; + start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start); + last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last); + if (prange->actual_loc != 0 || best_loc != 0) { migration = true; + if (best_loc) { - r = svm_migrate_to_vram(prange, best_loc, mm, - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); + r = svm_migrate_to_vram(prange, best_loc, start, last, + mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); if (r) { pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n", r, addr); /* Fallback to system memory if migration to * VRAM failed */ - if (prange->actual_loc) - r = svm_migrate_vram_to_ram(prange, mm, - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, - NULL); + if (prange->actual_loc && prange->actual_loc != best_loc) + r = svm_migrate_vram_to_ram(prange, mm, start, last, + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL); else r = 0; } } else { - r = svm_migrate_vram_to_ram(prange, mm, - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, - NULL); + r = svm_migrate_vram_to_ram(prange, mm, start, last, + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL); } if (r) { pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n", - r, svms, prange->start, prange->last); + r, svms, start, last); goto out_unlock_range; } } - r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false); + r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false, + false, false); if (r) pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n", - r, svms, prange->start, prange->last); + r, svms, start, last); kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr, migration); @@ -3425,18 +3404,24 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange, *migrated = false; best_loc = svm_range_best_prefetch_location(prange); - if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED || - best_loc == prange->actual_loc) + /* when best_loc is a GPU node and the same as prange->actual_loc, + * we still need to do migration, as prange->actual_loc != 0 does + * not mean all pages in prange are in VRAM. hmm migrate will pick + * up the right pages during migration.
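A quick standalone sketch of the clamping the fault handler now performs (illustrative only, not part of the patch; the kernel's ALIGN/ALIGN_DOWN and max_t/min_t are re-derived locally so the example builds in user space):

#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))	/* a must be a power of two */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long addr = 0x12345, prange_start = 0x12000, prange_last = 0x12fff;
	unsigned long size = 1UL << 9;	/* granularity = 9, i.e. 512 pages */

	/* Same clamping as the hunk above: align the faulting page to the
	 * migration granularity, then clip to the range boundaries. */
	unsigned long start = ALIGN_DOWN(addr, size);
	unsigned long last = ALIGN(addr + 1, size) - 1;

	if (start < prange_start)
		start = prange_start;
	if (last > prange_last)
		last = prange_last;

	printf("migrate [0x%lx 0x%lx]\n", start, last);	/* [0x12200 0x123ff] */
	return 0;
}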
+ */ + if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) || + (best_loc == 0 && prange->actual_loc == 0)) return 0; if (!best_loc) { - r = svm_migrate_vram_to_ram(prange, mm, + r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last, KFD_MIGRATE_TRIGGER_PREFETCH, NULL); *migrated = !r; return r; } - r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH); + r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last, + mm, KFD_MIGRATE_TRIGGER_PREFETCH); *migrated = !r; return r; @@ -3490,7 +3475,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) mutex_lock(&prange->migrate_mutex); do { + /* migrate all vram pages in this prange to sys ram + * after that prange->actual_loc should be zero + */ r = svm_migrate_vram_to_ram(prange, mm, + prange->start, prange->last, KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL); } while (!r && prange->actual_loc && --retries); @@ -3614,8 +3603,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm, flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu; - r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE, - true, true, flush_tlb); + r = svm_range_validate_and_map(mm, prange->start, prange->last, prange, + MAX_GPU_INSTANCE, true, true, flush_tlb); if (r) pr_debug("failed %d to map svm range\n", r); @@ -3629,8 +3618,8 @@ out_unlock_range: pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n", prange, prange->start, prange->last); mutex_lock(&prange->migrate_mutex); - r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE, - true, true, prange->mapped_to_gpu); + r = svm_range_validate_and_map(mm, prange->start, prange->last, prange, + MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu); if (r) pr_debug("failed %d on remap svm range\n", r); mutex_unlock(&prange->migrate_mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index c528df1d0b..026863a0ab 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -78,6 +78,7 @@ struct svm_work_list_item { * @update_list:link list node used to add to update_list * @mapping: bo_va mapping structure to create and update GPU page table * @npages: number of pages + * @vram_pages: vram pages number in this svm_range * @dma_addr: dma mapping address on each GPU for system memory physical page * @ttm_res: vram ttm resource map * @offset: range start offset within mm_nodes @@ -88,7 +89,9 @@ struct svm_work_list_item { * @flags: flags defined as KFD_IOCTL_SVM_FLAG_* * @perferred_loc: perferred location, 0 for CPU, or GPU id * @perfetch_loc: last prefetch location, 0 for CPU, or GPU id - * @actual_loc: the actual location, 0 for CPU, or GPU id + * @actual_loc: this svm_range location. 0: all pages are from sys ram; + * GPU id: this svm_range may include vram pages from GPU with + * id actual_loc. 
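To make the new bookkeeping concrete, here is a toy user-space model (hypothetical names, not kernel code) of how vram_pages stays consistent across a split: the child counts its VRAM pages while the per-GPU DMA arrays are copied, and the parent gives those pages up, so parent plus child always account for each VRAM page exactly once:

#include <stdio.h>

struct toy_range {
	unsigned long npages;
	unsigned long vram_pages;
	unsigned int actual_loc;	/* 0: all system RAM; else: GPU id */
};

static void toy_split(struct toy_range *old, struct toy_range *new,
		      unsigned long new_npages, unsigned long new_vram)
{
	new->npages = new_npages;
	new->actual_loc = old->actual_loc;
	/* the child only counts VRAM pages if the parent may hold any */
	new->vram_pages = old->actual_loc ? new_vram : 0;

	old->npages -= new_npages;
	if (old->actual_loc)
		old->vram_pages -= new->vram_pages;
}

int main(void)
{
	struct toy_range old = { .npages = 1024, .vram_pages = 512, .actual_loc = 1 };
	struct toy_range new = { 0 };

	toy_split(&old, &new, 256, 128);	/* 128 of the 256 split-off pages are VRAM */
	printf("old: %lu pages, %lu vram; new: %lu pages, %lu vram\n",
	       old.npages, old.vram_pages, new.npages, new.vram_pages);
	/* old: 768 pages, 384 vram; new: 256 pages, 128 vram */
	return 0;
}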
* @granularity:migration granularity, log2 num pages * @invalid: not 0 means cpu page table is invalidated * @validate_timestamp: system timestamp when range is validated @@ -112,6 +115,7 @@ struct svm_range { struct list_head list; struct list_head update_list; uint64_t npages; + uint64_t vram_pages; dma_addr_t *dma_addr[MAX_GPU_INSTANCE]; struct ttm_resource *ttm_res; uint64_t offset; @@ -168,9 +172,6 @@ struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange, int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear); void svm_range_vram_node_free(struct svm_range *prange); -int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, - unsigned long addr, struct svm_range *parent, - struct svm_range *prange); int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, uint32_t vmid, uint32_t node_id, uint64_t addr, bool write_fault); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e5f7c92eeb..6ed2ec381a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1638,12 +1638,10 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext, else mode = UNKNOWN_MEMORY_PARTITION_MODE; - if (pcache->cache_level == 2) - pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc; - else if (mode) - pcache->cache_size = pcache_info[cache_type].cache_size / mode; - else - pcache->cache_size = pcache_info[cache_type].cache_size; + pcache->cache_size = pcache_info[cache_type].cache_size; + /* Partition mode only affects L3 cache size */ + if (mode && pcache->cache_level == 3) + pcache->cache_size /= mode; if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE) pcache->cache_type |= HSA_CACHE_TYPE_DATA; diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index af17ab8027..92a5c5efcf 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -30,6 +30,9 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/clk_mgr subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hwss +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/resource +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dsc +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/optc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 8bf94920d2..ab2a97e354 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -25,22 +25,25 @@ +ifneq ($(CONFIG_DRM_AMD_DC),) AMDGPUDM = \ amdgpu_dm.o \ amdgpu_dm_plane.o \ amdgpu_dm_crtc.o \ amdgpu_dm_irq.o \ amdgpu_dm_mst_types.o \ - amdgpu_dm_color.o + amdgpu_dm_color.o \ + amdgpu_dm_services.o \ + amdgpu_dm_helpers.o \ + amdgpu_dm_pp_smu.o \ + amdgpu_dm_psr.o \ + amdgpu_dm_replay.o \ + amdgpu_dm_wb.o ifdef CONFIG_DRM_AMD_DC_FP AMDGPUDM += dc_fpu.o endif -ifneq ($(CONFIG_DRM_AMD_DC),) -AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o amdgpu_dm_replay.o -endif - AMDGPUDM += amdgpu_dm_hdcp.o ifneq ($(CONFIG_DEBUG_FS),) @@ -52,3 +55,4 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc 
AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM)) AMD_DISPLAY_FILES += $(AMDGPU_DM) +endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index dafe9562a7..718e533ab4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -37,6 +37,7 @@ #include "dc/dc_dmub_srv.h" #include "dc/dc_edid_parser.h" #include "dc/dc_stat.h" +#include "dc/dc_state.h" #include "amdgpu_dm_trace.h" #include "dpcd_defs.h" #include "link/protocols/link_dpcd.h" @@ -54,6 +55,7 @@ #include "amdgpu_dm_crtc.h" #include "amdgpu_dm_hdcp.h" #include +#include "amdgpu_dm_wb.h" #include "amdgpu_pm.h" #include "amdgpu_atombios.h" @@ -84,12 +86,13 @@ #include #include #include +#include #include #include +#include #include #include #include -#include #include @@ -269,6 +272,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, { u32 v_blank_start, v_blank_end, h_position, v_position; struct amdgpu_crtc *acrtc = NULL; + struct dc *dc = adev->dm.dc; if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) return -EINVAL; @@ -281,6 +285,9 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, return 0; } + if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed) + dc_allow_idle_optimizations(dc, false); + /* * TODO rework base driver to use values directly. * for now parse it back into reg-format @@ -574,6 +581,7 @@ static void dm_crtc_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; struct amdgpu_device *adev = irq_params->adev; + struct drm_writeback_job *job; struct amdgpu_crtc *acrtc; unsigned long flags; int vrr_active; @@ -582,6 +590,33 @@ static void dm_crtc_high_irq(void *interrupt_params) if (!acrtc) return; + if (acrtc->wb_pending) { + if (acrtc->wb_conn) { + spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags); + job = list_first_entry_or_null(&acrtc->wb_conn->job_queue, + struct drm_writeback_job, + list_entry); + spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags); + + if (job) { + unsigned int v_total, refresh_hz; + struct dc_stream_state *stream = acrtc->dm_irq_params.stream; + + v_total = stream->adjust.v_total_max ? 
+ stream->adjust.v_total_max : stream->timing.v_total; + refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz * + 100LL, (v_total * stream->timing.h_total)); + mdelay(1000 / refresh_hz); + + drm_writeback_signal_completion(acrtc->wb_conn, 0); + dc_stream_fc_disable_writeback(adev->dm.dc, + acrtc->dm_irq_params.stream, 0); + } + } else + DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__); + acrtc->wb_pending = false; + } + vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); drm_dbg_vbl(adev_to_drm(adev), @@ -724,6 +759,10 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (link && aconnector->dc_link == link) { if (notify->type == DMUB_NOTIFICATION_HPD) @@ -893,8 +932,7 @@ static int dm_early_init(void *handle); /* Allocate memory for FBC compressed data */ static void amdgpu_dm_fbc_init(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct dm_compressor_info *compressor = &adev->dm.compressor; struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); struct drm_display_mode *mode; @@ -948,6 +986,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->audio_inst != port) continue; @@ -988,8 +1030,7 @@ static int amdgpu_dm_audio_component_bind(struct device *kdev, static void amdgpu_dm_audio_component_unbind(struct device *kdev, struct device *hda_kdev, void *data) { - struct drm_device *dev = dev_get_drvdata(kdev); - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev)); struct drm_audio_component *acomp = data; acomp->ops = NULL; @@ -1678,6 +1719,15 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0]; init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0]; + if (amdgpu_dc_debug_mask & DC_DISABLE_IPS) + init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL; + + init_data.flags.disable_ips_in_vpb = 1; + + /* Enable DWB for tested platforms only */ + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) + init_data.num_virtual_links = 1; + INIT_LIST_HEAD(&adev->dm.da_list); retrieve_dmi_info(&adev->dm); @@ -1720,23 +1770,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ adev->dm.dc->debug.ignore_cable_id = true; - /* TODO: There is a new drm mst change where the freedom of - * vc_next_start_slot update is revoked/moved into drm, instead of in - * driver. This forces us to make sure to get vc_next_start_slot updated - * in drm function each time without considering if mst_state is active - * or not. Otherwise, next time hotplug will give wrong start_slot - * number. We are implementing a temporary solution to even notify drm - * mst deallocation when link is no longer of MST type when uncommitting - * the stream so we will have more time to work on a proper solution. 
- * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we - * should notify drm to do a complete "reset" of its states and stop - * calling further drm mst functions when link is no longer of an MST - * type. This could happen when we unplug an MST hubs/displays. When - * uncommit stream comes later after unplug, we should just reset - * hardware states only. - */ - adev->dm.dc->debug.temp_mst_deallocation_sequence = true; - if (adev->dm.dc->caps.dp_hdmi21_pcon_support) DRM_INFO("DP-HDMI FRL PCON supported\n"); @@ -1912,7 +1945,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); - if (adev->dm.hpd_rx_offload_wq) { + if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) { for (i = 0; i < adev->dm.dc->caps.max_links; i++) { if (adev->dm.hpd_rx_offload_wq[i].wq) { destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); @@ -2262,6 +2295,10 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { @@ -2390,6 +2427,10 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_root) @@ -2569,12 +2610,10 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) memset(del_streams, 0, sizeof(del_streams)); - context = dc_create_state(dc); + context = dc_state_create_current_copy(dc); if (context == NULL) goto context_alloc_fail; - dc_resource_state_copy_construct_current(dc, context); - /* First remove from context all streams */ for (i = 0; i < context->stream_count; i++) { struct dc_stream_state *stream = context->streams[i]; @@ -2584,12 +2623,12 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) /* Remove all planes for removed streams and then remove the streams */ for (i = 0; i < del_streams_count; i++) { - if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { + if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) { res = DC_FAIL_DETACH_SURFACES; goto fail; } - res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); + res = dc_state_remove_stream(dc, context, del_streams[i]); if (res != DC_OK) goto fail; } @@ -2597,7 +2636,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) res = dc_commit_streams(dc, context->streams, context->stream_count); fail: - dc_release_state(context); + dc_state_release(context); context_alloc_fail: return res; @@ -2624,7 +2663,7 @@ static int dm_suspend(void *handle) dc_allow_idle_optimizations(adev->dm.dc, false); - dm->cached_dc_state = dc_copy_state(dm->dc->current_state); + dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state); dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); @@ -2871,7 +2910,7 @@ static int dm_resume(void *handle) dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); - dc_release_state(dm->cached_dc_state); + dc_state_release(dm->cached_dc_state); dm->cached_dc_state = NULL; amdgpu_dm_irq_resume_late(adev); @@ -2881,10 +2920,9 @@ static 
int dm_resume(void *handle) return 0; } /* Recreate dc_state - DC invalidates it when setting power state to S3. */ - dc_release_state(dm_state->context); - dm_state->context = dc_create_state(dm->dc); + dc_state_release(dm_state->context); + dm_state->context = dc_state_create(dm->dc); /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ - dc_resource_state_construct(dm->dc, dm_state->context); /* Before powering on DC we need to re-initialize DMUB. */ dm_dmub_hw_resume(adev); @@ -2914,6 +2952,10 @@ static int dm_resume(void *handle) /* Do detection*/ drm_connector_list_iter_begin(ddev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->dc_link) @@ -3495,6 +3537,9 @@ static void register_hpd_handlers(struct amdgpu_device *adev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); dc_link = aconnector->dc_link; @@ -3957,7 +4002,7 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj) old_state = to_dm_atomic_state(obj->state); if (old_state && old_state->context) - new_state->context = dc_copy_state(old_state->context); + new_state->context = dc_state_create_copy(old_state->context); if (!new_state->context) { kfree(new_state); @@ -3973,7 +4018,7 @@ static void dm_atomic_destroy_state(struct drm_private_obj *obj, struct dm_atomic_state *dm_state = to_dm_atomic_state(state); if (dm_state && dm_state->context) - dc_release_state(dm_state->context); + dc_state_release(dm_state->context); kfree(dm_state); } @@ -4009,14 +4054,12 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) if (!state) return -ENOMEM; - state->context = dc_create_state(adev->dm.dc); + state->context = dc_state_create_current_copy(adev->dm.dc); if (!state->context) { kfree(state); return -ENOMEM; } - dc_resource_state_copy_construct_current(adev->dm.dc, state->context); - drm_atomic_private_obj_init(adev_to_drm(adev), &adev->dm.atomic_obj, &state->base, @@ -4024,14 +4067,19 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) r = amdgpu_display_modeset_create_props(adev); if (r) { - dc_release_state(state->context); + dc_state_release(state->context); kfree(state); return r; } +#ifdef AMD_PRIVATE_COLOR + if (amdgpu_dm_create_color_properties(adev)) + return -ENOMEM; +#endif + r = amdgpu_dm_audio_init(adev); if (r) { - dc_release_state(state->context); + dc_state_release(state->context); kfree(state); return r; } @@ -4467,6 +4515,28 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) continue; } + link = dc_get_link_at_index(dm->dc, i); + + if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) { + struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL); + + if (!wbcon) { + DRM_ERROR("KMS: Failed to allocate writeback connector\n"); + continue; + } + + if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) { + DRM_ERROR("KMS: Failed to initialize writeback connector\n"); + kfree(wbcon); + continue; + } + + link->psr_settings.psr_feature_enabled = false; + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + + continue; + } + aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); if (!aconnector) goto fail; @@ -4485,8 +4555,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; } - link = dc_get_link_at_index(dm->dc, i); - 
if (dm->hpd_rx_offload_wq) dm->hpd_rx_offload_wq[aconnector->base.index].aconnector = aconnector; @@ -5089,7 +5157,9 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, * Always set input transfer function, since plane state is refreshed * every time. */ - ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state); + ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, + plane_state, + dc_plane_state); if (ret) return ret; @@ -5503,10 +5573,13 @@ static void fill_stream_properties_from_drm_display_mode( { struct dc_crtc_timing *timing_out = &stream->timing; const struct drm_display_info *info = &connector->display_info; - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *aconnector = NULL; struct hdmi_vendor_infoframe hv_frame; struct hdmi_avi_infoframe avi_frame; + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + aconnector = to_amdgpu_dm_connector(connector); + memset(&hv_frame, 0, sizeof(hv_frame)); memset(&avi_frame, 0, sizeof(avi_frame)); @@ -5676,13 +5749,13 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, } static struct dc_sink * -create_fake_sink(struct amdgpu_dm_connector *aconnector) +create_fake_sink(struct dc_link *link) { struct dc_sink_init_data sink_init_data = { 0 }; struct dc_sink *sink = NULL; - sink_init_data.link = aconnector->dc_link; - sink_init_data.sink_signal = aconnector->dc_link->connector_signal; + sink_init_data.link = link; + sink_init_data.sink_signal = link->connector_signal; sink = dc_sink_create(&sink_init_data); if (!sink) { @@ -6053,6 +6126,7 @@ create_stream_for_sink(struct drm_connector *connector, enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; struct dsc_dec_dpcd_caps dsc_caps; + struct dc_link *link = NULL; struct dc_sink *sink = NULL; drm_mode_init(&mode, drm_mode); @@ -6066,14 +6140,24 @@ create_stream_for_sink(struct drm_connector *connector, if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) { aconnector = NULL; aconnector = to_amdgpu_dm_connector(connector); - if (!aconnector->dc_sink) { - sink = create_fake_sink(aconnector); - if (!sink) - return stream; - } else { - sink = aconnector->dc_sink; - dc_sink_retain(sink); - } + link = aconnector->dc_link; + } else { + struct drm_writeback_connector *wbcon = NULL; + struct amdgpu_dm_wb_connector *dm_wbcon = NULL; + + wbcon = drm_connector_to_writeback(connector); + dm_wbcon = to_amdgpu_dm_wb_connector(wbcon); + link = dm_wbcon->link; + } + + if (!aconnector || !aconnector->dc_sink) { + sink = create_fake_sink(link); + if (!sink) + return stream; + + } else { + sink = aconnector->dc_sink; + dc_sink_retain(sink); } stream = dc_create_stream_for_sink(sink); @@ -6173,19 +6257,16 @@ create_stream_for_sink(struct drm_connector *connector, if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); - if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) { + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || + stream->signal == SIGNAL_TYPE_EDP) { // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet // - stream->use_vsc_sdp_for_colorimetry = false; - if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - stream->use_vsc_sdp_for_colorimetry = - aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; - } else { - if 
(stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) - stream->use_vsc_sdp_for_colorimetry = true; - } + stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && + stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED; + if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); @@ -6455,7 +6536,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) struct edid *edid; struct i2c_adapter *ddc; - if (dc_link->aux_mode) + if (dc_link && dc_link->aux_mode) ddc = &aconnector->dm_dp_aux.aux.ddc; else ddc = &aconnector->i2c->base; @@ -6581,7 +6662,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, if (!dc_plane_state) goto cleanup; - dc_state = dc_create_state(dc); + dc_state = dc_state_create(dc); if (!dc_state) goto cleanup; @@ -6608,9 +6689,9 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, dc_result = dc_validate_plane(dc, dc_plane_state); if (dc_result == DC_OK) - dc_result = dc_add_stream_to_ctx(dc, dc_state, stream); + dc_result = dc_state_add_stream(dc, dc_state, stream); - if (dc_result == DC_OK && !dc_add_plane_to_context( + if (dc_result == DC_OK && !dc_state_add_plane( dc, stream, dc_plane_state, @@ -6622,7 +6703,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, cleanup: if (dc_state) - dc_release_state(dc_state); + dc_state_release(dc_state); if (dc_plane_state) dc_plane_state_release(dc_plane_state); @@ -6930,7 +7011,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, if (IS_ERR(mst_state)) return PTR_ERR(mst_state); - mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); + mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link)); if (!state->duplicated) { int max_bpc = conn_state->max_requested_bpc; @@ -6974,6 +7055,9 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->mst_output_port) @@ -7553,7 +7637,6 @@ create_i2c(struct ddc_service *ddc_service, if (!i2c) return NULL; i2c->base.owner = THIS_MODULE; - i2c->base.class = I2C_CLASS_DDC; i2c->base.dev.parent = &adev->pdev->dev; i2c->base.algo = &amdgpu_dm_i2c_algo; snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); @@ -7579,6 +7662,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, struct dc_link *link = dc_get_link_at_index(dc, link_index); struct amdgpu_i2c_adapter *i2c; + /* Not needed for writeback connector */ link->priv = aconnector; @@ -8189,6 +8273,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; + bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult; + bundle->surface_updates[planes_count].func_shaper = dc_plane->in_shaper_func; + bundle->surface_updates[planes_count].lut3d_func = dc_plane->lut3d_func; + bundle->surface_updates[planes_count].blend_tf = dc_plane->blend_tf; } 
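The surface updates added above (hdr_mult, func_shaper, lut3d_func, blend_tf) feed the pre-blending plane pipeline. A small illustrative summary of the stage order, following the color-pipeline documentation introduced later in this patch (the strings are the new dm_plane_state field names; this is a reading aid, not driver code):

/* Pre-blending plane color stages, in processing order (sketch). */
static const char * const plane_color_pipeline[] = {
	"degamma_lut / degamma_tf",	/* linearize input */
	"hdr_mult",			/* S31.32 gain applied in linear space */
	"ctm",				/* 3x4 color transformation matrix */
	"shaper_lut / shaper_tf",	/* delinearize ahead of the 3D LUT */
	"lut3d",			/* up to 17x17x17 3D LUT */
	"blend_lut / blend_tf",		/* re-linearize for blending */
};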
amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, @@ -8402,6 +8490,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, &acrtc_state->stream->csc_color_matrix; bundle->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func; + bundle->stream_update.lut3d_func = + (struct dc_3dlut *) acrtc_state->stream->lut3d_func; + bundle->stream_update.func_shaper = + (struct dc_transfer_func *) acrtc_state->stream->func_shaper; } acrtc_state->stream->abm_level = acrtc_state->abm_level; @@ -8535,6 +8627,9 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + notify: aconnector = to_amdgpu_dm_connector(connector); @@ -8568,6 +8663,9 @@ notify: if (!status) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); mutex_lock(&adev->dm.audio_lock); @@ -8593,6 +8691,12 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); } +static void dm_clear_writeback(struct amdgpu_display_manager *dm, + struct dm_crtc_state *crtc_state) +{ + dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0); +} + static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, struct dc_state *dc_state) { @@ -8602,9 +8706,38 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + struct drm_connector_state *old_con_state; + struct drm_connector *connector; bool mode_set_reset_required = false; u32 i; + /* Disable writeback */ + for_each_old_connector_in_state(state, connector, old_con_state, i) { + struct dm_connector_state *dm_old_con_state; + struct amdgpu_crtc *acrtc; + + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + old_crtc_state = NULL; + + dm_old_con_state = to_dm_connector_state(old_con_state); + if (!dm_old_con_state->base.crtc) + continue; + + acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc); + if (acrtc) + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); + + if (!acrtc->wb_enabled) + continue; + + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + dm_clear_writeback(dm, dm_old_crtc_state); + acrtc->wb_enabled = false; + } + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); @@ -8729,7 +8862,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, dc_stream_get_status(dm_new_crtc_state->stream); if (!status) - status = dc_stream_get_status_from_state(dc_state, + status = dc_state_get_stream_status(dc_state, dm_new_crtc_state->stream); if (!status) drm_err(dev, @@ -8741,6 +8874,105 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, } } +static void dm_set_writeback(struct amdgpu_display_manager *dm, + struct dm_crtc_state *crtc_state, + struct drm_connector *connector, + struct drm_connector_state *new_con_state) +{ + struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector); + struct amdgpu_device *adev = dm->adev; + struct amdgpu_crtc *acrtc; + struct dc_writeback_info *wb_info; + struct pipe_ctx *pipe = NULL; + struct amdgpu_framebuffer *afb; + int i = 0; + + wb_info = 
kzalloc(sizeof(*wb_info), GFP_KERNEL); + if (!wb_info) { + DRM_ERROR("Failed to allocate wb_info\n"); + return; + } + + acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc); + if (!acrtc) { + DRM_ERROR("no amdgpu_crtc found\n"); + kfree(wb_info); + return; + } + + afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb); + if (!afb) { + DRM_ERROR("No amdgpu_framebuffer found\n"); + kfree(wb_info); + return; + } + + for (i = 0; i < MAX_PIPES; i++) { + if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) { + pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i]; + break; + } + } + + /* fill in wb_info */ + wb_info->wb_enabled = true; + + wb_info->dwb_pipe_inst = 0; + wb_info->dwb_params.dwbscl_black_color = 0; + wb_info->dwb_params.hdr_mult = 0x1F000; + wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS; + wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13; + wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC; + wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC; + + /* width & height from crtc */ + wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay; + wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay; + wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay; + wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay; + + wb_info->dwb_params.cnv_params.crop_en = false; + wb_info->dwb_params.stereo_params.stereo_enabled = false; + + wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits + wb_info->dwb_params.cnv_params.out_min_pix_val = 0; + wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB; + wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS; + + wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444; + + wb_info->dwb_params.capture_rate = dwb_capture_rate_0; + + wb_info->dwb_params.scaler_taps.h_taps = 4; + wb_info->dwb_params.scaler_taps.v_taps = 4; + wb_info->dwb_params.scaler_taps.h_taps_c = 2; + wb_info->dwb_params.scaler_taps.v_taps_c = 2; + wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING; + + wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0]; + wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1]; + + for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) { + wb_info->mcif_buf_params.luma_address[i] = afb->address; + wb_info->mcif_buf_params.chroma_address[i] = 0; + } + + wb_info->mcif_buf_params.p_vmid = 1; + if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) { + wb_info->mcif_warmup_params.start_address.quad_part = afb->address; + wb_info->mcif_warmup_params.region_size = + wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height; + } + wb_info->mcif_warmup_params.p_vmid = 1; + wb_info->writeback_source_plane = pipe->plane_state; + + dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info); + + acrtc->wb_pending = true; + acrtc->wb_conn = wb_conn; + drm_writeback_queue_job(wb_conn, new_con_state); +} + /** * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. 
* @state: The atomic state to commit @@ -8768,16 +9000,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) trace_amdgpu_dm_atomic_commit_tail_begin(state); - if (dm->dc->caps.ips_support) { - for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { - if (new_con_state->crtc && - new_con_state->crtc->state->active && - drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) { - dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); - break; - } - } - } + if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed) + dc_allow_idle_optimizations(dm->dc, false); drm_atomic_helper_update_legacy_modeset_state(dev, state); drm_dp_mst_atomic_wait_for_dependencies(state); @@ -8791,7 +9015,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *aconnector; + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + aconnector = to_amdgpu_dm_connector(connector); if (!adev->dm.hdcp_workqueue) continue; @@ -8975,6 +9204,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * To fix this, DC should permit updating only stream properties. */ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); + if (!dummy_updates) { + DRM_ERROR("Failed to allocate memory for dummy_updates.\n"); + continue; + } for (j = 0; j < status->plane_count; j++) dummy_updates[j].surface = status->plane_states[0]; @@ -9068,6 +9301,31 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); } + /* Enable writeback */ + for_each_new_connector_in_state(state, connector, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + + if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + if (!new_con_state->writeback_job) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + + if (!new_crtc_state) + continue; + + if (acrtc->wb_enabled) + continue; + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state); + acrtc->wb_enabled = true; + } + /* Update audio instances for each connector. 
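Throughout this patch, connector walks gain a DRM_MODE_CONNECTOR_WRITEBACK filter. The reason is structural: writeback connectors embed struct drm_writeback_connector, not struct amdgpu_dm_connector, so calling to_amdgpu_dm_connector() on one would compute a bogus container pointer. A hypothetical helper capturing the pattern (sketch against the kernel types used here, not part of the patch):

/* Return the DM connector, or NULL for writeback connectors, whose
 * container_of() through to_amdgpu_dm_connector() would otherwise
 * point into the wrong containing structure. */
static struct amdgpu_dm_connector *
dm_connector_or_null(struct drm_connector *connector)
{
	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
		return NULL;
	return to_amdgpu_dm_connector(connector);
}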
*/ amdgpu_dm_commit_audio(dev, state); @@ -9185,10 +9443,15 @@ out: void dm_restore_drm_connector_state(struct drm_device *dev, struct drm_connector *connector) { - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *aconnector; struct amdgpu_crtc *disconnected_acrtc; struct dm_crtc_state *acrtc_state; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return; + + aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->dc_sink || !connector->state || !connector->encoder) return; @@ -9265,12 +9528,16 @@ static void get_freesync_config_for_crtc( struct dm_connector_state *new_con_state) { struct mod_freesync_config config = {0}; - struct amdgpu_dm_connector *aconnector = - to_amdgpu_dm_connector(new_con_state->base.connector); + struct amdgpu_dm_connector *aconnector; struct drm_display_mode *mode = &new_crtc_state->base.mode; int vrefresh = drm_mode_vrefresh(mode); bool fs_vid_mode = false; + if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return; + + aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); + new_crtc_state->vrr_supported = new_con_state->freesync_capable && vrefresh >= aconnector->min_vfreq && vrefresh <= aconnector->max_vfreq; @@ -9516,7 +9783,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, crtc->base.id); /* i.e. reset mode */ - if (dc_remove_stream_from_ctx( + if (dc_state_remove_stream( dm->dc, dm_state->context, dm_old_crtc_state->stream) != DC_OK) { @@ -9559,7 +9826,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n", crtc->base.id); - if (dc_add_stream_to_ctx( + if (dc_state_add_stream( dm->dc, dm_state->context, dm_new_crtc_state->stream) != DC_OK) { @@ -9606,6 +9873,7 @@ skip_modeset: * when a modeset is needed, to ensure it gets reprogrammed. */ if (dm_new_crtc_state->base.color_mgmt_changed || + dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf || drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); if (ret) @@ -9639,7 +9907,8 @@ static bool should_reset_plane(struct drm_atomic_state *state, * TODO: Remove this hack for all asics once it proves that the * fast updates works fine on DCN3.2+. */ - if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset) + if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) && + state->allow_modeset) return true; /* Exit early if we know that we're adding or removing the plane. */ @@ -9673,6 +9942,10 @@ static bool should_reset_plane(struct drm_atomic_state *state, */ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { struct amdgpu_framebuffer *old_afb, *new_afb; + struct dm_plane_state *dm_new_other_state, *dm_old_other_state; + + dm_new_other_state = to_dm_plane_state(new_other_state); + dm_old_other_state = to_dm_plane_state(old_other_state); if (other->type == DRM_PLANE_TYPE_CURSOR) continue; @@ -9709,6 +9982,18 @@ static bool should_reset_plane(struct drm_atomic_state *state, old_other_state->color_encoding != new_other_state->color_encoding) return true; + /* HDR/Transfer Function changes. 
*/ + if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf || + dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut || + dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult || + dm_old_other_state->ctm != dm_new_other_state->ctm || + dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut || + dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf || + dm_old_other_state->lut3d != dm_new_other_state->lut3d || + dm_old_other_state->blend_lut != dm_new_other_state->blend_lut || + dm_old_other_state->blend_tf != dm_new_other_state->blend_tf) + return true; + /* Framebuffer checks fall at the end. */ if (!old_other_state->fb || !new_other_state->fb) continue; @@ -9863,7 +10148,7 @@ static int dm_update_plane_state(struct dc *dc, if (ret) return ret; - if (!dc_remove_plane_from_context( + if (!dc_state_remove_plane( dc, dm_old_crtc_state->stream, dm_old_plane_state->dc_state, @@ -9941,7 +10226,7 @@ static int dm_update_plane_state(struct dc *dc, * state. It'll be released when the atomic state is * cleaned. */ - if (!dc_add_plane_to_context( + if (!dc_state_add_plane( dc, dm_new_crtc_state->stream, dc_new_plane_state, @@ -10103,6 +10388,9 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm if (conn_state->crtc != crtc) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (!aconnector->mst_output_port || !aconnector->mst_root) aconnector = NULL; @@ -10815,8 +11103,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct dm_connector_state *dm_con_state = NULL; struct dc_sink *sink; - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; bool freesync_capable = false; enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 3710f4d0f2..9c1871b866 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -32,6 +32,7 @@ #include #include #include "link_service_types.h" +#include /* * This file contains the definition for amdgpu_display_manager @@ -54,6 +55,9 @@ #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A #define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40 #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3 0x3 + +#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL) + /* #include "include/amdgpu_dal_power_if.h" #include "amdgpu_dm_irq.h" @@ -714,11 +718,107 @@ static inline void amdgpu_dm_set_mst_status(uint8_t *status, #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) +struct amdgpu_dm_wb_connector { + struct drm_writeback_connector base; + struct dc_link *link; +}; + +#define to_amdgpu_dm_wb_connector(x) container_of(x, struct amdgpu_dm_wb_connector, base) + extern const struct amdgpu_ip_block_version dm_ip_block; +/* enum amdgpu_transfer_function: pre-defined transfer function supported by AMD. + * + * It includes standardized transfer functions and pure power functions. 
The + * transfer function coefficients are available in modules/color/color_gamma.c + */ +enum amdgpu_transfer_function { + AMDGPU_TRANSFER_FUNCTION_DEFAULT, + AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF, + AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF, + AMDGPU_TRANSFER_FUNCTION_PQ_EOTF, + AMDGPU_TRANSFER_FUNCTION_IDENTITY, + AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF, + AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_BT709_OETF, + AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF, + AMDGPU_TRANSFER_FUNCTION_COUNT +}; + struct dm_plane_state { struct drm_plane_state base; struct dc_plane_state *dc_state; + + /* Plane color mgmt */ + /** + * @degamma_lut: + * + * 1D LUT for mapping framebuffer/plane pixel data before sampling or + * blending operations. It's usually applied to linearize input space. + * The blob (if not NULL) is an array of &struct drm_color_lut. + */ + struct drm_property_blob *degamma_lut; + /** + * @degamma_tf: + * + * Predefined transfer function to tell DC driver the input space to + * linearize. + */ + enum amdgpu_transfer_function degamma_tf; + /** + * @hdr_mult: + * + * Multiplier to 'gain' the plane. When PQ is decoded using the fixed + * func transfer function to the internal FP16 fb, 1.0 -> 80 nits (on + * AMD at least). When sRGB is decoded, 1.0 -> 1.0, obviously. + * Therefore, 1.0 multiplier = 80 nits for SDR content. So if you + * want 203 nits for SDR content, pass in (203.0 / 80.0). Format is + * S31.32 sign-magnitude. + * + * The HDR multiplier can range widely beyond [0.0, 1.0]. This means that PQ + * TF is needed for any subsequent linear-to-non-linear transforms. + */ + __u64 hdr_mult; + /** + * @ctm: + * + * Color transformation matrix. The blob (if not NULL) is a &struct + * drm_color_ctm_3x4. + */ + struct drm_property_blob *ctm; + /** + * @shaper_lut: shaper lookup table blob. The blob (if not NULL) is an + * array of &struct drm_color_lut. + */ + struct drm_property_blob *shaper_lut; + /** + * @shaper_tf: + * + * Predefined transfer function to delinearize color space. + */ + enum amdgpu_transfer_function shaper_tf; + /** + * @lut3d: 3D lookup table blob. The blob (if not NULL) is an array of + * &struct drm_color_lut. + */ + struct drm_property_blob *lut3d; + /** + * @blend_lut: blend lut lookup table blob. The blob (if not NULL) is an + * array of &struct drm_color_lut. + */ + struct drm_property_blob *blend_lut; + /** + * @blend_tf: + * + * Pre-defined transfer function for converting plane pixel data before + * applying blend LUT. + */ + enum amdgpu_transfer_function blend_tf; }; struct dm_crtc_state { @@ -743,6 +843,14 @@ struct dm_crtc_state { struct dc_info_packet vrr_infopacket; int abm_level; + + /** + * @regamma_tf: + * + * Pre-defined transfer function for converting internal FB -> wire + * encoding.
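As a worked example of the S31.32 sign-magnitude format described for @hdr_mult above (a user-space sketch under the patch's 1.0-equals-80-nits convention; to_s31_32() is a hypothetical helper, not driver code):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Encode a multiplier into S31.32 sign-magnitude: magnitude scaled by
 * 2^32 in the low 63 bits, sign in bit 63. */
static uint64_t to_s31_32(double v)
{
	uint64_t mag = (uint64_t)llround(fabs(v) * 4294967296.0);	/* 2^32 */

	return mag | (v < 0 ? 1ULL << 63 : 0);
}

int main(void)
{
	/* 1.0 means 80 nits, so an SDR white of 203 nits is 203.0 / 80.0 */
	printf("0x%llx\n", (unsigned long long)to_s31_32(203.0 / 80.0));
	/* prints 0x28999999a; 1.0 itself encodes as 0x100000000,
	 * matching AMDGPU_HDR_MULT_DEFAULT above */
	return 0;
}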
+ */ + enum amdgpu_transfer_function regamma_tf; }; #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base) @@ -804,14 +912,22 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, void amdgpu_dm_trigger_timing_sync(struct drm_device *dev); +/* 3D LUT max size is 17x17x17 (4913 entries) */ +#define MAX_COLOR_3DLUT_SIZE 17 +#define MAX_COLOR_3DLUT_BITDEPTH 12 +int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev, + struct drm_plane_state *plane_state); +/* 1D LUT size */ #define MAX_COLOR_LUT_ENTRIES 4096 /* Legacy gamm LUT users such as X doesn't like large LUT sizes */ #define MAX_COLOR_LEGACY_LUT_ENTRIES 256 void amdgpu_dm_init_color_mod(void); +int amdgpu_dm_create_color_properties(struct amdgpu_device *adev); int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state); int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc); int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, + struct drm_plane_state *plane_state, struct dc_plane_state *dc_plane_state); void amdgpu_dm_update_connector_after_detect( diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index a4cb23d059..c87b64e464 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -72,6 +72,7 @@ */ #define MAX_DRM_LUT_VALUE 0xFFFF +#define SDR_WHITE_LEVEL_INIT_VALUE 80 /** * amdgpu_dm_init_color_mod - Initialize the color module. @@ -84,6 +85,247 @@ void amdgpu_dm_init_color_mod(void) setup_x_points_distribution(); } +static inline struct fixed31_32 amdgpu_dm_fixpt_from_s3132(__u64 x) +{ + struct fixed31_32 val; + + /* If negative, convert to 2's complement. */ + if (x & (1ULL << 63)) + x = -(x & ~(1ULL << 63)); + + val.value = x; + return val; +} + +#ifdef AMD_PRIVATE_COLOR +/* Pre-defined Transfer Functions (TF) + * + * AMD driver supports pre-defined mathematical functions for transferring + * between encoded values and optical/linear space. Depending on HW color caps, + * ROMs and curves built by the AMD color module support these transforms. + * + * The driver-specific color implementation exposes properties for pre-blending + * degamma TF, shaper TF (before 3D LUT), and blend(dpp.ogam) TF and + * post-blending regamma (mpc.ogam) TF. However, only pre-blending degamma + * supports ROM curves. AMD color module uses pre-defined coefficients to build + * curves for the other blocks. What can be done by each color block is + * described by struct dpp_color_caps and struct mpc_color_caps. + * + * AMD driver-specific color API exposes the following pre-defined transfer + * functions: + * + * - Identity: linear/identity relationship between pixel value and + * luminance value; + * - Gamma 2.2, Gamma 2.4, Gamma 2.6: pure power functions; + * - sRGB: 2.4: The piece-wise transfer function from IEC 61966-2-1:1999; + * - BT.709: has a linear segment in the bottom part and then a power function + * with a 0.45 (~1/2.22) gamma for the rest of the range; standardized by + * ITU-R BT.709-6; + * - PQ (Perceptual Quantizer): used for HDR display, allows luminance range + * capability of 0 to 10,000 nits; standardized by SMPTE ST 2084. + * + * The AMD color model is designed with an assumption that SDR (sRGB, BT.709, + * Gamma 2.2, etc.) peak white maps (normalized to 1.0 FP) to 80 nits in the PQ + * system.
This has the implication that PQ EOTF (non-linear to linear) maps to + * [0.0..125.0] where 125.0 = 10,000 nits / 80 nits. + * + * Non-linear and linear forms are described in the table below: + * + * ┌───────────┬─────────────────────┬──────────────────────┐ + * │ │ Non-linear │ Linear │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ sRGB │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ BT709 │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ Gamma 2.x │ UNORM or [0.0, 1.0] │ [0.0, 1.0] │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ PQ │ UNORM or FP16 CCCS* │ [0.0, 125.0] │ + * ├───────────┼─────────────────────┼──────────────────────┤ + * │ Identity │ UNORM or FP16 CCCS* │ [0.0, 1.0] or CCCS** │ + * └───────────┴─────────────────────┴──────────────────────┘ + * * CCCS: Windows canonical composition color space + * ** Respectively + * + * In the driver-specific API, color block names attached to TF properties + * suggest the intention regarding non-linear encoding pixel's luminance + * values. As some newer encodings don't use gamma curve, we make encoding and + * decoding explicit by defining an enum list of transfer functions supported + * in terms of EOTF and inverse EOTF, where: + * + * - EOTF (electro-optical transfer function): is the transfer function to go + * from the encoded value to an optical (linear) value. De-gamma functions + * traditionally do this. + * - Inverse EOTF (simply the inverse of the EOTF): is usually intended to go + * from an optical/linear space (which might have been used for blending) + * back to the encoded values. Gamma functions traditionally do this. + */ +static const char * const +amdgpu_transfer_function_names[] = { + [AMDGPU_TRANSFER_FUNCTION_DEFAULT] = "Default", + [AMDGPU_TRANSFER_FUNCTION_IDENTITY] = "Identity", + [AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF] = "sRGB EOTF", + [AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF] = "BT.709 inv_OETF", + [AMDGPU_TRANSFER_FUNCTION_PQ_EOTF] = "PQ EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF] = "Gamma 2.2 EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF] = "Gamma 2.4 EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF] = "Gamma 2.6 EOTF", + [AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF] = "sRGB inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_BT709_OETF] = "BT.709 OETF", + [AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF] = "PQ inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF] = "Gamma 2.2 inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF] = "Gamma 2.4 inv_EOTF", + [AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF] = "Gamma 2.6 inv_EOTF", +}; + +static const u32 amdgpu_eotf = + BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF) | + BIT(AMDGPU_TRANSFER_FUNCTION_PQ_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF); + +static const u32 amdgpu_inv_eotf = + BIT(AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_BT709_OETF) | + BIT(AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF) | + BIT(AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF); + +static struct drm_property * +amdgpu_create_tf_property(struct drm_device *dev, + const char *name, + u32 supported_tf) +{ + u32 transfer_functions = supported_tf | + BIT(AMDGPU_TRANSFER_FUNCTION_DEFAULT) | + 
BIT(AMDGPU_TRANSFER_FUNCTION_IDENTITY); + struct drm_prop_enum_list enum_list[AMDGPU_TRANSFER_FUNCTION_COUNT]; + int i, len; + + len = 0; + for (i = 0; i < AMDGPU_TRANSFER_FUNCTION_COUNT; i++) { + if ((transfer_functions & BIT(i)) == 0) + continue; + + enum_list[len].type = i; + enum_list[len].name = amdgpu_transfer_function_names[i]; + len++; + } + + return drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, + name, enum_list, len); +} + +int +amdgpu_dm_create_color_properties(struct amdgpu_device *adev) +{ + struct drm_property *prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_DEGAMMA_LUT", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_degamma_lut_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_DEGAMMA_LUT_SIZE", + 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_degamma_lut_size_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_PLANE_DEGAMMA_TF", + amdgpu_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_degamma_tf_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + 0, "AMD_PLANE_HDR_MULT", 0, U64_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_hdr_mult_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_CTM", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_ctm_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_SHAPER_LUT", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_shaper_lut_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_SHAPER_LUT_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_shaper_lut_size_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_PLANE_SHAPER_TF", + amdgpu_inv_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_shaper_tf_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_LUT3D", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_lut3d_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_LUT3D_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_lut3d_size_property = prop; + + prop = drm_property_create(adev_to_drm(adev), + DRM_MODE_PROP_BLOB, + "AMD_PLANE_BLEND_LUT", 0); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_blend_lut_property = prop; + + prop = drm_property_create_range(adev_to_drm(adev), + DRM_MODE_PROP_IMMUTABLE, + "AMD_PLANE_BLEND_LUT_SIZE", 0, UINT_MAX); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_blend_lut_size_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_PLANE_BLEND_TF", + amdgpu_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.plane_blend_tf_property = prop; + + prop = amdgpu_create_tf_property(adev_to_drm(adev), + "AMD_CRTC_REGAMMA_TF", + amdgpu_inv_eotf); + if (!prop) + return -ENOMEM; + adev->mode_info.regamma_tf_property = prop; + + return 0; +} +#endif + /** * __extract_blob_lut - Extracts the DRM lut and lut size from a blob. 
* @blob: DRM color mgmt property blob @@ -182,7 +424,6 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut, static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm, struct fixed31_32 *matrix) { - int64_t val; int i; /* @@ -201,12 +442,29 @@ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm, } /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */ - val = ctm->matrix[i - (i / 4)]; - /* If negative, convert to 2's complement. */ - if (val & (1ULL << 63)) - val = -(val & ~(1ULL << 63)); + matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i - (i / 4)]); + } +} - matrix[i].value = val; +/** + * __drm_ctm_3x4_to_dc_matrix - converts a DRM CTM 3x4 to a DC CSC float matrix + * @ctm: DRM color transformation matrix with 3x4 dimensions + * @matrix: DC CSC float matrix + * + * The matrix needs to be a 3x4 (12 entry) matrix. + */ +static void __drm_ctm_3x4_to_dc_matrix(const struct drm_color_ctm_3x4 *ctm, + struct fixed31_32 *matrix) +{ + int i; + + /* The format provided is S31.32, using signed-magnitude representation. + * Our fixed31_32 is also S31.32, but uses 2's complement. We have + * to convert from signed-magnitude to 2's complement. + */ + for (i = 0; i < 12; i++) { + /* gamut_remap_matrix[i] = ctm[i]; the 3x4 entries map 1:1 */ + matrix[i] = amdgpu_dm_fixpt_from_s3132(ctm->matrix[i]); + } } @@ -268,16 +526,18 @@ static int __set_output_tf(struct dc_transfer_func *func, struct calculate_buffer cal_buffer = {0}; bool res; - ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES); - cal_buffer.buffer_index = -1; - gamma = dc_create_gamma(); - if (!gamma) - return -ENOMEM; + if (lut_size) { + ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES); - gamma->num_entries = lut_size; - __drm_lut_to_dc_gamma(lut, gamma, false); + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->num_entries = lut_size; + __drm_lut_to_dc_gamma(lut, gamma, false); + } if (func->tf == TRANSFER_FUNCTION_LINEAR) { /* @@ -285,27 +545,68 @@ static int __set_output_tf(struct dc_transfer_func *func, * on top of a linear input. But degamma params can be used * instead to simulate this. */ - gamma->type = GAMMA_CUSTOM; + if (gamma) + gamma->type = GAMMA_CUSTOM; res = mod_color_calculate_degamma_params(NULL, func, - gamma, true); + gamma, gamma != NULL); } else { /* * Assume sRGB. The actual mapping will depend on whether the * input was legacy or not. */ - gamma->type = GAMMA_CS_TFM_1D; - res = mod_color_calculate_regamma_params(func, gamma, false, + if (gamma) + gamma->type = GAMMA_CS_TFM_1D; + res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL, has_rom, NULL, &cal_buffer); } - dc_gamma_release(&gamma); + if (gamma) + dc_gamma_release(&gamma); return res ? 0 : -ENOMEM; } +static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream, + const struct drm_color_lut *regamma_lut, + uint32_t regamma_size, bool has_rom, + enum dc_transfer_func_predefined tf) +{ + struct dc_transfer_func *out_tf = stream->out_transfer_func; + int ret = 0; + + if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) { + /* + * CRTC RGM goes into RGM LUT. + * + * Note: there is no implicit sRGB regamma here. We use the + * degamma calculation from the color module to calculate the + * curve from a linear base if gamma TF is not set. However, if + * gamma TF (!= Linear) and LUT are set at the same time, we + * will use regamma calculation, and the color module will + * combine the pre-defined TF and the custom LUT values into + * the LUT that's actually programmed.
+ */ + out_tf->type = TF_TYPE_DISTRIBUTED_POINTS; + out_tf->tf = tf; + out_tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + + ret = __set_output_tf(out_tf, regamma_lut, regamma_size, has_rom); + } else { + /* + * No CRTC RGM means we can just put the block into bypass + * since we don't have any plane level adjustments using it. + */ + out_tf->type = TF_TYPE_BYPASS; + out_tf->tf = TRANSFER_FUNCTION_LINEAR; + } + + return ret; +} + /** * __set_input_tf - calculates the input transfer function based on expected * input space. + * @caps: dc color capabilities * @func: transfer function * @lut: lookup table that defines the color space * @lut_size: size of respective lut. @@ -313,27 +614,240 @@ static int __set_output_tf(struct dc_transfer_func *func, * Returns: * 0 in case of success. -ENOMEM if fails. */ -static int __set_input_tf(struct dc_transfer_func *func, +static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *func, const struct drm_color_lut *lut, uint32_t lut_size) { struct dc_gamma *gamma = NULL; bool res; - gamma = dc_create_gamma(); - if (!gamma) - return -ENOMEM; + if (lut_size) { + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; - gamma->type = GAMMA_CUSTOM; - gamma->num_entries = lut_size; + gamma->type = GAMMA_CUSTOM; + gamma->num_entries = lut_size; + + __drm_lut_to_dc_gamma(lut, gamma, false); + } - __drm_lut_to_dc_gamma(lut, gamma, false); + res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL); - res = mod_color_calculate_degamma_params(NULL, func, gamma, true); - dc_gamma_release(&gamma); + if (gamma) + dc_gamma_release(&gamma); return res ? 0 : -ENOMEM; } +static enum dc_transfer_func_predefined +amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf) +{ + switch (tf) { + default: + case AMDGPU_TRANSFER_FUNCTION_DEFAULT: + case AMDGPU_TRANSFER_FUNCTION_IDENTITY: + return TRANSFER_FUNCTION_LINEAR; + case AMDGPU_TRANSFER_FUNCTION_SRGB_EOTF: + case AMDGPU_TRANSFER_FUNCTION_SRGB_INV_EOTF: + return TRANSFER_FUNCTION_SRGB; + case AMDGPU_TRANSFER_FUNCTION_BT709_OETF: + case AMDGPU_TRANSFER_FUNCTION_BT709_INV_OETF: + return TRANSFER_FUNCTION_BT709; + case AMDGPU_TRANSFER_FUNCTION_PQ_EOTF: + case AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF: + return TRANSFER_FUNCTION_PQ; + case AMDGPU_TRANSFER_FUNCTION_GAMMA22_EOTF: + case AMDGPU_TRANSFER_FUNCTION_GAMMA22_INV_EOTF: + return TRANSFER_FUNCTION_GAMMA22; + case AMDGPU_TRANSFER_FUNCTION_GAMMA24_EOTF: + case AMDGPU_TRANSFER_FUNCTION_GAMMA24_INV_EOTF: + return TRANSFER_FUNCTION_GAMMA24; + case AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF: + case AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF: + return TRANSFER_FUNCTION_GAMMA26; + } +} + +static void __to_dc_lut3d_color(struct dc_rgb *rgb, + const struct drm_color_lut lut, + int bit_precision) +{ + rgb->red = drm_color_lut_extract(lut.red, bit_precision); + rgb->green = drm_color_lut_extract(lut.green, bit_precision); + rgb->blue = drm_color_lut_extract(lut.blue, bit_precision); +} + +static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut, + uint32_t lut3d_size, + struct tetrahedral_params *params, + bool use_tetrahedral_9, + int bit_depth) +{ + struct dc_rgb *lut0; + struct dc_rgb *lut1; + struct dc_rgb *lut2; + struct dc_rgb *lut3; + int lut_i, i; + + + if (use_tetrahedral_9) { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + } else { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = 
params->tetrahedral_17.lut2; + lut3 = params->tetrahedral_17.lut3; + } + + for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) { + /* + * The 3D LUT RGB values are distributed across four arrays + * lut0-3, where the first holds 1229 entries and the others + * hold 1228 each. The bit depth supported per 3D LUT channel + * is 12-bit, but DC also supports 10-bit. + * + * TODO: improve the color pipeline API to let userspace set + * bit depth and 3D LUT size/stride, as specified by VA-API. + */ + __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth); + __to_dc_lut3d_color(&lut1[lut_i], lut[i + 1], bit_depth); + __to_dc_lut3d_color(&lut2[lut_i], lut[i + 2], bit_depth); + __to_dc_lut3d_color(&lut3[lut_i], lut[i + 3], bit_depth); + } + /* lut0 has 1229 points (lut_size/4 + 1) */ + __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth); +} + +/* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream + * @drm_lut3d: user 3D LUT + * @drm_lut3d_size: size of 3D LUT + * @lut3d: DC 3D LUT + * + * Map user 3D LUT data to the DC 3D LUT and set all bits necessary to + * program it on DCN accordingly. + */ +static void amdgpu_dm_atomic_lut3d(const struct drm_color_lut *drm_lut3d, + uint32_t drm_lut3d_size, + struct dc_3dlut *lut) +{ + if (!drm_lut3d_size) { + lut->state.bits.initialized = 0; + } else { + /* Stride and bit depth are not programmable by API yet. + * Therefore, only a 17x17x17 3D LUT (12-bit) is supported. + */ + lut->lut_3d.use_tetrahedral_9 = false; + lut->lut_3d.use_12bits = true; + lut->state.bits.initialized = 1; + __drm_3dlut_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d, + lut->lut_3d.use_tetrahedral_9, + MAX_COLOR_3DLUT_BITDEPTH); + } +} + +static int amdgpu_dm_atomic_shaper_lut(const struct drm_color_lut *shaper_lut, + bool has_rom, + enum dc_transfer_func_predefined tf, + uint32_t shaper_size, + struct dc_transfer_func *func_shaper) +{ + int ret = 0; + + if (shaper_size || tf != TRANSFER_FUNCTION_LINEAR) { + /* + * If user shaper LUT is set, we assume a linear color space + * (linearized by degamma 1D LUT or not). + */ + func_shaper->type = TF_TYPE_DISTRIBUTED_POINTS; + func_shaper->tf = tf; + func_shaper->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + + ret = __set_output_tf(func_shaper, shaper_lut, shaper_size, has_rom); + } else { + func_shaper->type = TF_TYPE_BYPASS; + func_shaper->tf = TRANSFER_FUNCTION_LINEAR; + } + + return ret; +} + +static int amdgpu_dm_atomic_blend_lut(const struct drm_color_lut *blend_lut, + bool has_rom, + enum dc_transfer_func_predefined tf, + uint32_t blend_size, + struct dc_transfer_func *func_blend) +{ + int ret = 0; + + if (blend_size || tf != TRANSFER_FUNCTION_LINEAR) { + /* + * DRM plane gamma LUT or TF means we are linearizing color + * space before blending (similar to degamma programming). As + * we don't have hardcoded curve support, we use the AMD color + * module to fill in the parameters that will be translated to + * HW points.
+ */ + func_blend->type = TF_TYPE_DISTRIBUTED_POINTS; + func_blend->tf = tf; + func_blend->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; + + ret = __set_input_tf(NULL, func_blend, blend_lut, blend_size); + } else { + func_blend->type = TF_TYPE_BYPASS; + func_blend->tf = TRANSFER_FUNCTION_LINEAR; + } + + return ret; +} + +/** + * amdgpu_dm_verify_lut3d_size - verifies if the 3D LUT is supported and if the + * user shaper and 3D LUTs match the HW supported size + * @adev: amdgpu device + * @plane_state: the DRM plane state + * + * Verifies if pre-blending (DPP) 3D LUT is supported by the HW (DCN 2.0 or + * newer) and if the user shaper and 3D LUTs match the supported size. + * + * Returns: + * 0 on success. -EINVAL if LUT sizes are invalid. + */ +int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev, + struct drm_plane_state *plane_state) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + const struct drm_color_lut *shaper = NULL, *lut3d = NULL; + uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE; + bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut; + + /* The shaper LUT is only available if the HW has 3D LUT color caps */ + exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0; + shaper = __extract_blob_lut(dm_plane_state->shaper_lut, &size); + + if (shaper && size != exp_size) { + drm_dbg(&adev->ddev, + "Invalid Shaper LUT size. Should be %u but got %u.\n", + exp_size, size); + return -EINVAL; + } + + /* The number of 3D LUT entries is the dimension size cubed */ + exp_size = has_3dlut ? dim_size * dim_size * dim_size : 0; + lut3d = __extract_blob_lut(dm_plane_state->lut3d, &size); + + if (lut3d && size != exp_size) { + drm_dbg(&adev->ddev, + "Invalid 3D LUT size. Should be %u but got %u.\n", + exp_size, size); + return -EINVAL; + } + + return 0; +} + /** * amdgpu_dm_verify_lut_sizes - verifies if DRM luts match the hw supported sizes * @crtc_state: the DRM CRTC state @@ -401,9 +915,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) const struct drm_color_lut *degamma_lut, *regamma_lut; uint32_t degamma_size, regamma_size; bool has_regamma, has_degamma; + enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_LINEAR; bool is_legacy; int r; + tf = amdgpu_tf_to_dc_tf(crtc->regamma_tf); + r = amdgpu_dm_verify_lut_sizes(&crtc->base); if (r) return r; @@ -439,27 +956,23 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) crtc->cm_is_degamma_srgb = true; stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; - + /* + * Note: although we pass has_rom as a parameter here, we never + * actually use ROM because the color module only takes the ROM + * path if transfer_func->type == PREDEFINED. + * + * See more in mod_color_calculate_regamma_params() + */ r = __set_legacy_tf(stream->out_transfer_func, regamma_lut, regamma_size, has_rom); if (r) return r; - } else if (has_regamma) { - /* If atomic regamma, CRTC RGM goes into RGM LUT. */ - stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; - - r = __set_output_tf(stream->out_transfer_func, regamma_lut, - regamma_size, has_rom); + } else { + regamma_size = has_regamma ? regamma_size : 0; + r = amdgpu_dm_set_atomic_regamma(stream, regamma_lut, + regamma_size, has_rom, tf); if (r) return r; - } else { - /* - * No CRTC RGM means we can just put the block into bypass - * since we don't have any plane level adjustments using it. - */
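/*
 * A worked instance of the size checks above, assuming MAX_COLOR_3DLUT_SIZE
 * is 17 (consistent with the 17x17x17 comments in this file); the function
 * name is illustrative, not part of the patch:
 *
 *	static void lut3d_size_example(void)
 *	{
 *		unsigned int dim = 17;
 *		unsigned int entries = dim * dim * dim;		// 4913 blob entries
 *		unsigned int lut0_len = entries / 4 + 1;	// 1229
 *		unsigned int lut123_len = entries / 4;		// 1228 for lut1..lut3
 *
 *		WARN_ON(lut0_len + 3 * lut123_len != entries);	// 1229 + 3 * 1228 == 4913
 *	}
 */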
- stream->out_transfer_func->type = TF_TYPE_BYPASS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; } /* @@ -495,20 +1008,10 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) return 0; } -/** - * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane. - * @crtc: amdgpu_dm crtc state - * @dc_plane_state: target DC surface - * - * Update the underlying dc_stream_state's input transfer function (ITF) in - * preparation for hardware commit. The transfer function used depends on - * the preparation done on the stream for color management. - * - * Returns: - * 0 on success. -ENOMEM if mem allocation fails. - */ -int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, - struct dc_plane_state *dc_plane_state) +static int +map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc, + struct dc_plane_state *dc_plane_state, + struct dc_color_caps *caps) { const struct drm_color_lut *degamma_lut; enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; @@ -531,8 +1034,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, &degamma_size); ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES); - dc_plane_state->in_transfer_func->type = - TF_TYPE_DISTRIBUTED_POINTS; + dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; /* * This case isn't fully correct, but also fairly @@ -564,11 +1066,11 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; - r = __set_input_tf(dc_plane_state->in_transfer_func, + r = __set_input_tf(caps, dc_plane_state->in_transfer_func, degamma_lut, degamma_size); if (r) return r; - } else if (crtc->cm_is_degamma_srgb) { + } else { /* * For legacy gamma support we need the regamma input * in linear space. Assume that the input is sRGB. @@ -577,14 +1079,209 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, dc_plane_state->in_transfer_func->tf = tf; if (tf != TRANSFER_FUNCTION_SRGB && - !mod_color_calculate_degamma_params(NULL, - dc_plane_state->in_transfer_func, NULL, false)) + !mod_color_calculate_degamma_params(caps, + dc_plane_state->in_transfer_func, + NULL, false)) + return -ENOMEM; + } + + return 0; +} + +static int +__set_dm_plane_degamma(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state, + struct dc_color_caps *color_caps) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + const struct drm_color_lut *degamma_lut; + enum amdgpu_transfer_function tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + uint32_t degamma_size; + bool has_degamma_lut; + int ret; + + degamma_lut = __extract_blob_lut(dm_plane_state->degamma_lut, + &degamma_size); + + has_degamma_lut = degamma_lut && + !__is_lut_linear(degamma_lut, degamma_size); + + tf = dm_plane_state->degamma_tf; + + /* If we have neither a plane degamma LUT nor a TF to set on DC, we + * have nothing to do here, return. + */
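/*
 * Note the error contract of this helper: -EINVAL means "no plane degamma
 * requested", not a hard failure. A caller-side sketch, mirroring what
 * amdgpu_dm_update_plane_color_mgmt() does further below:
 *
 *	ret = __set_dm_plane_degamma(plane_state, dc_plane_state, caps);
 *	if (ret == -ENOMEM)
 *		return ret;	// real failure
 *	// on -EINVAL, fall back to mapping the CRTC degamma instead
 */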
+ if (!has_degamma_lut && tf == AMDGPU_TRANSFER_FUNCTION_DEFAULT) + return -EINVAL; + + dc_plane_state->in_transfer_func->tf = amdgpu_tf_to_dc_tf(tf); + + if (has_degamma_lut) { + ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES); + + dc_plane_state->in_transfer_func->type = + TF_TYPE_DISTRIBUTED_POINTS; + + ret = __set_input_tf(color_caps, dc_plane_state->in_transfer_func, + degamma_lut, degamma_size); + if (ret) + return ret; + } else { + dc_plane_state->in_transfer_func->type = + TF_TYPE_PREDEFINED; + + if (!mod_color_calculate_degamma_params(color_caps, + dc_plane_state->in_transfer_func, NULL, false)) return -ENOMEM; - } else { - /* ...Otherwise we can just bypass the DGM block. */ - dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS; - dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; + } + return 0; +} + +static int +amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + enum amdgpu_transfer_function shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + enum amdgpu_transfer_function blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + const struct drm_color_lut *shaper_lut, *lut3d, *blend_lut; + uint32_t shaper_size, lut3d_size, blend_size; + int ret; + + dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(dm_plane_state->hdr_mult); + + shaper_lut = __extract_blob_lut(dm_plane_state->shaper_lut, &shaper_size); + shaper_size = shaper_lut != NULL ? shaper_size : 0; + shaper_tf = dm_plane_state->shaper_tf; + lut3d = __extract_blob_lut(dm_plane_state->lut3d, &lut3d_size); + lut3d_size = lut3d != NULL ? lut3d_size : 0; + + amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, dc_plane_state->lut3d_func); + ret = amdgpu_dm_atomic_shaper_lut(shaper_lut, false, + amdgpu_tf_to_dc_tf(shaper_tf), + shaper_size, + dc_plane_state->in_shaper_func); + if (ret) { + drm_dbg_kms(plane_state->plane->dev, + "setting plane %d shaper LUT failed.\n", + plane_state->plane->index); + + return ret; + } + + blend_tf = dm_plane_state->blend_tf; + blend_lut = __extract_blob_lut(dm_plane_state->blend_lut, &blend_size); + blend_size = blend_lut != NULL ? blend_size : 0; + + ret = amdgpu_dm_atomic_blend_lut(blend_lut, false, + amdgpu_tf_to_dc_tf(blend_tf), + blend_size, dc_plane_state->blend_tf); + if (ret) { + drm_dbg_kms(plane_state->plane->dev, + "setting plane %d gamma LUT failed.\n", + plane_state->plane->index); + + return ret; } return 0; } + +/** + * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane. + * @crtc: amdgpu_dm crtc state + * @plane_state: DRM plane state + * @dc_plane_state: target DC surface + * + * Update the underlying dc_stream_state's input transfer function (ITF) in + * preparation for hardware commit. The transfer function used depends on + * the preparation done on the stream for color management. + * + * Returns: + * 0 on success. -ENOMEM if mem allocation fails. + */
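/*
 * The function below consumes the plane CTM blob as a struct
 * drm_color_ctm_3x4 in sign-magnitude S31.32 format (1.0 is 1ULL << 32 and
 * bit 63 carries the sign, converted by amdgpu_dm_fixpt_from_s3132() above).
 * A hypothetical userspace sketch building an identity matrix for that blob:
 *
 *	struct drm_color_ctm_3x4 ctm = { 0 };
 *
 *	ctm.matrix[0] = 1ULL << 32;	// row 0: R coefficient
 *	ctm.matrix[5] = 1ULL << 32;	// row 1: G coefficient
 *	ctm.matrix[10] = 1ULL << 32;	// row 2: B coefficient
 *	// matrix[3], matrix[7] and matrix[11] are per-row offsets, left at 0
 */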
+int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, + struct drm_plane_state *plane_state, + struct dc_plane_state *dc_plane_state) +{ + struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev); + struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state); + struct drm_color_ctm_3x4 *ctm = NULL; + struct dc_color_caps *color_caps = NULL; + bool has_crtc_cm_degamma; + int ret; + + ret = amdgpu_dm_verify_lut3d_size(adev, plane_state); + if (ret) { + drm_dbg_driver(&adev->ddev, "amdgpu_dm_verify_lut3d_size() failed\n"); + return ret; + } + + if (dc_plane_state->ctx && dc_plane_state->ctx->dc) + color_caps = &dc_plane_state->ctx->dc->caps.color; + + /* Initially, we can just bypass the DGM block. */ + dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS; + dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; + + /* Afterwards, we update values according to the color props */ + has_crtc_cm_degamma = (crtc->cm_has_degamma || crtc->cm_is_degamma_srgb); + + ret = __set_dm_plane_degamma(plane_state, dc_plane_state, color_caps); + if (ret == -ENOMEM) + return ret; + + /* We only have one degamma block available (pre-blending) for the + * whole color correction pipeline, so we cannot perform plane and + * CRTC degamma at the same time. Explicitly reject atomic updates + * when userspace sets both plane and CRTC degamma properties. + */ + if (has_crtc_cm_degamma && ret != -EINVAL) { + drm_dbg_kms(crtc->base.crtc->dev, + "doesn't support plane and CRTC degamma at the same time\n"); + return -EINVAL; + } + + /* If we get here, there are no plane degamma settings; check whether + * CRTC degamma is waiting to be mapped to the pre-blending degamma + * block + */ + if (has_crtc_cm_degamma) { + /* + * AMD HW doesn't have post-blending degamma caps. When DRM + * CRTC atomic degamma is set, we map it to the DPP degamma + * block (pre-blending) or, on legacy gamma, we use DPP degamma + * to linearize (implicit degamma) from sRGB/BT709 according to + * the input space. + */ + ret = map_crtc_degamma_to_dc_plane(crtc, dc_plane_state, color_caps); + if (ret) + return ret; + } + + /* Setup CRTC CTM. */ + if (dm_plane_state->ctm) { + ctm = (struct drm_color_ctm_3x4 *)dm_plane_state->ctm->data; + /* + * DCN2 and older don't support both pre-blending and + * post-blending gamut remap. For this HW family, if we have + * the plane and CRTC CTMs simultaneously, CRTC CTM takes + * priority, and we discard plane CTM, as implemented in + * dcn10_program_gamut_remap(). However, DCN3+ has DPP + * (pre-blending) and MPC (post-blending) `gamut remap` blocks; + * therefore, we can program plane and CRTC CTMs together by + * mapping CRTC CTM to MPC and keeping plane CTM setup at DPP, + * as it's done by dcn30_program_gamut_remap(). + */ + __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix); + + dc_plane_state->gamut_remap_matrix.enable_remap = true; + dc_plane_state->input_csc_color_matrix.enable_adjustment = false; + } else { + /* Bypass CTM.
*/ + dc_plane_state->gamut_remap_matrix.enable_remap = false; + dc_plane_state->input_csc_color_matrix.enable_adjustment = false; + } + + return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 52ecfa746b..f936a35fa9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -326,6 +326,9 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) if (!connector->state || connector->state->crtc != crtc) continue; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconn = to_amdgpu_dm_connector(connector); break; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index d2834ad85a..6e715ef3a5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -253,6 +253,7 @@ static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *cr state->freesync_config = cur->freesync_config; state->cm_has_degamma = cur->cm_has_degamma; state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; + state->regamma_tf = cur->regamma_tf; state->crc_skip_count = cur->crc_skip_count; state->mpo_requested = cur->mpo_requested; /* TODO: duplicate dc_stream after the stream object is flattened */ @@ -289,6 +290,70 @@ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) } #endif +#ifdef AMD_PRIVATE_COLOR +/** + * dm_crtc_additional_color_mgmt - enable additional color properties + * @crtc: DRM CRTC + * + * This function lets the driver enable the post-blending CRTC regamma transfer + * function property in addition to the DRM CRTC gamma LUT. The default value + * means a linear transfer function, which is the default CRTC gamma LUT + * behaviour without this property. + */
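/*
 * For context, userspace reaches the property below through the regular
 * atomic API. A hypothetical libdrm sketch (error handling trimmed; a real
 * client would resolve the enum value by matching the "PQ inv_EOTF" name in
 * the property's enum list rather than hardcoding the kernel constant, and
 * the property only exists on builds with AMD_PRIVATE_COLOR):
 *
 *	drmModeObjectPropertiesPtr props =
 *		drmModeObjectGetProperties(fd, crtc_id, DRM_MODE_OBJECT_CRTC);
 *	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
 *
 *	for (uint32_t i = 0; props && i < props->count_props; i++) {
 *		drmModePropertyPtr p = drmModeGetProperty(fd, props->props[i]);
 *
 *		if (p && !strcmp(p->name, "AMD_CRTC_REGAMMA_TF"))
 *			drmModeAtomicAddProperty(req, crtc_id, p->prop_id,
 *						 AMDGPU_TRANSFER_FUNCTION_PQ_INV_EOTF);
 *		drmModeFreeProperty(p);
 *	}
 *	drmModeAtomicCommit(fd, req, 0, NULL);
 *	drmModeAtomicFree(req);
 *	drmModeFreeObjectProperties(props);
 */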
+static void +dm_crtc_additional_color_mgmt(struct drm_crtc *crtc) +{ + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + + if (adev->dm.dc->caps.color.mpc.ogam_ram) + drm_object_attach_property(&crtc->base, + adev->mode_info.regamma_tf_property, + AMDGPU_TRANSFER_FUNCTION_DEFAULT); +} + +static int +amdgpu_dm_atomic_crtc_set_property(struct drm_crtc *crtc, + struct drm_crtc_state *state, + struct drm_property *property, + uint64_t val) +{ + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state); + + if (property == adev->mode_info.regamma_tf_property) { + if (acrtc_state->regamma_tf != val) { + acrtc_state->regamma_tf = val; + acrtc_state->base.color_mgmt_changed |= 1; + } + } else { + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] unknown property [PROP:%d:%s]\n", + crtc->base.id, crtc->name, + property->base.id, property->name); + return -EINVAL; + } + + return 0; +} + +static int +amdgpu_dm_atomic_crtc_get_property(struct drm_crtc *crtc, + const struct drm_crtc_state *state, + struct drm_property *property, + uint64_t *val) +{ + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + struct dm_crtc_state *acrtc_state = to_dm_crtc_state(state); + + if (property == adev->mode_info.regamma_tf_property) + *val = acrtc_state->regamma_tf; + else + return -EINVAL; + + return 0; +} +#endif + /* Implemented only the options currently available for the driver */ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { .reset = amdgpu_dm_crtc_reset_state, @@ -307,6 +372,10 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { #if defined(CONFIG_DEBUG_FS) .late_register = amdgpu_dm_crtc_late_register, #endif +#ifdef AMD_PRIVATE_COLOR + .atomic_set_property = amdgpu_dm_atomic_crtc_set_property, + .atomic_get_property = amdgpu_dm_atomic_crtc_get_property, +#endif }; static void amdgpu_dm_crtc_helper_disable(struct drm_crtc *crtc) @@ -482,6 +551,9 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); +#ifdef AMD_PRIVATE_COLOR + dm_crtc_additional_color_mgmt(&acrtc->base); +#endif return 0; fail: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 417c9cb47b..85fc618130 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -2971,6 +2971,85 @@ static int allow_edp_hotplug_detection_set(void *data, u64 val) return 0; } +static int dmub_trace_mask_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub; + enum dmub_gpint_command cmd; + u64 mask = 0xffff; + u8 shift = 0; + u32 res; + int i; + + if (!srv->fw_version) + return -EINVAL; + + for (i = 0; i < 4; i++) { + res = (val & mask) >> shift; + + switch (i) { + case 0: + cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0; + break; + case 1: + cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1; + break; + case 2: + cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD2; + break; + case 3: + cmd = DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD3; + break; + } + + if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, res, NULL, DM_DMUB_WAIT_TYPE_WAIT)) + return -EIO; + + usleep_range(100, 1000); + + mask <<= 16; + shift += 16; + } + + return 0; +} + +static int dmub_trace_mask_show(void *data, u64 *val) +{ + enum dmub_gpint_command cmd = DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0; + struct 
amdgpu_device *adev = data; + struct dmub_srv *srv = adev->dm.dc->ctx->dmub_srv->dmub; + u8 shift = 0; + u64 raw = 0; + u64 res = 0; + int i = 0; + + if (!srv->fw_version) + return -EINVAL; + + while (i < 4) { + uint32_t response; + + if (!dc_wake_and_execute_gpint(adev->dm.dc->ctx, cmd, 0, &response, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + return -EIO; + + raw = response; + usleep_range(100, 1000); + + cmd++; + res |= (raw << shift); + shift += 16; + i++; + } + + *val = res; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(dmub_trace_mask_fops, dmub_trace_mask_show, + dmub_trace_mask_set, "0x%llx\n"); + /* * Set dmcub trace event IRQ enable or disable. * Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en @@ -3884,6 +3963,9 @@ void dtn_debugfs_init(struct amdgpu_device *adev) debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root, adev, &force_timing_sync_ops); + debugfs_create_file_unsafe("amdgpu_dm_dmub_trace_mask", 0644, root, + adev, &dmub_trace_mask_fops); + debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root, adev, &dmcub_trace_event_state_fops); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 7a09a72e18..c27063305a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -31,6 +31,7 @@ #include #include #include +#include #include "dm_services.h" #include "amdgpu.h" @@ -218,7 +219,7 @@ static void dm_helpers_construct_old_payload( struct drm_dp_mst_atomic_payload *old_payload) { struct drm_dp_mst_atomic_payload *pos; - int pbn_per_slot = mst_state->pbn_div; + int pbn_per_slot = dfixed_trunc(mst_state->pbn_div); u8 next_payload_vc_start = mgr->next_start_slot; u8 payload_vc_start = new_payload->vc_start_slot; u8 allocated_time_slots; @@ -341,15 +342,14 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( return ACT_SUCCESS; } -bool dm_helpers_dp_mst_send_payload_allocation( +void dm_helpers_dp_mst_send_payload_allocation( struct dc_context *ctx, - const struct dc_stream_state *stream, - bool enable) + const struct dc_stream_state *stream) { struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_state *mst_state; struct drm_dp_mst_topology_mgr *mst_mgr; - struct drm_dp_mst_atomic_payload *new_payload, old_payload; + struct drm_dp_mst_atomic_payload *new_payload; enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD; enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; int ret = 0; @@ -357,25 +357,13 @@ bool dm_helpers_dp_mst_send_payload_allocation( aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_root) - return false; + return; mst_mgr = &aconnector->mst_root->mst_mgr; mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); - new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); - if (!enable) { - set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; - clr_flag = MST_ALLOCATE_NEW_PAYLOAD; - } - - if (enable) { - ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload); - } else { - dm_helpers_construct_old_payload(mst_mgr, mst_state, - new_payload, &old_payload); - drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload); - } + ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload); if (ret) { amdgpu_dm_set_mst_status(&aconnector->mst_status, 
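/*
 * Usage of the amdgpu_dm_dmub_trace_mask entry created above, following the
 * pattern of the existing dmcub trace event control (DRI device 0 assumed):
 *   Set the four 16-bit trace words: echo 0x000000000000ffff > /sys/kernel/debug/dri/0/amdgpu_dm_dmub_trace_mask
 *   Read the combined mask back: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_trace_mask
 */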
@@ -386,10 +374,36 @@ bool dm_helpers_dp_mst_send_payload_allocation( amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false); } - - return true; } +void dm_helpers_dp_mst_update_mst_mgr_for_deallocation( + struct dc_context *ctx, + const struct dc_stream_state *stream) +{ + struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_atomic_payload *new_payload, old_payload; + enum mst_progress_status set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; + enum mst_progress_status clr_flag = MST_ALLOCATE_NEW_PAYLOAD; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector || !aconnector->mst_root) + return; + + mst_mgr = &aconnector->mst_root->mst_mgr; + mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); + new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); + dm_helpers_construct_old_payload(mst_mgr, mst_state, + new_payload, &old_payload); + + drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload); + + amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true); + amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false); + } + void dm_dtn_log_begin(struct dc_context *ctx, struct dc_log_buffer_ctx *log_ctx) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index d595030c33..3390f0d842 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -897,10 +897,15 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { - struct amdgpu_dm_connector *amdgpu_dm_connector = - to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *amdgpu_dm_connector; + const struct dc_link *dc_link; - const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + + dc_link = amdgpu_dm_connector->dc_link; if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, @@ -933,9 +938,14 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { - struct amdgpu_dm_connector *amdgpu_dm_connector = - to_amdgpu_dm_connector(connector); - const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; + struct amdgpu_dm_connector *amdgpu_dm_connector; + const struct dc_link *dc_link; + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + + amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + dc_link = amdgpu_dm_connector->dc_link; if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { dc_interrupt_set(adev->dm.dc, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 602a2ab98a..941e96f100 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include "dm_services.h" #include "amdgpu.h" #include "amdgpu_dm.h" @@ -44,7 +46,7 @@ #include "amdgpu_dm_debugfs.h" #endif -#include "dc/dcn20/dcn20_resource.h" +#include "dc/resource/dcn20/dcn20_resource.h" #define 
PEAK_FACTOR_X1000 1006 @@ -424,8 +426,7 @@ dm_mst_atomic_best_encoder(struct drm_connector *connector, { struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, connector); - struct drm_device *dev = connector->dev; - struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_device *adev = drm_to_adev(connector->dev); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); return &adev->dm.mst_encoders[acrtc->crtc_id].base; @@ -941,10 +942,10 @@ static int increase_dsc_bpp(struct drm_atomic_state *state, link_timeslots_used = 0; for (i = 0; i < count; i++) - link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div); + link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, dfixed_trunc(mst_state->pbn_div)); fair_pbn_alloc = - (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div; + (63 - link_timeslots_used) / remaining_to_increase * dfixed_trunc(mst_state->pbn_div); if (initial_slack[next_index] > fair_pbn_alloc) { vars[next_index].pbn += fair_pbn_alloc; @@ -1604,9 +1605,8 @@ enum dc_status dm_dp_mst_is_port_support_mode( struct dc_link_settings cur_link_settings; unsigned int end_to_end_bw_in_kbps = 0; unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; - unsigned int max_compressed_bw_in_kbps = 0; struct dc_dsc_bw_range bw_range = {0}; - uint16_t full_pbn = aconnector->mst_output_port->full_pbn; + struct dc_dsc_config_options dsc_options = {0}; /* * Consider the case with the depth of the mst topology tree is equal or less than 2 @@ -1622,30 +1622,39 @@ enum dc_status dm_dp_mst_is_port_support_mode( (aconnector->mst_output_port->passthrough_aux || aconnector->dsc_aux == &aconnector->mst_output_port->aux)) { cur_link_settings = stream->link->verified_link_cap; + upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings); + down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn); - upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, - &cur_link_settings); - down_link_bw_in_kbps = kbps_from_pbn(full_pbn); - - /* pick the bottleneck */ - end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, - down_link_bw_in_kbps); - - /* - * use the maximum dsc compression bandwidth as the required - * bandwidth for the mode - */ - max_compressed_bw_in_kbps = bw_range.min_kbps; + /* pick the end to end bw bottleneck */ + end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, down_link_bw_in_kbps); - if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) { - DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n"); + if (end_to_end_bw_in_kbps < bw_range.min_kbps) { + DRM_DEBUG_DRIVER("maximum dsc compression cannot fit into end-to-end bandwidth\n"); return DC_FAIL_BANDWIDTH_VALIDATE; } + + if (end_to_end_bw_in_kbps < bw_range.stream_kbps) { + dc_dsc_get_default_config_option(stream->link->dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = aconnector->base.display_info.max_dsc_bpp * 16; + if (dc_dsc_compute_config(stream->sink->ctx->dc->res_pool->dscs[0], + &stream->sink->dsc_caps.dsc_dec_caps, + &dsc_options, + end_to_end_bw_in_kbps, + &stream->timing, + dc_link_get_highest_encoding_format(stream->link), + &stream->timing.dsc_cfg)) { + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc and dsc config found\n"); + } else { + DRM_DEBUG_DRIVER("end-to-end bandwidth require dsc but dsc config not found\n"); + return DC_FAIL_BANDWIDTH_VALIDATE; + } + } } else { /* check if mode could be 
supported within full_pbn */ bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3; pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4); - if (pbn > full_pbn) + if (pbn > aconnector->mst_output_port->full_pbn) return DC_FAIL_BANDWIDTH_VALIDATE; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 116121e647..8a4c40b4c2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -1337,8 +1337,14 @@ static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane) amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); WARN_ON(amdgpu_state == NULL); - if (amdgpu_state) - __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); + if (!amdgpu_state) + return; + + __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); + amdgpu_state->degamma_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + amdgpu_state->hdr_mult = AMDGPU_HDR_MULT_DEFAULT; + amdgpu_state->shaper_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; + amdgpu_state->blend_tf = AMDGPU_TRANSFER_FUNCTION_DEFAULT; } static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane) @@ -1357,6 +1363,27 @@ static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct dc_plane_state_retain(dm_plane_state->dc_state); } + if (old_dm_plane_state->degamma_lut) + dm_plane_state->degamma_lut = + drm_property_blob_get(old_dm_plane_state->degamma_lut); + if (old_dm_plane_state->ctm) + dm_plane_state->ctm = + drm_property_blob_get(old_dm_plane_state->ctm); + if (old_dm_plane_state->shaper_lut) + dm_plane_state->shaper_lut = + drm_property_blob_get(old_dm_plane_state->shaper_lut); + if (old_dm_plane_state->lut3d) + dm_plane_state->lut3d = + drm_property_blob_get(old_dm_plane_state->lut3d); + if (old_dm_plane_state->blend_lut) + dm_plane_state->blend_lut = + drm_property_blob_get(old_dm_plane_state->blend_lut); + + dm_plane_state->degamma_tf = old_dm_plane_state->degamma_tf; + dm_plane_state->hdr_mult = old_dm_plane_state->hdr_mult; + dm_plane_state->shaper_tf = old_dm_plane_state->shaper_tf; + dm_plane_state->blend_tf = old_dm_plane_state->blend_tf; + return &dm_plane_state->base; } @@ -1424,12 +1451,206 @@ static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane, { struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); + if (dm_plane_state->degamma_lut) + drm_property_blob_put(dm_plane_state->degamma_lut); + if (dm_plane_state->ctm) + drm_property_blob_put(dm_plane_state->ctm); + if (dm_plane_state->lut3d) + drm_property_blob_put(dm_plane_state->lut3d); + if (dm_plane_state->shaper_lut) + drm_property_blob_put(dm_plane_state->shaper_lut); + if (dm_plane_state->blend_lut) + drm_property_blob_put(dm_plane_state->blend_lut); + if (dm_plane_state->dc_state) dc_plane_state_release(dm_plane_state->dc_state); drm_atomic_helper_plane_destroy_state(plane, state); } +#ifdef AMD_PRIVATE_COLOR +static void +dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm, + struct drm_plane *plane) +{ + struct amdgpu_mode_info mode_info = dm->adev->mode_info; + struct dpp_color_caps dpp_color_caps = dm->dc->caps.color.dpp; + + /* Check HW color pipeline capabilities on DPP block (pre-blending) + * before exposing related properties. 
+ */ + if (dpp_color_caps.dgam_ram || dpp_color_caps.gamma_corr) { + drm_object_attach_property(&plane->base, + mode_info.plane_degamma_lut_property, + 0); + drm_object_attach_property(&plane->base, + mode_info.plane_degamma_lut_size_property, + MAX_COLOR_LUT_ENTRIES); + drm_object_attach_property(&plane->base, + dm->adev->mode_info.plane_degamma_tf_property, + AMDGPU_TRANSFER_FUNCTION_DEFAULT); + } + /* HDR MULT is always available */ + drm_object_attach_property(&plane->base, + dm->adev->mode_info.plane_hdr_mult_property, + AMDGPU_HDR_MULT_DEFAULT); + + /* Only enable plane CTM if both DPP and MPC gamut remap is available. */ + if (dm->dc->caps.color.mpc.gamut_remap) + drm_object_attach_property(&plane->base, + dm->adev->mode_info.plane_ctm_property, 0); + + if (dpp_color_caps.hw_3d_lut) { + drm_object_attach_property(&plane->base, + mode_info.plane_shaper_lut_property, 0); + drm_object_attach_property(&plane->base, + mode_info.plane_shaper_lut_size_property, + MAX_COLOR_LUT_ENTRIES); + drm_object_attach_property(&plane->base, + mode_info.plane_shaper_tf_property, + AMDGPU_TRANSFER_FUNCTION_DEFAULT); + drm_object_attach_property(&plane->base, + mode_info.plane_lut3d_property, 0); + drm_object_attach_property(&plane->base, + mode_info.plane_lut3d_size_property, + MAX_COLOR_3DLUT_SIZE); + } + + if (dpp_color_caps.ogam_ram) { + drm_object_attach_property(&plane->base, + mode_info.plane_blend_lut_property, 0); + drm_object_attach_property(&plane->base, + mode_info.plane_blend_lut_size_property, + MAX_COLOR_LUT_ENTRIES); + drm_object_attach_property(&plane->base, + mode_info.plane_blend_tf_property, + AMDGPU_TRANSFER_FUNCTION_DEFAULT); + } +} + +static int +dm_atomic_plane_set_property(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); + struct amdgpu_device *adev = drm_to_adev(plane->dev); + bool replaced = false; + int ret; + + if (property == adev->mode_info.plane_degamma_lut_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->degamma_lut, + val, -1, + sizeof(struct drm_color_lut), + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_degamma_tf_property) { + if (dm_plane_state->degamma_tf != val) { + dm_plane_state->degamma_tf = val; + dm_plane_state->base.color_mgmt_changed = 1; + } + } else if (property == adev->mode_info.plane_hdr_mult_property) { + if (dm_plane_state->hdr_mult != val) { + dm_plane_state->hdr_mult = val; + dm_plane_state->base.color_mgmt_changed = 1; + } + } else if (property == adev->mode_info.plane_ctm_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->ctm, + val, + sizeof(struct drm_color_ctm_3x4), -1, + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_shaper_lut_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->shaper_lut, + val, -1, + sizeof(struct drm_color_lut), + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_shaper_tf_property) { + if (dm_plane_state->shaper_tf != val) { + dm_plane_state->shaper_tf = val; + dm_plane_state->base.color_mgmt_changed = 1; + } + } else if (property == adev->mode_info.plane_lut3d_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->lut3d, + val, 
-1, + sizeof(struct drm_color_lut), + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_blend_lut_property) { + ret = drm_property_replace_blob_from_id(plane->dev, + &dm_plane_state->blend_lut, + val, -1, + sizeof(struct drm_color_lut), + &replaced); + dm_plane_state->base.color_mgmt_changed |= replaced; + return ret; + } else if (property == adev->mode_info.plane_blend_tf_property) { + if (dm_plane_state->blend_tf != val) { + dm_plane_state->blend_tf = val; + dm_plane_state->base.color_mgmt_changed = 1; + } + } else { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n", + plane->base.id, plane->name, + property->base.id, property->name); + return -EINVAL; + } + + return 0; +} + +static int +dm_atomic_plane_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); + struct amdgpu_device *adev = drm_to_adev(plane->dev); + + if (property == adev->mode_info.plane_degamma_lut_property) { + *val = (dm_plane_state->degamma_lut) ? + dm_plane_state->degamma_lut->base.id : 0; + } else if (property == adev->mode_info.plane_degamma_tf_property) { + *val = dm_plane_state->degamma_tf; + } else if (property == adev->mode_info.plane_hdr_mult_property) { + *val = dm_plane_state->hdr_mult; + } else if (property == adev->mode_info.plane_ctm_property) { + *val = (dm_plane_state->ctm) ? + dm_plane_state->ctm->base.id : 0; + } else if (property == adev->mode_info.plane_shaper_lut_property) { + *val = (dm_plane_state->shaper_lut) ? + dm_plane_state->shaper_lut->base.id : 0; + } else if (property == adev->mode_info.plane_shaper_tf_property) { + *val = dm_plane_state->shaper_tf; + } else if (property == adev->mode_info.plane_lut3d_property) { + *val = (dm_plane_state->lut3d) ? + dm_plane_state->lut3d->base.id : 0; + } else if (property == adev->mode_info.plane_blend_lut_property) { + *val = (dm_plane_state->blend_lut) ? 
+ dm_plane_state->blend_lut->base.id : 0; + } else if (property == adev->mode_info.plane_blend_tf_property) { + *val = dm_plane_state->blend_tf; + + } else { + return -EINVAL; + } + + return 0; +} +#endif + static const struct drm_plane_funcs dm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -1438,6 +1659,10 @@ static const struct drm_plane_funcs dm_plane_funcs = { .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state, .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state, .format_mod_supported = amdgpu_dm_plane_format_mod_supported, +#ifdef AMD_PRIVATE_COLOR + .atomic_set_property = dm_atomic_plane_set_property, + .atomic_get_property = dm_atomic_plane_get_property, +#endif }; int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, @@ -1517,6 +1742,9 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, drm_plane_helper_add(plane, &dm_plane_helper_funcs); +#ifdef AMD_PRIVATE_COLOR + dm_atomic_plane_attach_color_mgmt_properties(dm, plane); +#endif /* Create (reset) the plane state */ if (plane->funcs->reset) plane->funcs->reset(plane); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 08ce3bb8f6..286ecd28cc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -51,6 +51,9 @@ static bool link_supports_psrsu(struct dc_link *link) !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap) return false; + if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU) + return false; + return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub); } @@ -138,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) * amdgpu_dm_psr_enable() - enable psr f/w * @stream: stream state * - * Return: true if success */ -bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) +void amdgpu_dm_psr_enable(struct dc_stream_state *stream) { struct dc_link *link = stream->link; unsigned int vsync_rate_hz = 0; @@ -187,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1) power_opt |= psr_power_opt_z10_static_screen; - return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); + dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); + + if (link->ctx->dc->caps.ips_support) + dc_allow_idle_optimizations(link->ctx->dc, true); } /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h index 6806b3c9c8..1fdfd183c0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h @@ -32,7 +32,7 @@ #define AMDGPU_DM_PSR_ENTRY_DELAY 5 void amdgpu_dm_set_psr_caps(struct dc_link *link); -bool amdgpu_dm_psr_enable(struct dc_stream_state *stream); +void amdgpu_dm_psr_enable(struct dc_stream_state *stream); bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c new file mode 100644 index 0000000000..08c494a7a2 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, 
Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services_types.h" + +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_wb.h" +#include "amdgpu_display.h" +#include "dc.h" + +#include +#include +#include + +static const u32 amdgpu_dm_wb_formats[] = { + DRM_FORMAT_XRGB2101010, +}; + +static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_framebuffer *fb; + const struct drm_display_mode *mode = &crtc_state->mode; + bool found = false; + uint8_t i; + + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + return 0; + + fb = conn_state->writeback_job->fb; + if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) { + DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n", + fb->width, fb->height); + return -EINVAL; + } + + for (i = 0; i < sizeof(amdgpu_dm_wb_formats) / sizeof(u32); i++) { + if (fb->format->format == amdgpu_dm_wb_formats[i]) + found = true; + } + + if (!found) { + DRM_DEBUG_KMS("Invalid pixel format %p4cc\n", + &fb->format->format); + return -EINVAL; + } + + return 0; +} + + +static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector) +{ + /* Maximum resolution supported by DWB */ + return drm_add_modes_noedid(connector, 3840, 2160); +} + +static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector, + struct drm_writeback_job *job) +{ + struct amdgpu_framebuffer *afb; + struct drm_gem_object *obj; + struct amdgpu_device *adev; + struct amdgpu_bo *rbo; + uint32_t domain; + int r; + + if (!job->fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + afb = to_amdgpu_framebuffer(job->fb); + obj = job->fb->obj[0]; + rbo = gem_to_amdgpu_bo(obj); + adev = amdgpu_ttm_adev(rbo->tbo.bdev); + + r = amdgpu_bo_reserve(rbo, true); + if (r) { + dev_err(adev->dev, "fail to reserve bo (%d)\n", r); + return r; + } + + r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); + if (r) { + dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); + goto error_unlock; + } + + domain = amdgpu_display_supported_domains(adev, rbo->flags); + + r = amdgpu_bo_pin(rbo, domain); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to pin framebuffer with error %d\n", r); + goto error_unlock; + } + + r = amdgpu_ttm_alloc_gart(&rbo->tbo); + if (unlikely(r != 0)) { + DRM_ERROR("%p bind failed\n", rbo); + goto error_unpin; + } + + 
amdgpu_bo_unreserve(rbo); + + afb->address = amdgpu_bo_gpu_offset(rbo); + + amdgpu_bo_ref(rbo); + + return 0; + +error_unpin: + amdgpu_bo_unpin(rbo); + +error_unlock: + amdgpu_bo_unreserve(rbo); + return r; +} + +static void amdgpu_dm_wb_cleanup_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + struct amdgpu_bo *rbo; + int r; + + if (!job->fb) + return; + + rbo = gem_to_amdgpu_bo(job->fb->obj[0]); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) { + DRM_ERROR("failed to reserve rbo before unpin\n"); + return; + } + + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + amdgpu_bo_unref(&rbo); +} + +static const struct drm_encoder_helper_funcs amdgpu_dm_wb_encoder_helper_funcs = { + .atomic_check = amdgpu_dm_wb_encoder_atomic_check, +}; + +static const struct drm_connector_funcs amdgpu_dm_wb_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = amdgpu_dm_connector_funcs_reset, + .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs amdgpu_dm_wb_conn_helper_funcs = { + .get_modes = amdgpu_dm_wb_connector_get_modes, + .prepare_writeback_job = amdgpu_dm_wb_prepare_job, + .cleanup_writeback_job = amdgpu_dm_wb_cleanup_job, +}; + +int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm, + struct amdgpu_dm_wb_connector *wbcon, + uint32_t link_index) +{ + struct dc *dc = dm->dc; + struct dc_link *link = dc_get_link_at_index(dc, link_index); + int res = 0; + + wbcon->link = link; + + drm_connector_helper_add(&wbcon->base.base, &amdgpu_dm_wb_conn_helper_funcs); + + res = drm_writeback_connector_init(&dm->adev->ddev, &wbcon->base, + &amdgpu_dm_wb_connector_funcs, + &amdgpu_dm_wb_encoder_helper_funcs, + amdgpu_dm_wb_formats, + ARRAY_SIZE(amdgpu_dm_wb_formats), + amdgpu_dm_get_encoder_crtc_mask(dm->adev)); + + if (res) + return res; + /* + * Some of the properties below require access to state, like bpc. + * Allocate some default initial connector state with our reset helper. + */ + if (wbcon->base.base.funcs->reset) + wbcon->base.base.funcs->reset(&wbcon->base.base); + + return 0; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h new file mode 100644 index 0000000000..13d31c857d --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_WB_H__ +#define __AMDGPU_DM_WB_H__ + +#include <drm/drm_writeback.h> + +int amdgpu_dm_wb_connector_init(struct amdgpu_display_manager *dm, + struct amdgpu_dm_wb_connector *dm_wbcon, + uint32_t link_index); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 3a169b78e7..7991ae468f 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -22,7 +22,7 @@ # # Makefile for Display Core (dc) component. -DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc +DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc ifdef CONFIG_DRM_AMD_DC_FP @@ -34,12 +34,8 @@ DC_LIBS += dcn21 DC_LIBS += dcn201 DC_LIBS += dcn30 DC_LIBS += dcn301 -DC_LIBS += dcn302 -DC_LIBS += dcn303 DC_LIBS += dcn31 DC_LIBS += dcn314 -DC_LIBS += dcn315 -DC_LIBS += dcn316 DC_LIBS += dcn32 DC_LIBS += dcn321 DC_LIBS += dcn35 @@ -51,7 +47,6 @@ DC_LIBS += dce120 DC_LIBS += dce112 DC_LIBS += dce110 -DC_LIBS += dce100 DC_LIBS += dce80 ifdef CONFIG_DRM_AMD_DC_SI @@ -65,7 +60,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI include $(AMD_DC) DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ -dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o +dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o DISPLAY_CORE += dc_vm_helper.o diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c index e295a839ab..1090d23508 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c @@ -103,7 +103,8 @@ void convert_float_matrix( static uint32_t find_gcd(uint32_t a, uint32_t b) { - uint32_t remainder = 0; + uint32_t remainder; + while (b != 0) { remainder = a % b; a = b; diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c index f2dfa96f9e..39530b2ea4 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c @@ -94,7 +94,7 @@ static void calculate_bandwidth( const uint32_t s_high = 7; const uint32_t dmif_chunk_buff_margin = 1; - uint32_t max_chunks_fbc_mode; + uint32_t max_chunks_fbc_mode = 0; int32_t num_cursor_lines; int32_t i, j, k; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index bc7a375f43..05f392501c 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -2223,22 +2223,22 @@ static enum bp_result bios_parser_get_disp_connector_caps_info( switch (bp->object_info_tbl.revision.minor) { case 4: - default: - object = get_bios_object(bp, object_id); - - if (!object) - return BP_RESULT_BADINPUT; - - record = get_disp_connector_caps_record(bp, object); - if (!record) - return BP_RESULT_NORECORD; - - info->INTERNAL_DISPLAY = - (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0; - info->INTERNAL_DISPLAY_BL = - (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 
1 : 0; - break; - case 5: + default: + object = get_bios_object(bp, object_id); + + if (!object) + return BP_RESULT_BADINPUT; + + record = get_disp_connector_caps_record(bp, object); + if (!record) + return BP_RESULT_NORECORD; + + info->INTERNAL_DISPLAY = + (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0; + info->INTERNAL_DISPLAY_BL = + (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0; + break; + case 5: object_path_v3 = get_bios_object_from_path_v3(bp, object_id); if (!object_path_v3) @@ -2400,7 +2400,6 @@ static enum bp_result get_vram_info_v30( return result; } - /* * get_integrated_info_v11 * @@ -3336,27 +3335,28 @@ static enum bp_result get_bracket_layout_record( DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n"); return BP_RESULT_BADINPUT; } + tbl = &bp->object_info_tbl; v1_4 = tbl->v1_4; v1_5 = tbl->v1_5; result = BP_RESULT_NORECORD; switch (bp->object_info_tbl.revision.minor) { - case 4: - default: - for (i = 0; i < v1_4->number_of_path; ++i) { - if (bracket_layout_id == - v1_4->display_path[i].display_objid) { - result = update_slot_layout_info(dcb, i, slot_layout_info); - break; - } + case 4: + default: + for (i = 0; i < v1_4->number_of_path; ++i) { + if (bracket_layout_id == v1_4->display_path[i].display_objid) { + result = update_slot_layout_info(dcb, i, slot_layout_info); + break; } - break; - case 5: - for (i = 0; i < v1_5->number_of_path; ++i) - result = update_slot_layout_info_v2(dcb, i, slot_layout_info); - break; + } + break; + case 5: + for (i = 0; i < v1_5->number_of_path; ++i) + result = update_slot_layout_info_v2(dcb, i, slot_layout_info); + break; } + return result; } @@ -3365,9 +3365,7 @@ static enum bp_result bios_get_board_layout_info( struct board_layout_info *board_layout_info) { unsigned int i; - struct bios_parser *bp; - static enum bp_result record_result; unsigned int max_slots; @@ -3377,7 +3375,6 @@ static enum bp_result bios_get_board_layout_info( 0, 0 }; - bp = BP_FROM_DCB(dcb); if (board_layout_info == NULL) { @@ -3558,7 +3555,6 @@ static const struct dc_vbios_funcs vbios_funcs = { .bios_parser_destroy = firmware_parser_destroy, .get_board_layout_info = bios_get_board_layout_info, - /* TODO: use this fn in hw init?*/ .pack_data_tables = bios_parser_pack_data_tables, .get_atom_dc_golden_table = bios_get_atom_dc_golden_table, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 3e73c4e59d..28a2a837d2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -29,6 +29,7 @@ #include "dc_types.h" #include "dccg.h" #include "clk_mgr_internal.h" +#include "dc_state_priv.h" #include "link.h" #include "dce100/dce_clk_mgr.h" @@ -63,7 +64,7 @@ int clk_mgr_helper_get_active_display_cnt( /* Don't count SubVP phantom pipes as part of active * display count */ - if (stream->mall_stream_config.type == SUBVP_PHANTOM) + if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) continue; /* @@ -368,7 +369,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p } break; -#endif /* CONFIG_DRM_AMD_DC_FP - Family RV */ +#endif /* CONFIG_DRM_AMD_DC_FP */ default: ASSERT(0); /* Unknown Asic */ break; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index a5489fe687..aa9fd1dc55 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ 
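
The conversion.c hunk above only drops a redundant initializer from find_gcd(); the loop itself is the standard iterative Euclid's algorithm. Shown standalone for reference: 'remainder' is always written before it is read, which is why the '= 0' was unnecessary.

#include <assert.h>
#include <stdint.h>

/* Iterative Euclid's algorithm, as in the patched find_gcd(). */
static uint32_t find_gcd(uint32_t a, uint32_t b)
{
	uint32_t remainder;

	while (b != 0) {
		remainder = a % b;	/* first use is always a write */
		a = b;
		b = remainder;
	}
	return a;
}

int main(void)
{
	assert(find_gcd(54, 24) == 6);
	assert(find_gcd(7, 13) == 1);	/* coprime inputs */
	return 0;
}
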
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta int i; for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) { + if (i >= VG_NUM_DCFCLK_DPM_LEVELS) + break; if (clock_table->SocVoltage[i] == voltage) return clock_table->DcfClocks[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 12f3e8aa46..6ad4f4efec 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa( return display_count; } -static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable) +static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, + bool safe_to_lower, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; int i; for (i = 0; i < dc->res_pool->pipe_count; ++i) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = safe_to_lower + ? &context->res_ctx.pipe_ctx[i] + : &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL || - dc_is_virtual_signal(pipe->stream->signal))) { + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || + !pipe->stream->link_enc)) { if (disable) { - pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) + pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + reset_sync_context_for_pipe(dc, context, i); } else pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); @@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, } if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { - dcn316_disable_otg_wa(clk_mgr_base, context, true); + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); - dcn316_disable_otg_wa(clk_mgr_base, context, false); + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); update_dispclk = true; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 289918ea72..bbdbc78161 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -25,7 +25,6 @@ #include "dccg.h" #include "clk_mgr_internal.h" - #include "dcn32/dcn32_clk_mgr_smu_msg.h" #include "dcn20/dcn20_clk_mgr.h" #include "dce100/dce_clk_mgr.h" @@ -34,7 +33,7 @@ #include "core_types.h" #include "dm_helpers.h" #include "link.h" - +#include "dc_state_priv.h" #include "atomfirmware.h" #include "smu13_driver_if.h" @@ -472,20 +471,56 @@ static int dcn32_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base) return 0; } -static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr_internal *clk_mgr) +static bool dcn32_check_native_scaling(struct pipe_ctx *pipe) { - unsigned int dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK - unsigned int dppclk_khz_reg = 
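
The vg_clk_mgr.c hunk above adds a bounds guard to find_dcfclk_for_voltage(): the SoC voltage table has more levels than the DCFCLK table, so the shared loop index must stop at the shorter array. A standalone sketch of the pattern, with invented table sizes and values:

#include <stdio.h>

#define NUM_SOC_VOLTAGE_LEVELS 8	/* plays VG_NUM_SOC_VOLTAGE_LEVELS */
#define NUM_DCFCLK_DPM_LEVELS  4	/* plays VG_NUM_DCFCLK_DPM_LEVELS */

static unsigned int find_dcfclk_for_voltage(const unsigned int *soc_voltage,
					    const unsigned int *dcf_clocks,
					    unsigned int voltage)
{
	int i;

	for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
		if (i >= NUM_DCFCLK_DPM_LEVELS)
			break;	/* never index past the shorter table */
		if (soc_voltage[i] == voltage)
			return dcf_clocks[i];
	}
	return 0;
}

int main(void)
{
	unsigned int volt[NUM_SOC_VOLTAGE_LEVELS] = { 700, 750, 800, 850, 900, 950, 1000, 1050 };
	unsigned int dcf[NUM_DCFCLK_DPM_LEVELS] = { 400, 600, 800, 1000 };

	printf("%u\n", find_dcfclk_for_voltage(volt, dcf, 800));  /* 800 */
	printf("%u\n", find_dcfclk_for_voltage(volt, dcf, 1000)); /* 0: level 6 has no DCFCLK entry */
	return 0;
}
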
REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK - unsigned int dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK - unsigned int dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK - unsigned int dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK - unsigned int fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK + bool is_native_scaling = false; + int width = pipe->plane_state->src_rect.width; + int height = pipe->plane_state->src_rect.height; + + if (pipe->stream->timing.h_addressable == width && + pipe->stream->timing.v_addressable == height && + pipe->plane_state->dst_rect.width == width && + pipe->plane_state->dst_rect.height == height) + is_native_scaling = true; + + return is_native_scaling; +} + +static void dcn32_auto_dpm_test_log( + struct dc_clocks *new_clocks, + struct clk_mgr_internal *clk_mgr, + struct dc_state *context) +{ + unsigned int dispclk_khz_reg, dppclk_khz_reg, dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg, + fclk_khz_reg, mall_ss_size_bytes; + int dramclk_khz_override, fclk_khz_override, num_fclk_levels; + + struct pipe_ctx *pipe_ctx_list[MAX_PIPES]; + int active_pipe_count = 0; + + for (int i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { + pipe_ctx_list[active_pipe_count] = pipe_ctx; + active_pipe_count++; + } + } + + mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes; + + dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK + dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK + dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK + dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK + dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK + fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK // Overrides for these clocks in case there is no p_state change support - int dramclk_khz_override = new_clocks->dramclk_khz; - int fclk_khz_override = new_clocks->fclk_khz; + dramclk_khz_override = new_clocks->dramclk_khz; + fclk_khz_override = new_clocks->fclk_khz; - int num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1; + num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1; if (!new_clocks->p_state_change_support) { dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000; @@ -502,16 +537,49 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr // // AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n" //////////////////////////////////////////////////////////////////////////// - if (new_clocks && + if (new_clocks && active_pipe_count > 0 && new_clocks->dramclk_khz > 0 && new_clocks->fclk_khz > 0 && new_clocks->dcfclk_khz > 0 && new_clocks->dppclk_khz > 0) { + uint32_t pix_clk_list[MAX_PIPES] = {0}; + int p_state_list[MAX_PIPES] = {0}; + int disp_src_width_list[MAX_PIPES] = {0}; + int disp_src_height_list[MAX_PIPES] = {0}; + uint64_t disp_src_refresh_list[MAX_PIPES] = {0}; + bool is_scaled_list[MAX_PIPES] = {0}; + + for (int i = 0; i < active_pipe_count; i++) { + struct pipe_ctx *curr_pipe_ctx = pipe_ctx_list[i]; + uint64_t refresh_rate; + + pix_clk_list[i] = curr_pipe_ctx->stream->timing.pix_clk_100hz; + p_state_list[i] = curr_pipe_ctx->p_state_type; + + refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 + + curr_pipe_ctx->stream->timing.v_total * curr_pipe_ctx->stream->timing.h_total - 
(uint64_t)1); + refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total); + disp_src_refresh_list[i] = refresh_rate; + + if (curr_pipe_ctx->plane_state) { + is_scaled_list[i] = !(dcn32_check_native_scaling(curr_pipe_ctx)); + disp_src_width_list[i] = curr_pipe_ctx->plane_state->src_rect.width; + disp_src_height_list[i] = curr_pipe_ctx->plane_state->src_rect.height; + } + } + DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - " "dcfclk:%d - dppclk:%d - dispclk_hw:%d - " "dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - " - "dtbclk_hw:%d - fclk_hw:%d\n", + "dtbclk_hw:%d - fclk_hw:%d - pix_clk_0:%d - pix_clk_1:%d - " + "pix_clk_2:%d - pix_clk_3:%d - mall_ss_size:%d - p_state_type_0:%d - " + "p_state_type_1:%d - p_state_type_2:%d - p_state_type_3:%d - " + "pix_width_0:%d - pix_height_0:%d - refresh_rate_0:%lld - is_scaled_0:%d - " + "pix_width_1:%d - pix_height_1:%d - refresh_rate_1:%lld - is_scaled_1:%d - " + "pix_width_2:%d - pix_height_2:%d - refresh_rate_2:%lld - is_scaled_2:%d - " + "pix_width_3:%d - pix_height_3:%d - refresh_rate_3:%lld - is_scaled_3:%d - LOG_END\n", dramclk_khz_override, fclk_khz_override, new_clocks->dcfclk_khz, @@ -521,7 +589,14 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg, - fclk_khz_reg); + fclk_khz_reg, + pix_clk_list[0], pix_clk_list[1], pix_clk_list[3], pix_clk_list[2], + mall_ss_size_bytes, + p_state_list[0], p_state_list[1], p_state_list[2], p_state_list[3], + disp_src_width_list[0], disp_src_height_list[0], disp_src_refresh_list[0], is_scaled_list[0], + disp_src_width_list[1], disp_src_height_list[1], disp_src_refresh_list[1], is_scaled_list[1], + disp_src_width_list[2], disp_src_height_list[2], disp_src_refresh_list[2], is_scaled_list[2], + disp_src_width_list[3], disp_src_height_list[3], disp_src_refresh_list[3], is_scaled_list[3]); } } @@ -694,6 +769,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, /* DCCG requires KHz precision for DTBCLK */ clk_mgr_base->clks.ref_dtbclk_khz = dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz)); + dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz); } @@ -722,7 +798,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.dispclk_khz / 1000 / 7); if (dc->config.enable_auto_dpm_test_logs) { - dcn32_auto_dpm_test_log(new_clocks, clk_mgr); + dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context); } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 353d5fb9e3..9cbab880c6 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -50,6 +50,7 @@ #include "dc_dmub_srv.h" #include "link.h" #include "logger_types.h" + #undef DC_LOGGER #define DC_LOGGER \ clk_mgr->base.base.ctx->logger @@ -80,12 +81,12 @@ static int dcn35_get_active_display_cnt_wa( struct dc *dc, - struct dc_state *context) + struct dc_state *context, + int *all_active_disps) { - int i, display_count; + int i, display_count = 0; bool tmds_present = false; - display_count = 0; for (i = 0; i < context->stream_count; i++) { const struct dc_stream_state *stream = context->streams[i]; @@ -103,7 +104,8 @@ static int dcn35_get_active_display_cnt_wa( 
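
The refresh-rate computation in the dcn32_auto_dpm_test_log() hunk above is a ceiling division, rate = ceil(pix_clk / (h_total * v_total)), written as (pix_clk + h_total * v_total - 1) / (h_total * v_total) and split into two div_u64() steps so each divisor stays 32-bit. A standalone check against standard CTA-861 1080p60 timing:

#include <stdint.h>
#include <stdio.h>

static uint64_t refresh_hz(uint32_t pix_clk_100hz, uint32_t h_total, uint32_t v_total)
{
	/* +(v_total * h_total - 1) makes the subsequent division round up */
	uint64_t rate = (uint64_t)pix_clk_100hz * 100 +
			(uint64_t)v_total * h_total - 1;

	rate /= v_total;	/* stands in for div_u64() */
	rate /= h_total;
	return rate;
}

int main(void)
{
	/* 1080p60: 148.5 MHz pixel clock, 2200 x 1125 total raster */
	printf("%llu Hz\n", (unsigned long long)refresh_hz(1485000, 2200, 1125)); /* 60 */
	return 0;
}
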
link->link_enc->funcs->is_dig_enabled(link->link_enc)) display_count++; } - + if (all_active_disps != NULL) + *all_active_disps = display_count; /* WA for hang on HDMI after display off back on*/ if (display_count == 0 && tmds_present) display_count = 1; @@ -216,15 +218,16 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; - int display_count; + int display_count = 0; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; + int all_active_disps = 0; if (dc->work_arounds.skip_clock_update) return; - /* DTBCLK is fixed, so set a default if unspecified. */ + display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps); if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz) new_clocks->ref_dtbclk_khz = 600000; @@ -246,7 +249,6 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, } /* check that we're not already in lower */ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { - display_count = dcn35_get_active_display_cnt_wa(dc, context); /* if we can go lower, go lower */ if (display_count == 0) clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; @@ -382,19 +384,6 @@ static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base) dcn35_smu_enable_pme_wa(clk_mgr); } -void dcn35_init_clocks(struct clk_mgr *clk_mgr) -{ - uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; - - memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); - - // Assumption is that boot state always supports pstate - clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk - clk_mgr->clks.p_state_change_support = true; - clk_mgr->clks.prev_p_state_change_support = true; - clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; - clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; -} bool dcn35_are_clock_states_equal(struct dc_clocks *a, struct dc_clocks *b) @@ -416,11 +405,22 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a, } static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, - struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) + struct clk_mgr_dcn35 *clk_mgr) { - } +void dcn35_init_clocks(struct clk_mgr *clk_mgr) +{ + uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk + clk_mgr->clks.p_state_change_support = true; + clk_mgr->clks.prev_p_state_change_support = true; + clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; + clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; +} static struct clk_bw_params dcn35_bw_params = { .vram_type = Ddr4MemType, .num_channels = 1, @@ -436,32 +436,32 @@ static struct wm_table ddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + 
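
dcn35_get_active_display_cnt_wa() above now reports the raw display count through an optional out-parameter, while its return value keeps the workaround adjustment. A simplified standalone sketch of the pattern; the real function also accounts for TMDS and DIG-encoder state:

#include <stdio.h>

static int active_display_cnt_wa(const int *dpms_on, int n, int *all_active)
{
	int i, count = 0;

	for (i = 0; i < n; i++)
		if (dpms_on[i])
			count++;

	if (all_active != NULL)
		*all_active = count;	/* report the unadjusted count */

	/* WA: pretend one display is active even when none are */
	if (count == 0)
		count = 1;
	return count;
}

int main(void)
{
	int dpms[3] = { 0, 0, 0 };
	int real = -1;

	printf("wa count = %d\n", active_display_cnt_wa(dpms, 3, &real)); /* 1 */
	printf("real count = %d\n", real);                                /* 0 */
	printf("wa count (NULL) = %d\n", active_display_cnt_wa(dpms, 3, NULL));
	return 0;
}
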
.sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, } @@ -473,32 +473,32 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, .valid = true, }, } @@ -825,7 +825,7 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base) struct dc_state *context = dc->current_state; if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { - display_count = dcn35_get_active_display_cnt_wa(dc, context); + display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL); /* if we can go lower, go lower */ if (display_count == 0) clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; @@ -992,7 +992,6 @@ void dcn35_clk_mgr_construct( struct dccg *dccg) { struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 }; - struct clk_log_info log_info = {0}; clk_mgr->base.base.ctx = ctx; clk_mgr->base.base.funcs = &dcn35_funcs; @@ -1045,7 +1044,7 @@ void dcn35_clk_mgr_construct( dcn35_bw_params.wm_table = ddr5_wm_table; } /* Saved clocks configured at boot for debug purposes */ - dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); + dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr); clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base); clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c index b6b8c3ca15..6d4a1ffab5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c @@ -116,6 +116,9 @@ static uint32_t dcn35_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un msleep(delay_us/1000); else if (delay_us > 0) udelay(delay_us); + + if (clk_mgr->base.ctx->dc->debug.disable_timeout) + max_retries++; } while (max_retries--); return res_val; @@ -276,7 +279,7 @@ void dcn35_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, u clk_mgr, VBIOSSMC_MSG_SetDisplayIdleOptimizations, idle_info); - smu_print("VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %d\n", idle_info); + smu_print("%s: VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %x\n", __func__, idle_info); } void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) @@ -295,7 +298,7 @@ void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool e clk_mgr, 
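
The dcn35_smu_wait_for_response() hunk above turns the bounded poll into an effectively unbounded one when the disable_timeout debug flag is set, by re-incrementing max_retries so the loop's post-decrement never runs out. A standalone model of that loop shape:

#include <stdbool.h>
#include <stdio.h>

static int poll_response(bool disable_timeout)
{
	unsigned int max_retries = 10;
	unsigned int polls = 0;
	int res = 0;

	do {
		/* res = read_response_register(); simulated below */
		if (++polls == 25)
			res = 1;	/* completion arrives late */
		if (res)
			break;
		if (disable_timeout)
			max_retries++;	/* cancel this iteration's decrement */
	} while (max_retries--);

	return res ? (int)polls : -1;
}

int main(void)
{
	printf("bounded: %d\n", poll_response(false));	/* -1: gave up after 11 polls */
	printf("unbounded: %d\n", poll_response(true));	/* 25 */
	return 0;
}
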
VBIOSSMC_MSG_SetDisplayIdleOptimizations, idle_info.data); - smu_print("dcn35_smu_enable_phy_refclk_pwrdwn = %d\n", enable ? 1 : 0); + smu_print("%s smu_enable_phy_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0); } void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) @@ -307,6 +310,7 @@ void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) clk_mgr, VBIOSSMC_MSG_UpdatePmeRestore, 0); + smu_print("%s: SMC_MSG_UpdatePmeRestore\n", __func__); } void dcn35_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high) @@ -347,7 +351,7 @@ void dcn35_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support) { - unsigned int msg_id, param; + unsigned int msg_id, param, retv; if (!clk_mgr->smu_present) return; @@ -357,27 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst case DCN_ZSTATE_SUPPORT_ALLOW: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10) | (1 << 9) | (1 << 8); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = %d\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_DISALLOW: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = 0; + smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = %d\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 10) | (1 << 8); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param); break; case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY: msg_id = VBIOSSMC_MSG_AllowZstatesEntry; param = (1 << 8); + smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param); break; default: //DCN_ZSTATE_SUPPORT_UNKNOWN @@ -387,11 +396,11 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst } - dcn35_smu_send_msg_with_param( + retv = dcn35_smu_send_msg_with_param( clk_mgr, msg_id, param); - smu_print("dcn35_smu_set_zstate_support msg_id = %d, param = %d\n", msg_id, param); + smu_print("%s: msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv); } int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr) @@ -405,7 +414,7 @@ int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr) VBIOSSMC_MSG_GetDprefclkFreq, 0); - smu_print("dcn35_smu_get_DPREF clk = %d mhz\n", dprefclk); + smu_print("%s: SMU DPREF clk = %d mhz\n", __func__, dprefclk); return dprefclk * 1000; } @@ -420,7 +429,7 @@ int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr) VBIOSSMC_MSG_GetDtbclkFreq, 0); - smu_print("dcn35_smu_get_dtbclk = %d mhz\n", dtbclk); + smu_print("%s: get_dtbclk = %dmhz\n", __func__, dtbclk); return dtbclk * 1000; } /* Arg = 1: Turn DTB on; 0: Turn DTB CLK OFF. when it is on, it is 600MHZ */ @@ -433,7 +442,7 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable) clk_mgr, VBIOSSMC_MSG_SetDtbClk, enable); - smu_print("dcn35_smu_set_dtbclk = %d \n", enable ? 1 : 0); + smu_print("%s: smu_set_dtbclk = %d\n", __func__, enable ? 
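
The z-state cases above all send VBIOSSMC_MSG_AllowZstatesEntry and differ only in the bitmask parameter. The encodings from the switch, collected into one helper; the per-bit meaning (bit 8 ~ Z8, bit 10 ~ Z10, bit 9 set only in the full-allow mask) is inferred from the case labels, not documented here:

#include <stdio.h>

#define ZSTATE_Z8   (1u << 8)
#define ZSTATE_BIT9 (1u << 9)
#define ZSTATE_Z10  (1u << 10)

enum zstate_support { ALLOW, DISALLOW, ALLOW_Z10_ONLY, ALLOW_Z8_Z10_ONLY, ALLOW_Z8_ONLY };

static unsigned int zstate_param(enum zstate_support s)
{
	switch (s) {
	case ALLOW:		return ZSTATE_Z10 | ZSTATE_BIT9 | ZSTATE_Z8;
	case ALLOW_Z10_ONLY:	return ZSTATE_Z10;
	case ALLOW_Z8_Z10_ONLY:	return ZSTATE_Z10 | ZSTATE_Z8;
	case ALLOW_Z8_ONLY:	return ZSTATE_Z8;
	case DISALLOW:
	default:		return 0;
	}
}

int main(void)
{
	printf("ALLOW -> 0x%x\n", zstate_param(ALLOW));			/* 0x700 */
	printf("ALLOW_Z8_Z10_ONLY -> 0x%x\n", zstate_param(ALLOW_Z8_Z10_ONLY)); /* 0x500 */
	return 0;
}
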
1 : 0); } void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) @@ -442,30 +451,45 @@ void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *cl clk_mgr, VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown, enable); + smu_print("%s: smu_enable_48mhz_tmdp_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0); } int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr) { - return dcn35_smu_send_msg_with_param( + int retv; + + retv = dcn35_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_DispPsrExit, 0); + smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv); + return retv; } int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr) { - return dcn35_smu_send_msg_with_param( + int retv; + + retv = dcn35_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_QueryIPS2Support, 0); + + //smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv); + return retv; } void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param) { REG_WRITE(MP1_SMN_C2PMSG_71, param); + //smu_print("%s: write_ips_scratch = %x\n", __func__, param); } uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr) { - return REG_READ(MP1_SMN_C2PMSG_71); + uint32_t retv; + + retv = REG_READ(MP1_SMN_C2PMSG_71); + //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv); + return retv; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index b51208f44c..3c3d613c5f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -34,6 +34,8 @@ #include "dce/dce_hwseq.h" #include "resource.h" +#include "dc_state.h" +#include "dc_state_priv.h" #include "gpio_service_interface.h" #include "clk_mgr.h" @@ -409,9 +411,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, * avoid conflicting with firmware updates. 
*/ if (dc->ctx->dce_version > DCE_VERSION_MAX) - if (dc->optimized_required || dc->wm_optimized_required) + if (dc->optimized_required) return false; + if (!memcmp(&stream->adjust, adjust, sizeof(*adjust))) + return true; + + dc_exit_ips_for_hw_access(dc); + stream->adjust.v_total_max = adjust->v_total_max; stream->adjust.v_total_mid = adjust->v_total_mid; stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; @@ -452,6 +459,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, int i = 0; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -482,6 +491,8 @@ bool dc_stream_get_crtc_position(struct dc *dc, bool ret = false; struct crtc_position position; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -601,6 +612,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, if (pipe == NULL) return false; + dc_exit_ips_for_hw_access(dc); + /* By default, capture the full frame */ param.windowa_x_start = 0; param.windowa_y_start = 0; @@ -660,6 +673,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, struct pipe_ctx *pipe; struct timing_generator *tg; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream == stream) @@ -684,6 +699,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, int i; struct pipe_ctx *pipe_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { @@ -719,6 +736,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream, if (option > DITHER_OPTION_MAX) return; + dc_exit_ips_for_hw_access(stream->ctx->dc); + stream->dither_option = option; memset(¶ms, 0, sizeof(params)); @@ -743,6 +762,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre bool ret = false; struct pipe_ctx *pipes; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { pipes = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -760,6 +781,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) bool ret = false; struct pipe_ctx *pipes; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { @@ -786,6 +809,8 @@ void dc_stream_set_static_screen_params(struct dc *dc, struct pipe_ctx *pipes_affected[MAX_PIPES]; int num_pipes_affected = 0; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < num_streams; i++) { struct dc_stream_state *stream = streams[i]; @@ -808,7 +833,7 @@ static void dc_destruct(struct dc *dc) link_enc_cfg_init(dc, dc->current_state); if (dc->current_state) { - dc_release_state(dc->current_state); + dc_state_release(dc->current_state); dc->current_state = NULL; } @@ -1020,29 +1045,27 @@ static bool dc_construct(struct dc *dc, } #endif + if (!create_links(dc, init_params->num_virtual_links)) + goto fail; + + /* Create additional DIG link encoder objects if fewer than the platform + * supports were created during link construction. 
+ */ + if (!create_link_encoders(dc)) + goto fail; + /* Creation of current_state must occur after dc->dml * is initialized in dc_create_resource_pool because * on creation it copies the contents of dc->dml */ - dc->current_state = dc_create_state(dc); + dc->current_state = dc_state_create(dc); if (!dc->current_state) { dm_error("%s: failed to create validate ctx\n", __func__); goto fail; } - if (!create_links(dc, init_params->num_virtual_links)) - goto fail; - - /* Create additional DIG link encoder objects if fewer than the platform - * supports were created during link construction. - */ - if (!create_link_encoders(dc)) - goto fail; - - dc_resource_state_construct(dc, dc->current_state); - return true; fail: @@ -1085,7 +1108,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc, } } -static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) +static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) { if (dc->ctx->dce_version >= DCN_VERSION_1_0) { memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color)); @@ -1105,9 +1128,9 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) - get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) - get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); + get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); } } } @@ -1115,7 +1138,7 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte static void disable_dangling_plane(struct dc *dc, struct dc_state *context) { int i, j; - struct dc_state *dangling_context = dc_create_state(dc); + struct dc_state *dangling_context = dc_state_create_current_copy(dc); struct dc_state *current_ctx; struct pipe_ctx *pipe; struct timing_generator *tg; @@ -1123,8 +1146,6 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) if (dangling_context == NULL) return; - dc_resource_state_copy_construct(dc->current_state, dangling_context); - for (i = 0; i < dc->res_pool->pipe_count; i++) { struct dc_stream_state *old_stream = dc->current_state->res_ctx.pipe_ctx[i].stream; @@ -1161,6 +1182,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) } if (should_disable && old_stream) { + bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM; pipe = &dc->current_state->res_ctx.pipe_ctx[i]; tg = pipe->stream_res.tg; /* When disabling plane for a phantom pipe, we must turn on the @@ -1169,22 +1191,29 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) * state that can result in underflow or hang when enabling it * again for different use. 
*/ - if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (is_phantom) { if (tg->funcs->enable_crtc) { int main_pipe_width, main_pipe_height; + struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream); - main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width; - main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height; + main_pipe_width = old_paired_stream->dst.width; + main_pipe_height = old_paired_stream->dst.height; if (dc->hwss.blank_phantom) dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); tg->funcs->enable_crtc(tg); } } - dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); + + if (is_phantom) + dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true); + else + dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); - if (pipe->stream && pipe->plane_state) - dc_update_viusal_confirm_color(dc, context, pipe); + if (pipe->stream && pipe->plane_state) { + set_p_state_switch_method(dc, context, pipe); + dc_update_visual_confirm_color(dc, context, pipe); + } if (dc->hwss.apply_ctx_for_surface) { apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); @@ -1203,7 +1232,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) * The OTG is set to disable on falling edge of VUPDATE so the plane disable * will still get it's double buffer update. */ - if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (is_phantom) { if (tg->funcs->disable_phantom_crtc) tg->funcs->disable_phantom_crtc(tg); } @@ -1212,7 +1241,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) current_ctx = dc->current_state; dc->current_state = dangling_context; - dc_release_state(current_ctx); + dc_state_release(current_ctx); } static void disable_vbios_mode_if_required( @@ -1276,6 +1305,54 @@ static void disable_vbios_mode_if_required( } } +/** + * wait_for_blank_complete - wait for all active OPPs to finish pending blank + * pattern updates + * + * @dc: [in] dc reference + * @context: [in] hardware context in use + */ +static void wait_for_blank_complete(struct dc *dc, + struct dc_state *context) +{ + struct pipe_ctx *opp_head; + struct dce_hwseq *hws = dc->hwseq; + int i; + + if (!hws->funcs.wait_for_blank_complete) + return; + + for (i = 0; i < MAX_PIPES; i++) { + opp_head = &context->res_ctx.pipe_ctx[i]; + + if (!resource_is_pipe_type(opp_head, OPP_HEAD) || + dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM) + continue; + + hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp); + } +} + +static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context) +{ + struct pipe_ctx *otg_master; + struct timing_generator *tg; + int i; + + for (i = 0; i < MAX_PIPES; i++) { + otg_master = &context->res_ctx.pipe_ctx[i]; + if (!resource_is_pipe_type(otg_master, OTG_MASTER) || + dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM) + continue; + tg = otg_master->stream_res.tg; + if (tg->funcs->wait_odm_doublebuffer_pending_clear) + tg->funcs->wait_odm_doublebuffer_pending_clear(tg); + } + + /* ODM update may require to reprogram blank pattern for each OPP */ + wait_for_blank_complete(dc, context); +} + static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) { int i; @@ -1284,7 +1361,7 @@ static void wait_for_no_pipes_pending(struct dc 
*dc, struct dc_state *context) int count = 0; struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) continue; /* Timeout 100 ms */ @@ -1510,7 +1587,7 @@ static void program_timing_sync( } for (k = 0; k < group_size; k++) { - struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); + struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream); status->timing_sync_info.group_id = num_group; status->timing_sync_info.group_size = group_size; @@ -1521,7 +1598,7 @@ static void program_timing_sync( } - /* remove any other pipes that are already been synced */ + /* remove any other unblanked pipes as they have already been synced */ if (dc->config.use_pipe_ctx_sync_logic) { /* check pipe's syncd to decide which pipe to be removed */ for (j = 1; j < group_size; j++) { @@ -1534,6 +1611,7 @@ static void program_timing_sync( pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; } } else { + /* remove any other pipes by checking valid plane */ for (j = j + 1; j < group_size; j++) { bool is_blanked; @@ -1554,7 +1632,7 @@ static void program_timing_sync( if (group_size > 1) { if (sync_type == TIMING_SYNCHRONIZABLE) { dc->hwss.enable_timing_synchronization( - dc, group_index, group_size, pipe_set); + dc, ctx, group_index, group_size, pipe_set); } else if (sync_type == VBLANK_SYNCHRONIZABLE) { dc->hwss.enable_vblanks_synchronization( @@ -1759,6 +1837,8 @@ void dc_enable_stereo( int i, j; struct pipe_ctx *pipe; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { if (context != NULL) { pipe = &context->res_ctx.pipe_ctx[i]; @@ -1778,6 +1858,8 @@ void dc_enable_stereo( void dc_trigger_sync(struct dc *dc, struct dc_state *context) { if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { + dc_exit_ips_for_hw_access(dc); + enable_timing_multisync(dc, context); program_timing_sync(dc, context); } @@ -1836,7 +1918,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; /* Check old context for SubVP */ - subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); + subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); if (subvp_prev_use) break; } @@ -1962,6 +2044,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c context->stream_count == 0) { /* Must wait for no flips to be pending before doing optimize bw */ wait_for_no_pipes_pending(dc, context); + /* + * optimized dispclk depends on ODM setup. Need to wait for ODM + * update pending complete before optimizing bandwidth. 
+ */ + wait_for_odm_update_pending_complete(dc, context); /* pplib is notified if disp_num changed */ dc->hwss.optimize_bandwidth(dc, context); /* Need to do otg sync again as otg could be out of sync due to otg @@ -1994,9 +2081,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c old_state = dc->current_state; dc->current_state = context; - dc_release_state(old_state); + dc_state_release(old_state); - dc_retain_state(dc->current_state); + dc_state_retain(dc->current_state); return result; } @@ -2034,6 +2121,8 @@ enum dc_status dc_commit_streams(struct dc *dc, if (!streams_changed(dc, streams, stream_count)) return res; + dc_exit_ips_for_hw_access(dc); + DC_LOG_DC("%s: %d streams\n", __func__, stream_count); for (i = 0; i < stream_count; i++) { @@ -2067,12 +2156,10 @@ enum dc_status dc_commit_streams(struct dc *dc, if (handle_exit_odm2to1) res = commit_minimal_transition_state(dc, dc->current_state); - context = dc_create_state(dc); + context = dc_state_create_current_copy(dc); if (!context) goto context_alloc_fail; - dc_resource_state_copy_construct_current(dc, context); - res = dc_validate_with_context(dc, set, stream_count, context, false); if (res != DC_OK) { BREAK_TO_DEBUGGER(); @@ -2087,7 +2174,7 @@ enum dc_status dc_commit_streams(struct dc *dc, streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; if (dc_is_embedded_signal(streams[i]->signal)) { - struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]); + struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]); if (dc->hwss.is_abm_supported) status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); @@ -2098,7 +2185,7 @@ enum dc_status dc_commit_streams(struct dc *dc, } fail: - dc_release_state(context); + dc_state_release(context); context_alloc_fail: @@ -2152,7 +2239,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) pipe = &context->res_ctx.pipe_ctx[i]; // Don't check flip pending on phantom pipes - if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)) + if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)) continue; /* Must set to false to start with, due to OR in update function */ @@ -2210,7 +2297,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) if (context->res_ctx.pipe_ctx[i].stream == NULL || context->res_ctx.pipe_ctx[i].plane_state == NULL) { context->res_ctx.pipe_ctx[i].pipe_idx = i; - dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); + dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]); } process_deferred_updates(dc); @@ -2222,104 +2309,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) } dc->optimized_required = false; - dc->wm_optimized_required = false; -} - -static void init_state(struct dc *dc, struct dc_state *context) -{ - /* Each context must have their own instance of VBA and in order to - * initialize and obtain IP and SOC the base DML instance from DC is - * initially copied into every context - */ - memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); -} - -struct dc_state *dc_create_state(struct dc *dc) -{ - struct dc_state *context = kvzalloc(sizeof(struct dc_state), - GFP_KERNEL); - - if (!context) - return NULL; - - init_state(dc, context); - -#ifdef CONFIG_DRM_AMD_DC_FP - if (dc->debug.using_dml2) { - dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2); - } -#endif - kref_init(&context->refcount); 
- - return context; -} - -struct dc_state *dc_copy_state(struct dc_state *src_ctx) -{ - int i, j; - struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); - - if (!new_ctx) - return NULL; - memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); - -#ifdef CONFIG_DRM_AMD_DC_FP - if (new_ctx->bw_ctx.dml2 && !dml2_create_copy(&new_ctx->bw_ctx.dml2, src_ctx->bw_ctx.dml2)) { - dc_release_state(new_ctx); - return NULL; - } -#endif - - for (i = 0; i < MAX_PIPES; i++) { - struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; - - if (cur_pipe->top_pipe) - cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; - - if (cur_pipe->bottom_pipe) - cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; - - if (cur_pipe->prev_odm_pipe) - cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; - - if (cur_pipe->next_odm_pipe) - cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; - - } - - for (i = 0; i < new_ctx->stream_count; i++) { - dc_stream_retain(new_ctx->streams[i]); - for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) - dc_plane_state_retain( - new_ctx->stream_status[i].plane_states[j]); - } - - kref_init(&new_ctx->refcount); - - return new_ctx; -} - -void dc_retain_state(struct dc_state *context) -{ - kref_get(&context->refcount); -} - -static void dc_state_free(struct kref *kref) -{ - struct dc_state *context = container_of(kref, struct dc_state, refcount); - dc_resource_state_destruct(context); - -#ifdef CONFIG_DRM_AMD_DC_FP - dml2_destroy(context->bw_ctx.dml2); - context->bw_ctx.dml2 = 0; -#endif - - kvfree(context); -} - -void dc_release_state(struct dc_state *context) -{ - kref_put(&context->refcount, dc_state_free); } bool dc_set_generic_gpio_for_stereo(bool enable, @@ -2742,8 +2731,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream( } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { dc->optimized_required = true; } - - dc->optimized_required |= dc->wm_optimized_required; } return type; @@ -2951,9 +2938,6 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->vrr_active_fixed) stream->vrr_active_fixed = *update->vrr_active_fixed; - if (update->crtc_timing_adjust) - stream->adjust = *update->crtc_timing_adjust; - if (update->dpms_off) stream->dpms_off = *update->dpms_off; @@ -2994,11 +2978,9 @@ static void copy_stream_update_to_stream(struct dc *dc, update->dsc_config->num_slices_v != 0); /* Use temporarry context for validating new DSC config */ - struct dc_state *dsc_validate_context = dc_create_state(dc); + struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state); if (dsc_validate_context) { - dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); - stream->timing.dsc_cfg = *update->dsc_config; stream->timing.flags.DSC = enable_dsc; if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { @@ -3007,7 +2989,7 @@ static void copy_stream_update_to_stream(struct dc *dc, update->dsc_config = NULL; } - dc_release_state(dsc_validate_context); + dc_state_release(dsc_validate_context); } else { DC_ERROR("Failed to allocate new validate context for DSC change\n"); update->dsc_config = NULL; @@ -3106,30 +3088,27 @@ static bool update_planes_and_stream_state(struct dc *dc, new_planes[i] = srf_updates[i].surface; /* initialize scratch memory for building context */ - 
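
The block removed above relocates dc_state life-cycle management into dc_state.c (dc_state_create / dc_state_create_copy / dc_state_retain / dc_state_release); the kref-based contract itself is unchanged. A standalone model of that contract, with kref_init/kref_get/kref_put mimicked by a plain counter:

#include <stdio.h>
#include <stdlib.h>

struct state { int refcount; };

static struct state *state_create(void)
{
	struct state *s = calloc(1, sizeof(*s));

	if (s)
		s->refcount = 1;	/* kref_init() */
	return s;
}

static void state_retain(struct state *s)	/* kref_get() */
{
	s->refcount++;
}

static void state_release(struct state *s)	/* kref_put() */
{
	if (--s->refcount == 0) {
		printf("freeing state\n");	/* release callback */
		free(s);
	}
}

int main(void)
{
	struct state *s = state_create();

	state_retain(s);	/* e.g. dc->current_state takes a reference */
	state_release(s);	/* creator drops its reference: still alive */
	state_release(s);	/* last user: object is freed here */
	return 0;
}
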
context = dc_create_state(dc); + context = dc_state_create_copy(dc->current_state); if (context == NULL) { DC_ERROR("Failed to allocate new validate context!\n"); return false; } - dc_resource_state_copy_construct( - dc->current_state, context); - /* For each full update, remove all existing phantom pipes first. * Ensures that we have enough pipes for newly added MPO planes */ - if (dc->res_pool->funcs->remove_phantom_pipes) - dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); + dc_state_remove_phantom_streams_and_planes(dc, context); + dc_state_release_phantom_streams_and_planes(dc, context); /*remove old surfaces from context */ - if (!dc_rem_all_planes_for_stream(dc, stream, context)) { + if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) { BREAK_TO_DEBUGGER(); goto fail; } /* add surface to context */ - if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { + if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { BREAK_TO_DEBUGGER(); goto fail; @@ -3154,19 +3133,6 @@ static bool update_planes_and_stream_state(struct dc *dc, if (update_type == UPDATE_TYPE_FULL) { if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { - /* For phantom pipes we remove and create a new set of phantom pipes - * for each full update (because we don't know if we'll need phantom - * pipes until after the first round of validation). However, if validation - * fails we need to keep the existing phantom pipes (because we don't update - * the dc->current_state). - * - * The phantom stream/plane refcount is decremented for validation because - * we assume it'll be removed (the free comes when the dc_state is freed), - * but if validation fails we have to increment back the refcount so it's - * consistent. 
- */ - if (dc->res_pool->funcs->retain_phantom_pipes) - dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state); BREAK_TO_DEBUGGER(); goto fail; } @@ -3187,7 +3153,7 @@ static bool update_planes_and_stream_state(struct dc *dc, return true; fail: - dc_release_state(context); + dc_state_release(context); return false; @@ -3488,18 +3454,26 @@ static void commit_planes_for_stream_fast(struct dc *dc, { int i, j; struct pipe_ctx *top_pipe_to_program = NULL; + struct dc_stream_status *stream_status = NULL; + dc_exit_ips_for_hw_access(dc); + dc_z10_restore(dc); top_pipe_to_program = resource_get_otg_master_for_stream( &context->res_ctx, stream); - if (dc->debug.visual_confirm) { - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + if (!top_pipe_to_program) + return; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) { + set_p_state_switch_method(dc, context, pipe); - if (pipe->stream && pipe->plane_state) - dc_update_viusal_confirm_color(dc, context, pipe); + if (dc->debug.visual_confirm) + dc_update_visual_confirm_color(dc, context, pipe); } } @@ -3523,6 +3497,8 @@ static void commit_planes_for_stream_fast(struct dc *dc, } } + stream_status = dc_state_get_stream_status(context, stream); + build_dmub_cmd_list(dc, srf_updates, surface_count, @@ -3535,7 +3511,8 @@ static void commit_planes_for_stream_fast(struct dc *dc, context->dmub_cmd_count, context->block_sequence, &(context->block_sequence_steps), - top_pipe_to_program); + top_pipe_to_program, + stream_status); hwss_execute_sequence(dc, context->block_sequence, context->block_sequence_steps); @@ -3548,7 +3525,7 @@ static void commit_planes_for_stream_fast(struct dc *dc, top_pipe_to_program->stream->update_flags.raw = 0; } -static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context) +static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context) { /* * This function calls HWSS to wait for any potentially double buffered @@ -3586,6 +3563,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state } } } + wait_for_odm_update_pending_complete(dc, dc_context); } static void commit_planes_for_stream(struct dc *dc, @@ -3607,6 +3585,8 @@ static void commit_planes_for_stream(struct dc *dc, // dc->current_state anymore, so we have to cache it before we apply // the new SubVP context subvp_prev_use = false; + dc_exit_ips_for_hw_access(dc); + dc_z10_restore(dc); if (update_type == UPDATE_TYPE_FULL) wait_for_outstanding_hw_updates(dc, context); @@ -3631,7 +3611,7 @@ static void commit_planes_for_stream(struct dc *dc, struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; // Check old context for SubVP - subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); + subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); if (subvp_prev_use) break; } @@ -3639,19 +3619,22 @@ static void commit_planes_for_stream(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { subvp_curr_use = true; break; } } - if (dc->debug.visual_confirm) - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = 
&context->res_ctx.pipe_ctx[i]; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) { + set_p_state_switch_method(dc, context, pipe); - if (pipe->stream && pipe->plane_state) - dc_update_viusal_confirm_color(dc, context, pipe); + if (dc->debug.visual_confirm) + dc_update_visual_confirm_color(dc, context, pipe); } + } if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { struct pipe_ctx *mpcc_pipe; @@ -3918,7 +3901,9 @@ static void commit_planes_for_stream(struct dc *dc, * programming has completed (we turn on phantom OTG in order * to complete the plane disable for phantom pipes). */ - dc->hwss.apply_ctx_to_hw(dc, context); + + if (dc->hwss.disable_phantom_streams) + dc->hwss.disable_phantom_streams(dc, context); } if (update_type != UPDATE_TYPE_FAST) @@ -4024,7 +4009,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) { subvp_active = true; break; } @@ -4061,7 +4046,7 @@ struct pipe_split_policy_backup { static void release_minimal_transition_state(struct dc *dc, struct dc_state *context, struct pipe_split_policy_backup *policy) { - dc_release_state(context); + dc_state_release(context); /* restore previous pipe split and odm policy */ if (!dc->config.is_vmin_only_asic) dc->debug.pipe_split_policy = policy->mpc_policy; @@ -4072,7 +4057,7 @@ static void release_minimal_transition_state(struct dc *dc, static struct dc_state *create_minimal_transition_state(struct dc *dc, struct dc_state *base_context, struct pipe_split_policy_backup *policy) { - struct dc_state *minimal_transition_context = dc_create_state(dc); + struct dc_state *minimal_transition_context = NULL; unsigned int i, j; if (!dc->config.is_vmin_only_asic) { @@ -4084,7 +4069,9 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, policy->subvp_policy = dc->debug.force_disable_subvp; dc->debug.force_disable_subvp = true; - dc_resource_state_copy_construct(base_context, minimal_transition_context); + minimal_transition_context = dc_state_create_copy(base_context); + if (!minimal_transition_context) + return NULL; /* commit minimal state */ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) { @@ -4116,7 +4103,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, bool success = false; struct dc_state *minimal_transition_context; struct pipe_split_policy_backup policy; - struct mall_temp_config mall_temp_config; /* commit based on new context */ /* Since all phantom pipes are removed in full validation, @@ -4125,8 +4111,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, * pipe as subvp/phantom will be cleared (dc copy constructor * creates a shallow copy). 
*/ - if (dc->res_pool->funcs->save_mall_state) - dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config); minimal_transition_context = create_minimal_transition_state(dc, context, &policy); if (minimal_transition_context) { @@ -4139,16 +4123,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc, success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK; } release_minimal_transition_state(dc, minimal_transition_context, &policy); - if (dc->res_pool->funcs->restore_mall_state) - dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config); - /* If we do a minimal transition with plane removal and the context - * has subvp we also have to retain back the phantom stream / planes - * since the refcount is decremented as part of the min transition - * (we commit a state with no subvp, so the phantom streams / planes - * had to be removed). - */ - if (dc->res_pool->funcs->retain_phantom_pipes) - dc->res_pool->funcs->retain_phantom_pipes(dc, context); } if (!success) { @@ -4216,7 +4190,7 @@ static bool commit_minimal_transition_state(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) { subvp_in_use = true; break; } @@ -4403,8 +4377,7 @@ static bool full_update_required(struct dc *dc, stream_update->mst_bw_update || stream_update->func_shaper || stream_update->lut3d_func || - stream_update->pending_test_pattern || - stream_update->crtc_timing_adjust)) + stream_update->pending_test_pattern)) return true; if (stream) { @@ -4484,7 +4457,6 @@ bool dc_update_planes_and_stream(struct dc *dc, struct dc_state *context; enum surface_update_type update_type; int i; - struct mall_temp_config mall_temp_config; struct dc_fast_update fast_update[MAX_SURFACES] = {0}; /* In cases where MPO and split or ODM are used transitions can @@ -4495,6 +4467,8 @@ bool dc_update_planes_and_stream(struct dc *dc, bool is_plane_addition = 0; bool is_fast_update_only; + dc_exit_ips_for_hw_access(dc); + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream); @@ -4528,23 +4502,10 @@ bool dc_update_planes_and_stream(struct dc *dc, * pipe as subvp/phantom will be cleared (dc copy constructor * creates a shallow copy). */ - if (dc->res_pool->funcs->save_mall_state) - dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config); if (!commit_minimal_transition_state(dc, context)) { - dc_release_state(context); + dc_state_release(context); return false; } - if (dc->res_pool->funcs->restore_mall_state) - dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config); - - /* If we do a minimal transition with plane removal and the context - * has subvp we also have to retain back the phantom stream / planes - * since the refcount is decremented as part of the min transition - * (we commit a state with no subvp, so the phantom streams / planes - * had to be removed). 
- */ - if (dc->res_pool->funcs->retain_phantom_pipes) - dc->res_pool->funcs->retain_phantom_pipes(dc, context); update_type = UPDATE_TYPE_FULL; } @@ -4601,7 +4562,7 @@ bool dc_update_planes_and_stream(struct dc *dc, struct dc_state *old = dc->current_state; dc->current_state = context; - dc_release_state(old); + dc_state_release(old); // clear any forced full updates for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -4628,6 +4589,8 @@ void dc_commit_updates_for_stream(struct dc *dc, int i, j; struct dc_fast_update fast_update[MAX_SURFACES] = {0}; + dc_exit_ips_for_hw_access(dc); + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); stream_status = dc_stream_get_status(stream); context = dc->current_state; @@ -4660,14 +4623,12 @@ void dc_commit_updates_for_stream(struct dc *dc, if (update_type >= UPDATE_TYPE_FULL) { /* initialize scratch memory for building context */ - context = dc_create_state(dc); + context = dc_state_create_copy(state); if (context == NULL) { DC_ERROR("Failed to allocate new validate context!\n"); return; } - dc_resource_state_copy_construct(state, context); - for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -4706,7 +4667,7 @@ void dc_commit_updates_for_stream(struct dc *dc, if (update_type >= UPDATE_TYPE_FULL) { if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { DC_ERROR("Mode validation failed for stream update!\n"); - dc_release_state(context); + dc_state_release(context); return; } } @@ -4739,7 +4700,7 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_state *old = dc->current_state; dc->current_state = context; - dc_release_state(old); + dc_state_release(old); for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; @@ -4812,7 +4773,9 @@ void dc_set_power_state( switch (power_state) { case DC_ACPI_CM_POWER_STATE_D0: - dc_resource_state_construct(dc, dc->current_state); + dc_state_construct(dc, dc->current_state); + + dc_exit_ips_for_hw_access(dc); dc_z10_restore(dc); @@ -4827,7 +4790,7 @@ void dc_set_power_state( default: ASSERT(dc->current_state->stream_count == 0); - dc_resource_state_destruct(dc->current_state); + dc_state_destruct(dc->current_state); break; } @@ -4904,6 +4867,38 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable) return true; } +/* enable/disable eDP Replay without specify stream for eDP */ +bool dc_set_replay_allow_active(struct dc *dc, bool active) +{ + int i; + bool allow_active; + + for (i = 0; i < dc->current_state->stream_count; i++) { + struct dc_link *link; + struct dc_stream_state *stream = dc->current_state->streams[i]; + + link = stream->link; + if (!link) + continue; + + if (link->replay_settings.replay_feature_enabled) { + if (active && !link->replay_settings.replay_allow_active) { + allow_active = true; + if (!dc_link_set_replay_allow_active(link, &allow_active, + false, false, NULL)) + return false; + } else if (!active && link->replay_settings.replay_allow_active) { + allow_active = false; + if (!dc_link_set_replay_allow_active(link, &allow_active, + true, false, NULL)) + return false; + } + } + } + + return true; +} + void dc_allow_idle_optimizations(struct dc *dc, bool allow) { if (dc->debug.disable_idle_power_optimizations) @@ -4923,6 +4918,12 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) dc->idle_optimizations_allowed = allow; } +void 
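/*
 * dc_set_replay_allow_active() above walks every stream in the current state
 * and only issues a link-level transition when the requested state differs
 * from the cached replay_allow_active flag; note that the disable path passes
 * wait=true to dc_link_set_replay_allow_active() while the enable path does
 * not. A hypothetical call site (the log message is illustrative, not from
 * this patch):
 *
 *	if (!dc_set_replay_allow_active(dc, false))
 *		DC_LOG_WARNING("failed to pause Panel Replay\n");
 */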
dc_exit_ips_for_hw_access(struct dc *dc) +{ + if (dc->caps.ips_support) + dc_allow_idle_optimizations(dc, false); +} + bool dc_dmub_is_ips_idle_state(struct dc *dc) { if (dc->debug.disable_idle_power_optimizations) @@ -5443,6 +5444,8 @@ bool dc_abm_save_restore( struct dc_link *link = stream->sink->link; struct dc_link *edp_links[MAX_NUM_EDP]; + if (link->replay_settings.replay_feature_enabled) + return false; /*find primary pipe associated with stream*/ for (i = 0; i < MAX_PIPES; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index fc18b9dc94..9c05b1a071 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -31,6 +31,7 @@ #include "basics/dc_common.h" #include "resource.h" #include "dc_dmub_srv.h" +#include "dc_state_priv.h" #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) @@ -425,45 +426,130 @@ void get_hdr_visual_confirm_color( } void get_subvp_visual_confirm_color( - struct dc *dc, - struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color) { uint32_t color_value = MAX_TG_COLOR_VALUE; - bool enable_subvp = false; - int i; - - if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context) - return; + if (pipe_ctx) { + switch (pipe_ctx->p_state_type) { + case P_STATE_SUB_VP: + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + break; + case P_STATE_DRR_SUB_VP: + color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = 0; + break; + case P_STATE_V_BLANK_SUB_VP: + color->color_r_cr = 0; + color->color_g_y = 0; + color->color_b_cb = color_value; + break; + default: + break; + } + } +} - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; +void get_mclk_switch_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; - if (pipe->stream && pipe->stream->mall_stream_config.paired_stream && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - /* SubVP enable - red */ - color->color_g_y = 0; + if (pipe_ctx) { + switch (pipe_ctx->p_state_type) { + case P_STATE_V_BLANK: + color->color_r_cr = color_value; + color->color_g_y = color_value; color->color_b_cb = 0; + break; + case P_STATE_FPO: + color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = color_value; + break; + case P_STATE_V_ACTIVE: color->color_r_cr = color_value; - enable_subvp = true; - - if (pipe_ctx->stream == pipe->stream) - return; + color->color_g_y = 0; + color->color_b_cb = color_value; + break; + case P_STATE_SUB_VP: + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + break; + case P_STATE_DRR_SUB_VP: + color->color_r_cr = 0; + color->color_g_y = color_value; + color->color_b_cb = 0; + break; + case P_STATE_V_BLANK_SUB_VP: + color->color_r_cr = 0; + color->color_g_y = 0; + color->color_b_cb = color_value; + break; + default: break; } } +} - if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) { - color->color_r_cr = 0; - if (pipe_ctx->stream->allow_freesync == 1) { - /* SubVP enable and DRR on - green */ - color->color_b_cb = 0; - color->color_g_y = color_value; +void set_p_state_switch_method( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx) +{ + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + bool enable_subvp; + + if (!dc->ctx || !dc->ctx->dmub_srv || 
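/*
 * With this refactor the visual-confirm helpers no longer scan the context
 * themselves: set_p_state_switch_method() classifies each pipe once into
 * pipe_ctx->p_state_type, and get_subvp_visual_confirm_color() /
 * get_mclk_switch_visual_confirm_color() merely translate that enum into a
 * debug color. Summarizing the mapping encoded in the surrounding hunks
 * (R/G/B = channels driven to MAX_TG_COLOR_VALUE):
 *
 *	P_STATE_V_BLANK		R+G	yellow
 *	P_STATE_FPO		G+B	cyan
 *	P_STATE_V_ACTIVE	R+B	pink
 *	P_STATE_SUB_VP		R	red
 *	P_STATE_DRR_SUB_VP	G	green
 *	P_STATE_V_BLANK_SUB_VP	B	blue
 */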
!pipe_ctx || !vba || !context)
+		return;
+
+	if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
+			dm_dram_clock_change_unsupported) {
+		/* MCLK switching is supported */
+		if (!pipe_ctx->has_vactive_margin) {
+			/* In Vblank - yellow */
+			pipe_ctx->p_state_type = P_STATE_V_BLANK;
+
+			if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+				/* FPO + Vblank - cyan */
+				pipe_ctx->p_state_type = P_STATE_FPO;
+			}
 		} else {
-			/* SubVP enable and No DRR - blue */
-			color->color_g_y = 0;
-			color->color_b_cb = color_value;
+			/* In Vactive - pink */
+			pipe_ctx->p_state_type = P_STATE_V_ACTIVE;
+		}
+
+		/* SubVP */
+		enable_subvp = false;
+
+		for (int i = 0; i < dc->res_pool->pipe_count; i++) {
+			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+			if (pipe->stream && dc_state_get_paired_subvp_stream(context, pipe->stream) &&
+					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+				/* SubVP enable - red */
+				pipe_ctx->p_state_type = P_STATE_SUB_VP;
+				enable_subvp = true;
+
+				if (pipe_ctx->stream == pipe->stream)
+					return;
+				break;
+			}
+		}
+
+		if (enable_subvp && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_NONE) {
+			if (pipe_ctx->stream->allow_freesync == 1) {
+				/* SubVP enable and DRR on - green */
+				pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP;
+			} else {
+				/* SubVP enable and No DRR - blue */
+				pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP;
+			}
 		}
 	}
 }
@@ -473,7 +559,8 @@ void hwss_build_fast_sequence(struct dc *dc,
 		unsigned int dmub_cmd_count,
 		struct block_sequence block_sequence[],
 		int *num_steps,
-		struct pipe_ctx *pipe_ctx)
+		struct pipe_ctx *pipe_ctx,
+		struct dc_stream_status *stream_status)
 {
 	struct dc_plane_state *plane = pipe_ctx->plane_state;
 	struct dc_stream_state *stream = pipe_ctx->stream;
@@ -490,7 +577,8 @@ void hwss_build_fast_sequence(struct dc *dc,
 	if (dc->hwss.subvp_pipe_control_lock_fast) {
 		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
 		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true;
-		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
+				plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN;
 		block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
 		(*num_steps)++;
 	}
@@ -529,7 +617,7 @@ void hwss_build_fast_sequence(struct dc *dc,
 		}
 		if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) {
 			if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) &&
-					current_mpc_pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+					stream_status->mall_stream_config.type == SUBVP_MAIN) {
 				block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
 				block_sequence[*num_steps].params.subvp_save_surf_addr.addr = &current_mpc_pipe->plane_state->address;
 				block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index;
@@ -612,7 +700,8 @@ void hwss_build_fast_sequence(struct dc *dc,
 	if (dc->hwss.subvp_pipe_control_lock_fast) {
 		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
 		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false;
-		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+		block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
+				plane->flip_immediate &&
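/*
 * The fast-path lock parameters no longer carry a pipe_ctx pointer; both the
 * lock and the unlock steps now precompute one boolean,
 *
 *	subvp_immediate_flip = plane->flip_immediate &&
 *			stream_status->mall_stream_config.type == SUBVP_MAIN;
 *
 * which is possible because commit_planes_for_stream_fast() now looks up the
 * stream status once via dc_state_get_stream_status(context, stream) and
 * threads it through build_dmub_cmd_list()/hwss_build_fast_sequence().
 */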
stream_status->mall_stream_config.type == SUBVP_MAIN; block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } @@ -812,42 +901,6 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params) dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index); } -void get_mclk_switch_visual_confirm_color( - struct dc *dc, - struct dc_state *context, - struct pipe_ctx *pipe_ctx, - struct tg_color *color) -{ - uint32_t color_value = MAX_TG_COLOR_VALUE; - struct vba_vars_st *vba = &context->bw_ctx.dml.vba; - - if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context) - return; - - if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] != - dm_dram_clock_change_unsupported) { - /* MCLK switching is supported */ - if (!pipe_ctx->has_vactive_margin) { - /* In Vblank - yellow */ - color->color_r_cr = color_value; - color->color_g_y = color_value; - - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - /* FPO + Vblank - cyan */ - color->color_r_cr = 0; - color->color_g_y = color_value; - color->color_b_cb = color_value; - } - } else { - /* In Vactive - pink */ - color->color_r_cr = color_value; - color->color_b_cb = color_value; - } - /* SubVP */ - get_subvp_visual_confirm_color(dc, context, pipe_ctx, color); - } -} - void get_surface_tile_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index f365773d57..c6c35037bd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -467,6 +467,13 @@ bool dc_link_setup_psr(struct dc_link *link, return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context); } +bool dc_link_set_replay_allow_active(struct dc_link *link, const bool *allow_active, + bool wait, bool force_static, const unsigned int *power_opts) +{ + return link->dc->link_srv->edp_set_replay_allow_active(link, allow_active, wait, + force_static, power_opts); +} + bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state) { return link->dc->link_srv->edp_get_replay_state(link, state); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 990d775e4c..9fbdb09697 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -42,6 +42,7 @@ #include "link_enc_cfg.h" #include "link.h" #include "clk_mgr.h" +#include "dc_state_priv.h" #include "virtual/virtual_link_hwss.h" #include "link/hwss/link_hwss_dio.h" #include "link/hwss/link_hwss_dpia.h" @@ -69,8 +70,8 @@ #include "dcn314/dcn314_resource.h" #include "dcn315/dcn315_resource.h" #include "dcn316/dcn316_resource.h" -#include "../dcn32/dcn32_resource.h" -#include "../dcn321/dcn321_resource.h" +#include "dcn32/dcn32_resource.h" +#include "dcn321/dcn321_resource.h" #include "dcn35/dcn35_resource.h" #define VISUAL_CONFIRM_BASE_DEFAULT 3 @@ -1764,6 +1765,29 @@ int recource_find_free_pipe_not_used_in_cur_res_ctx( return free_pipe_idx; } +int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool) +{ + int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; + const struct pipe_ctx *new_pipe, *cur_pipe; + int i; + + for (i = 0; i < pool->pipe_count; i++) { + cur_pipe = &cur_res_ctx->pipe_ctx[i]; + new_pipe 
= &new_res_ctx->pipe_ctx[i]; + + if (resource_is_pipe_type(cur_pipe, OTG_MASTER) && + resource_is_pipe_type(new_pipe, FREE_PIPE)) { + free_pipe_idx = i; + break; + } + } + + return free_pipe_idx; +} + int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine( const struct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, @@ -2440,6 +2464,9 @@ void resource_remove_otg_master_for_stream_output(struct dc_state *context, struct pipe_ctx *otg_master = resource_get_otg_master_for_stream( &context->res_ctx, stream); + if (!otg_master) + return; + ASSERT(resource_get_odm_slice_count(otg_master) == 1); ASSERT(otg_master->plane_state == NULL); ASSERT(otg_master->stream_res.stream_enc); @@ -2974,190 +3001,6 @@ bool resource_update_pipes_for_plane_with_slice_count( return result; } -bool dc_add_plane_to_context( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state *plane_state, - struct dc_state *context) -{ - struct resource_pool *pool = dc->res_pool; - struct pipe_ctx *otg_master_pipe; - struct dc_stream_status *stream_status = NULL; - bool added = false; - - stream_status = dc_stream_get_status_from_state(context, stream); - if (stream_status == NULL) { - dm_error("Existing stream not found; failed to attach surface!\n"); - goto out; - } else if (stream_status->plane_count == MAX_SURFACE_NUM) { - dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n", - plane_state, MAX_SURFACE_NUM); - goto out; - } - - otg_master_pipe = resource_get_otg_master_for_stream( - &context->res_ctx, stream); - if (otg_master_pipe) - added = resource_append_dpp_pipes_for_plane_composition(context, - dc->current_state, pool, otg_master_pipe, plane_state); - - if (added) { - stream_status->plane_states[stream_status->plane_count] = - plane_state; - stream_status->plane_count++; - dc_plane_state_retain(plane_state); - } - -out: - return added; -} - -bool dc_remove_plane_from_context( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state *plane_state, - struct dc_state *context) -{ - int i; - struct dc_stream_status *stream_status = NULL; - struct resource_pool *pool = dc->res_pool; - - if (!plane_state) - return true; - - for (i = 0; i < context->stream_count; i++) - if (context->streams[i] == stream) { - stream_status = &context->stream_status[i]; - break; - } - - if (stream_status == NULL) { - dm_error("Existing stream not found; failed to remove plane.\n"); - return false; - } - - resource_remove_dpp_pipes_for_plane_composition( - context, pool, plane_state); - - for (i = 0; i < stream_status->plane_count; i++) { - if (stream_status->plane_states[i] == plane_state) { - dc_plane_state_release(stream_status->plane_states[i]); - break; - } - } - - if (i == stream_status->plane_count) { - dm_error("Existing plane_state not found; failed to detach it!\n"); - return false; - } - - stream_status->plane_count--; - - /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */ - for (; i < stream_status->plane_count; i++) - stream_status->plane_states[i] = stream_status->plane_states[i + 1]; - - stream_status->plane_states[stream_status->plane_count] = NULL; - - if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) - /* ODM combine could prevent us from supporting more planes - * we will reset ODM slice count back to 1 when all planes have - * been removed to maximize the amount of planes supported when - * new planes are added. 
- */ - resource_update_pipes_for_stream_with_slice_count( - context, dc->current_state, dc->res_pool, stream, 1); - - return true; -} - -/** - * dc_rem_all_planes_for_stream - Remove planes attached to the target stream. - * - * @dc: Current dc state. - * @stream: Target stream, which we want to remove the attached plans. - * @context: New context. - * - * Return: - * Return true if DC was able to remove all planes from the target - * stream, otherwise, return false. - */ -bool dc_rem_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_state *context) -{ - int i, old_plane_count; - struct dc_stream_status *stream_status = NULL; - struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 }; - - for (i = 0; i < context->stream_count; i++) - if (context->streams[i] == stream) { - stream_status = &context->stream_status[i]; - break; - } - - if (stream_status == NULL) { - dm_error("Existing stream %p not found!\n", stream); - return false; - } - - old_plane_count = stream_status->plane_count; - - for (i = 0; i < old_plane_count; i++) - del_planes[i] = stream_status->plane_states[i]; - - for (i = 0; i < old_plane_count; i++) - if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context)) - return false; - - return true; -} - -static bool add_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - const struct dc_validation_set set[], - int set_count, - struct dc_state *context) -{ - int i, j; - - for (i = 0; i < set_count; i++) - if (set[i].stream == stream) - break; - - if (i == set_count) { - dm_error("Stream %p not found in set!\n", stream); - return false; - } - - for (j = 0; j < set[i].plane_count; j++) - if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context)) - return false; - - return true; -} - -bool dc_add_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state * const *plane_states, - int plane_count, - struct dc_state *context) -{ - struct dc_validation_set set; - int i; - - set.stream = stream; - set.plane_count = plane_count; - - for (i = 0; i < plane_count; i++) - set.plane_states[i] = plane_states[i]; - - return add_all_planes_for_stream(dc, stream, &set, 1, context); -} - bool dc_is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream) { @@ -3309,84 +3152,6 @@ static struct audio *find_first_free_audio( return NULL; } -/* - * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state. - */ -enum dc_status dc_add_stream_to_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *stream) -{ - enum dc_status res; - DC_LOGGER_INIT(dc->ctx->logger); - - if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) { - DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream); - return DC_ERROR_UNEXPECTED; - } - - new_ctx->streams[new_ctx->stream_count] = stream; - dc_stream_retain(stream); - new_ctx->stream_count++; - - res = resource_add_otg_master_for_stream_output( - new_ctx, dc->res_pool, stream); - if (res != DC_OK) - DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res); - - return res; -} - -/* - * dc_remove_stream_from_ctx() - Remove a stream from a dc_state. 
- */
-enum dc_status dc_remove_stream_from_ctx(
-	struct dc *dc,
-	struct dc_state *new_ctx,
-	struct dc_stream_state *stream)
-{
-	int i;
-	struct dc_context *dc_ctx = dc->ctx;
-	struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(
-			&new_ctx->res_ctx, stream);
-
-	if (!del_pipe) {
-		DC_ERROR("Pipe not found for stream %p !\n", stream);
-		return DC_ERROR_UNEXPECTED;
-	}
-
-	resource_update_pipes_for_stream_with_slice_count(new_ctx,
-			dc->current_state, dc->res_pool, stream, 1);
-	resource_remove_otg_master_for_stream_output(
-			new_ctx, dc->res_pool, stream);
-
-	for (i = 0; i < new_ctx->stream_count; i++)
-		if (new_ctx->streams[i] == stream)
-			break;
-
-	if (new_ctx->streams[i] != stream) {
-		DC_ERROR("Context doesn't have stream %p !\n", stream);
-		return DC_ERROR_UNEXPECTED;
-	}
-
-	dc_stream_release(new_ctx->streams[i]);
-	new_ctx->stream_count--;
-
-	/* Trim back arrays */
-	for (; i < new_ctx->stream_count; i++) {
-		new_ctx->streams[i] = new_ctx->streams[i + 1];
-		new_ctx->stream_status[i] = new_ctx->stream_status[i + 1];
-	}
-
-	new_ctx->streams[new_ctx->stream_count] = NULL;
-	memset(
-			&new_ctx->stream_status[new_ctx->stream_count],
-			0,
-			sizeof(new_ctx->stream_status[0]));
-
-	return DC_OK;
-}
-
 static struct dc_stream_state *find_pll_sharable_stream(
 		struct dc_stream_state *stream_needs_pll,
 		struct dc_state *context)
@@ -3594,6 +3359,7 @@ static void mark_seamless_boot_stream(
  * |________|_______________|___________|_____________|
  */
 static bool acquire_otg_master_pipe_for_stream(
+		const struct dc_state *cur_ctx,
 		struct dc_state *new_ctx,
 		const struct resource_pool *pool,
 		struct dc_stream_state *stream)
@@ -3607,7 +3373,22 @@ static bool acquire_otg_master_pipe_for_stream(
 	int pipe_idx;
 	struct pipe_ctx *pipe_ctx = NULL;
 
-	pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
+	/*
+	 * Upper-level code is responsible for optimizing away unnecessary
+	 * addition and removal of unchanged streams, so an unchanged stream
+	 * keeps the same OTG master instance allocated. When the current
+	 * stream is removed and a new stream is added, we want to reuse the
+	 * OTG instance made available by the removed stream first. If none is
+	 * found, we try to avoid using free pipes that are already in use in
+	 * the current context, as that could unnecessarily tear down an
+	 * existing ODM/MPC/MPO configuration.
+ */ + pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( + &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); + if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx( + &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); + if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool); if (pipe_idx != FREE_PIPE_INDEX_NOT_FOUND) { pipe_ctx = &new_ctx->res_ctx.pipe_ctx[pipe_idx]; memset(pipe_ctx, 0, sizeof(*pipe_ctx)); @@ -3667,7 +3448,7 @@ enum dc_status resource_map_pool_resources( if (!acquired) /* acquire new resources */ - acquired = acquire_otg_master_pipe_for_stream( + acquired = acquire_otg_master_pipe_for_stream(dc->current_state, context, pool, stream); pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); @@ -3750,35 +3531,6 @@ enum dc_status resource_map_pool_resources( return DC_ERROR_UNEXPECTED; } -/** - * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state - * - * @dc: copy out of dc->current_state - * @dst_ctx: copy into this - * - * This function makes a shallow copy of the current DC state and increments - * refcounts on existing streams and planes. - */ -void dc_resource_state_copy_construct_current( - const struct dc *dc, - struct dc_state *dst_ctx) -{ - dc_resource_state_copy_construct(dc->current_state, dst_ctx); -} - - -void dc_resource_state_construct( - const struct dc *dc, - struct dc_state *dst_ctx) -{ - dst_ctx->clk_mgr = dc->clk_mgr; - - /* Initialise DIG link encoder resource tracking variables. */ - if (dc->res_pool) - link_enc_cfg_init(dc, dst_ctx); -} - - bool dc_resource_is_dsc_encoding_supported(const struct dc *dc) { if (dc->res_pool == NULL) @@ -3822,6 +3574,31 @@ static bool planes_changed_for_existing_stream(struct dc_state *context, return false; } +static bool add_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + const struct dc_validation_set set[], + int set_count, + struct dc_state *state) +{ + int i, j; + + for (i = 0; i < set_count; i++) + if (set[i].stream == stream) + break; + + if (i == set_count) { + dm_error("Stream %p not found in set!\n", stream); + return false; + } + + for (j = 0; j < set[i].plane_count; j++) + if (!dc_state_add_plane(dc, stream, set[i].plane_states[j], state)) + return false; + + return true; +} + /** * dc_validate_with_context - Validate and update the potential new stream in the context object * @@ -3927,7 +3704,8 @@ enum dc_status dc_validate_with_context(struct dc *dc, unchanged_streams[i], set, set_count)) { - if (!dc_rem_all_planes_for_stream(dc, + + if (!dc_state_rem_all_planes_for_stream(dc, unchanged_streams[i], context)) { res = DC_FAIL_DETACH_SURFACES; @@ -3949,12 +3727,24 @@ enum dc_status dc_validate_with_context(struct dc *dc, } } - if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { - res = DC_FAIL_DETACH_SURFACES; - goto fail; + if (dc_state_get_stream_subvp_type(context, del_streams[i]) == SUBVP_PHANTOM) { + /* remove phantoms specifically */ + if (!dc_state_rem_all_phantom_planes_for_stream(dc, del_streams[i], context, true)) { + res = DC_FAIL_DETACH_SURFACES; + goto fail; + } + + res = dc_state_remove_phantom_stream(dc, context, del_streams[i]); + dc_state_release_phantom_stream(dc, context, del_streams[i]); + } else { + if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) { + res = DC_FAIL_DETACH_SURFACES; + goto fail; + } + + res = dc_state_remove_stream(dc, context, 
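/*
 * Stream removal during validation is now phantom-aware: a SubVP phantom
 * stream must have its phantom planes detached and its reference dropped via
 * the dedicated dc_state_* helpers, because phantoms are refcounted and
 * tracked inside dc_state itself. Condensed, the branch above is:
 *
 *	if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) {
 *		dc_state_rem_all_phantom_planes_for_stream(dc, stream, context, true);
 *		res = dc_state_remove_phantom_stream(dc, context, stream);
 *		dc_state_release_phantom_stream(dc, context, stream);
 *	} else {
 *		dc_state_rem_all_planes_for_stream(dc, stream, context);
 *		res = dc_state_remove_stream(dc, context, stream);
 *	}
 */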
del_streams[i]); } - res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); if (res != DC_OK) goto fail; } @@ -3977,7 +3767,7 @@ enum dc_status dc_validate_with_context(struct dc *dc, /* Add new streams and then add all planes for the new stream */ for (i = 0; i < add_streams_count; i++) { calculate_phy_pix_clks(add_streams[i]); - res = dc_add_stream_to_ctx(dc, context, add_streams[i]); + res = dc_state_add_stream(dc, context, add_streams[i]); if (res != DC_OK) goto fail; @@ -4483,84 +4273,6 @@ static void set_vtem_info_packet( *info_packet = stream->vtem_infopacket; } -void dc_resource_state_destruct(struct dc_state *context) -{ - int i, j; - - for (i = 0; i < context->stream_count; i++) { - for (j = 0; j < context->stream_status[i].plane_count; j++) - dc_plane_state_release( - context->stream_status[i].plane_states[j]); - - context->stream_status[i].plane_count = 0; - dc_stream_release(context->streams[i]); - context->streams[i] = NULL; - } - context->stream_count = 0; - context->stream_mask = 0; - memset(&context->res_ctx, 0, sizeof(context->res_ctx)); - memset(&context->pp_display_cfg, 0, sizeof(context->pp_display_cfg)); - memset(&context->dcn_bw_vars, 0, sizeof(context->dcn_bw_vars)); - context->clk_mgr = NULL; - memset(&context->bw_ctx.bw, 0, sizeof(context->bw_ctx.bw)); - memset(context->block_sequence, 0, sizeof(context->block_sequence)); - context->block_sequence_steps = 0; - memset(context->dc_dmub_cmd, 0, sizeof(context->dc_dmub_cmd)); - context->dmub_cmd_count = 0; - memset(&context->perf_params, 0, sizeof(context->perf_params)); - memset(&context->scratch, 0, sizeof(context->scratch)); -} - -void dc_resource_state_copy_construct( - const struct dc_state *src_ctx, - struct dc_state *dst_ctx) -{ - int i, j; - struct kref refcount = dst_ctx->refcount; -#ifdef CONFIG_DRM_AMD_DC_FP - struct dml2_context *dml2 = NULL; - - // Need to preserve allocated dml2 context - if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2) - dml2 = dst_ctx->bw_ctx.dml2; -#endif - - *dst_ctx = *src_ctx; - -#ifdef CONFIG_DRM_AMD_DC_FP - // Preserve allocated dml2 context - if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2) - dst_ctx->bw_ctx.dml2 = dml2; -#endif - - for (i = 0; i < MAX_PIPES; i++) { - struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i]; - - if (cur_pipe->top_pipe) - cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; - - if (cur_pipe->bottom_pipe) - cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; - - if (cur_pipe->next_odm_pipe) - cur_pipe->next_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; - - if (cur_pipe->prev_odm_pipe) - cur_pipe->prev_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; - } - - for (i = 0; i < dst_ctx->stream_count; i++) { - dc_stream_retain(dst_ctx->streams[i]); - for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++) - dc_plane_state_retain( - dst_ctx->stream_status[i].plane_states[j]); - } - - /* context refcount should not be overridden */ - dst_ctx->refcount = refcount; - -} - struct clock_source *dc_resource_find_first_free_pll( struct resource_context *res_ctx, const struct resource_pool *pool) @@ -4740,7 +4452,7 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, option = DITHER_OPTION_SPATIAL8; break; case COLOR_DEPTH_101010: - option = DITHER_OPTION_SPATIAL10; + option = DITHER_OPTION_TRUN10; break; default: option = DITHER_OPTION_DISABLE; @@ -4766,6 +4478,8 @@ void 
resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; + if (option == DITHER_OPTION_TRUN10) + fmt_bit_depth->flags.TRUNCATE_MODE = 1; } /* special case - Formatter can only reduce by 4 bits at most. @@ -5283,7 +4997,7 @@ bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_st if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 && ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) return true; - else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 2160 && + else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 && ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) return true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c new file mode 100644 index 0000000000..61986e5cb4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -0,0 +1,880 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "core_types.h" +#include "core_status.h" +#include "dc_state.h" +#include "dc_state_priv.h" +#include "dc_stream_priv.h" +#include "dc_plane_priv.h" + +#include "dm_services.h" +#include "resource.h" +#include "link_enc_cfg.h" + +#include "dml2/dml2_wrapper.h" +#include "dml2/dml2_internal_types.h" + +#define DC_LOGGER \ + dc->ctx->logger +#define DC_LOGGER_INIT(logger) + +/* Private dc_state helper functions */ +static bool dc_state_track_phantom_stream(struct dc_state *state, + struct dc_stream_state *phantom_stream) +{ + if (state->phantom_stream_count >= MAX_PHANTOM_PIPES) + return false; + + state->phantom_streams[state->phantom_stream_count++] = phantom_stream; + + return true; +} + +static bool dc_state_untrack_phantom_stream(struct dc_state *state, struct dc_stream_state *phantom_stream) +{ + bool res = false; + int i; + + /* first find phantom stream in the dc_state */ + for (i = 0; i < state->phantom_stream_count; i++) { + if (state->phantom_streams[i] == phantom_stream) { + state->phantom_streams[i] = NULL; + res = true; + break; + } + } + + /* failed to find stream in state */ + if (!res) + return res; + + /* trim back phantom streams */ + state->phantom_stream_count--; + for (; i < state->phantom_stream_count; i++) + state->phantom_streams[i] = state->phantom_streams[i + 1]; + + return res; +} + +static bool dc_state_is_phantom_stream_tracked(struct dc_state *state, struct dc_stream_state *phantom_stream) +{ + int i; + + for (i = 0; i < state->phantom_stream_count; i++) { + if (state->phantom_streams[i] == phantom_stream) + return true; + } + + return false; +} + +static bool dc_state_track_phantom_plane(struct dc_state *state, + struct dc_plane_state *phantom_plane) +{ + if (state->phantom_plane_count >= MAX_PHANTOM_PIPES) + return false; + + state->phantom_planes[state->phantom_plane_count++] = phantom_plane; + + return true; +} + +static bool dc_state_untrack_phantom_plane(struct dc_state *state, struct dc_plane_state *phantom_plane) +{ + bool res = false; + int i; + + /* first find phantom plane in the dc_state */ + for (i = 0; i < state->phantom_plane_count; i++) { + if (state->phantom_planes[i] == phantom_plane) { + state->phantom_planes[i] = NULL; + res = true; + break; + } + } + + /* failed to find plane in state */ + if (!res) + return res; + + /* trim back phantom planes */ + state->phantom_plane_count--; + for (; i < state->phantom_plane_count; i++) + state->phantom_planes[i] = state->phantom_planes[i + 1]; + + return res; +} + +static bool dc_state_is_phantom_plane_tracked(struct dc_state *state, struct dc_plane_state *phantom_plane) +{ + int i; + + for (i = 0; i < state->phantom_plane_count; i++) { + if (state->phantom_planes[i] == phantom_plane) + return true; + } + + return false; +} + +static void dc_state_copy_internal(struct dc_state *dst_state, struct dc_state *src_state) +{ + int i, j; + + memcpy(dst_state, src_state, sizeof(struct dc_state)); + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *cur_pipe = &dst_state->res_ctx.pipe_ctx[i]; + + if (cur_pipe->top_pipe) + cur_pipe->top_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; + + if (cur_pipe->bottom_pipe) + cur_pipe->bottom_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; + + if (cur_pipe->prev_odm_pipe) + cur_pipe->prev_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; + + if (cur_pipe->next_odm_pipe) + cur_pipe->next_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; + 
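/*
 * The copy is intentionally shallow: the memcpy() above duplicates the whole
 * dc_state, including pipe_ctx pointers that still point into the *source*
 * state's pipe array. The loop rebases each intra-array pointer by index,
 * which is safe because pipe_idx is preserved by the copy. The same idiom in
 * general form (a sketch, not code from this patch):
 *
 *	if (src_ptr)
 *		dst_ptr = dst_base + (src_ptr - src_base);
 *
 * Refcounts are then taken on every stream, plane, and tracked phantom so
 * the new state owns its references independently of the source.
 */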
} + + /* retain phantoms */ + for (i = 0; i < dst_state->phantom_stream_count; i++) + dc_stream_retain(dst_state->phantom_streams[i]); + + for (i = 0; i < dst_state->phantom_plane_count; i++) + dc_plane_state_retain(dst_state->phantom_planes[i]); + + /* retain streams and planes */ + for (i = 0; i < dst_state->stream_count; i++) { + dc_stream_retain(dst_state->streams[i]); + for (j = 0; j < dst_state->stream_status[i].plane_count; j++) + dc_plane_state_retain( + dst_state->stream_status[i].plane_states[j]); + } + +} + +static void init_state(struct dc *dc, struct dc_state *state) +{ + /* Each context must have their own instance of VBA and in order to + * initialize and obtain IP and SOC the base DML instance from DC is + * initially copied into every context + */ + memcpy(&state->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); +} + +/* Public dc_state functions */ +struct dc_state *dc_state_create(struct dc *dc) +{ + struct dc_state *state = kvzalloc(sizeof(struct dc_state), + GFP_KERNEL); + + if (!state) + return NULL; + + init_state(dc, state); + dc_state_construct(dc, state); + +#ifdef CONFIG_DRM_AMD_DC_FP + if (dc->debug.using_dml2) + dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2); +#endif + + kref_init(&state->refcount); + + return state; +} + +void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state) +{ + struct kref refcount = dst_state->refcount; +#ifdef CONFIG_DRM_AMD_DC_FP + struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2; +#endif + + dc_state_copy_internal(dst_state, src_state); + +#ifdef CONFIG_DRM_AMD_DC_FP + dst_state->bw_ctx.dml2 = dst_dml2; + if (src_state->bw_ctx.dml2) + dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2); +#endif + + /* context refcount should not be overridden */ + dst_state->refcount = refcount; +} + +struct dc_state *dc_state_create_copy(struct dc_state *src_state) +{ + struct dc_state *new_state; + + new_state = kvmalloc(sizeof(struct dc_state), + GFP_KERNEL); + if (!new_state) + return NULL; + + dc_state_copy_internal(new_state, src_state); + +#ifdef CONFIG_DRM_AMD_DC_FP + if (src_state->bw_ctx.dml2 && + !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) { + dc_state_release(new_state); + return NULL; + } +#endif + + kref_init(&new_state->refcount); + + return new_state; +} + +void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state) +{ + dc_state_copy(dst_state, dc->current_state); +} + +struct dc_state *dc_state_create_current_copy(struct dc *dc) +{ + return dc_state_create_copy(dc->current_state); +} + +void dc_state_construct(struct dc *dc, struct dc_state *state) +{ + state->clk_mgr = dc->clk_mgr; + + /* Initialise DIG link encoder resource tracking variables. 
*/ + if (dc->res_pool) + link_enc_cfg_init(dc, state); +} + +void dc_state_destruct(struct dc_state *state) +{ + int i, j; + + for (i = 0; i < state->stream_count; i++) { + for (j = 0; j < state->stream_status[i].plane_count; j++) + dc_plane_state_release( + state->stream_status[i].plane_states[j]); + + state->stream_status[i].plane_count = 0; + dc_stream_release(state->streams[i]); + state->streams[i] = NULL; + } + state->stream_count = 0; + + /* release tracked phantoms */ + for (i = 0; i < state->phantom_stream_count; i++) { + dc_stream_release(state->phantom_streams[i]); + state->phantom_streams[i] = NULL; + } + state->phantom_stream_count = 0; + + for (i = 0; i < state->phantom_plane_count; i++) { + dc_plane_state_release(state->phantom_planes[i]); + state->phantom_planes[i] = NULL; + } + state->phantom_plane_count = 0; + + state->stream_mask = 0; + memset(&state->res_ctx, 0, sizeof(state->res_ctx)); + memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg)); + memset(&state->dcn_bw_vars, 0, sizeof(state->dcn_bw_vars)); + state->clk_mgr = NULL; + memset(&state->bw_ctx.bw, 0, sizeof(state->bw_ctx.bw)); + memset(state->block_sequence, 0, sizeof(state->block_sequence)); + state->block_sequence_steps = 0; + memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd)); + state->dmub_cmd_count = 0; + memset(&state->perf_params, 0, sizeof(state->perf_params)); + memset(&state->scratch, 0, sizeof(state->scratch)); +} + +void dc_state_retain(struct dc_state *state) +{ + kref_get(&state->refcount); +} + +static void dc_state_free(struct kref *kref) +{ + struct dc_state *state = container_of(kref, struct dc_state, refcount); + + dc_state_destruct(state); + +#ifdef CONFIG_DRM_AMD_DC_FP + dml2_destroy(state->bw_ctx.dml2); + state->bw_ctx.dml2 = 0; +#endif + + kvfree(state); +} + +void dc_state_release(struct dc_state *state) +{ + if (state != NULL) + kref_put(&state->refcount, dc_state_free); +} +/* + * dc_state_add_stream() - Add a new dc_stream_state to a dc_state. + */ +enum dc_status dc_state_add_stream( + struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream) +{ + enum dc_status res; + + DC_LOGGER_INIT(dc->ctx->logger); + + if (state->stream_count >= dc->res_pool->timing_generator_count) { + DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream); + return DC_ERROR_UNEXPECTED; + } + + state->streams[state->stream_count] = stream; + dc_stream_retain(stream); + state->stream_count++; + + res = resource_add_otg_master_for_stream_output( + state, dc->res_pool, stream); + if (res != DC_OK) + DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res); + + return res; +} + +/* + * dc_state_remove_stream() - Remove a stream from a dc_state. 
+ */ +enum dc_status dc_state_remove_stream( + struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream) +{ + int i; + struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream( + &state->res_ctx, stream); + + if (!del_pipe) { + dm_error("Pipe not found for stream %p !\n", stream); + return DC_ERROR_UNEXPECTED; + } + + resource_update_pipes_for_stream_with_slice_count(state, + dc->current_state, dc->res_pool, stream, 1); + resource_remove_otg_master_for_stream_output( + state, dc->res_pool, stream); + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == stream) + break; + + if (state->streams[i] != stream) { + dm_error("Context doesn't have stream %p !\n", stream); + return DC_ERROR_UNEXPECTED; + } + + dc_stream_release(state->streams[i]); + state->stream_count--; + + /* Trim back arrays */ + for (; i < state->stream_count; i++) { + state->streams[i] = state->streams[i + 1]; + state->stream_status[i] = state->stream_status[i + 1]; + } + + state->streams[state->stream_count] = NULL; + memset( + &state->stream_status[state->stream_count], + 0, + sizeof(state->stream_status[0])); + + return DC_OK; +} + +bool dc_state_add_plane( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *state) +{ + struct resource_pool *pool = dc->res_pool; + struct pipe_ctx *otg_master_pipe; + struct dc_stream_status *stream_status = NULL; + bool added = false; + + stream_status = dc_state_get_stream_status(state, stream); + if (stream_status == NULL) { + dm_error("Existing stream not found; failed to attach surface!\n"); + goto out; + } else if (stream_status->plane_count == MAX_SURFACE_NUM) { + dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n", + plane_state, MAX_SURFACE_NUM); + goto out; + } + + if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) + /* ODM combine could prevent us from supporting more planes + * we will reset ODM slice count back to 1 when all planes have + * been removed to maximize the amount of planes supported when + * new planes are added. 
+ */
+		resource_update_pipes_for_stream_with_slice_count(
+				state, dc->current_state, dc->res_pool, stream, 1);
+
+	otg_master_pipe = resource_get_otg_master_for_stream(
+			&state->res_ctx, stream);
+	if (otg_master_pipe)
+		added = resource_append_dpp_pipes_for_plane_composition(state,
+				dc->current_state, pool, otg_master_pipe, plane_state);
+
+	if (added) {
+		stream_status->plane_states[stream_status->plane_count] =
+				plane_state;
+		stream_status->plane_count++;
+		dc_plane_state_retain(plane_state);
+	}
+
+out:
+	return added;
+}
+
+bool dc_state_remove_plane(
+		const struct dc *dc,
+		struct dc_stream_state *stream,
+		struct dc_plane_state *plane_state,
+		struct dc_state *state)
+{
+	int i;
+	struct dc_stream_status *stream_status = NULL;
+	struct resource_pool *pool = dc->res_pool;
+
+	if (!plane_state)
+		return true;
+
+	for (i = 0; i < state->stream_count; i++)
+		if (state->streams[i] == stream) {
+			stream_status = &state->stream_status[i];
+			break;
+		}
+
+	if (stream_status == NULL) {
+		dm_error("Existing stream not found; failed to remove plane.\n");
+		return false;
+	}
+
+	resource_remove_dpp_pipes_for_plane_composition(
+			state, pool, plane_state);
+
+	for (i = 0; i < stream_status->plane_count; i++) {
+		if (stream_status->plane_states[i] == plane_state) {
+			dc_plane_state_release(stream_status->plane_states[i]);
+			break;
+		}
+	}
+
+	if (i == stream_status->plane_count) {
+		dm_error("Existing plane_state not found; failed to detach it!\n");
+		return false;
+	}
+
+	stream_status->plane_count--;
+
+	/* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
+	for (; i < stream_status->plane_count; i++)
+		stream_status->plane_states[i] = stream_status->plane_states[i + 1];
+
+	stream_status->plane_states[stream_status->plane_count] = NULL;
+
+	if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+		/* ODM combine could prevent us from supporting more planes
+		 * we will reset ODM slice count back to 1 when all planes have
+		 * been removed to maximize the amount of planes supported when
+		 * new planes are added.
+		 */
+		resource_update_pipes_for_stream_with_slice_count(
+				state, dc->current_state, dc->res_pool, stream, 1);
+
+	return true;
+}
+
+/**
+ * dc_state_rem_all_planes_for_stream - Remove planes attached to the target stream.
+ *
+ * @dc: Current dc state.
+ * @stream: Target stream, from which we want to remove the attached planes.
+ * @state: context from which the planes are to be removed.
+ *
+ * Return:
+ * Return true if DC was able to remove all planes from the target
+ * stream, otherwise, return false.
+ */ +bool dc_state_rem_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_state *state) +{ + int i, old_plane_count; + struct dc_stream_status *stream_status = NULL; + struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 }; + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == stream) { + stream_status = &state->stream_status[i]; + break; + } + + if (stream_status == NULL) { + dm_error("Existing stream %p not found!\n", stream); + return false; + } + + old_plane_count = stream_status->plane_count; + + for (i = 0; i < old_plane_count; i++) + del_planes[i] = stream_status->plane_states[i]; + + for (i = 0; i < old_plane_count; i++) + if (!dc_state_remove_plane(dc, stream, del_planes[i], state)) + return false; + + return true; +} + +bool dc_state_add_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state * const *plane_states, + int plane_count, + struct dc_state *state) +{ + int i; + bool result = true; + + for (i = 0; i < plane_count; i++) + if (!dc_state_add_plane(dc, stream, plane_states[i], state)) { + result = false; + break; + } + + return result; +} + +/* Private dc_state functions */ + +/** + * dc_state_get_stream_status - Get stream status from given dc state + * @state: DC state to find the stream status in + * @stream: The stream to get the stream status for + * + * The given stream is expected to exist in the given dc state. Otherwise, NULL + * will be returned. + */ +struct dc_stream_status *dc_state_get_stream_status( + struct dc_state *state, + struct dc_stream_state *stream) +{ + uint8_t i; + + if (state == NULL) + return NULL; + + for (i = 0; i < state->stream_count; i++) { + if (stream == state->streams[i]) + return &state->stream_status[i]; + } + + return NULL; +} + +enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state, + const struct pipe_ctx *pipe_ctx) +{ + return dc_state_get_stream_subvp_type(state, pipe_ctx->stream); +} + +enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state, + const struct dc_stream_state *stream) +{ + int i; + + enum mall_stream_type type = SUBVP_NONE; + + for (i = 0; i < state->stream_count; i++) { + if (state->streams[i] == stream) { + type = state->stream_status[i].mall_stream_config.type; + break; + } + } + + return type; +} + +struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state, + const struct dc_stream_state *stream) +{ + int i; + + struct dc_stream_state *paired_stream = NULL; + + for (i = 0; i < state->stream_count; i++) { + if (state->streams[i] == stream) { + paired_stream = state->stream_status[i].mall_stream_config.paired_stream; + break; + } + } + + return paired_stream; +} + +struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *main_stream) +{ + struct dc_stream_state *phantom_stream; + + DC_LOGGER_INIT(dc->ctx->logger); + + phantom_stream = dc_create_stream_for_sink(main_stream->sink); + + if (!phantom_stream) { + DC_LOG_ERROR("Failed to allocate phantom stream.\n"); + return NULL; + } + + /* track phantom stream in dc_state */ + dc_state_track_phantom_stream(state, phantom_stream); + + phantom_stream->is_phantom = true; + phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; + phantom_stream->dpms_off = true; + + return phantom_stream; +} + +void dc_state_release_phantom_stream(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream) +{ 
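/*
 * Phantom streams and planes now follow a create/track, add, remove,
 * release/untrack lifecycle that mirrors the regular stream API but keeps the
 * objects registered in dc_state's phantom arrays. A typical SubVP
 * setup/teardown, sketched from the helpers in this file:
 *
 *	phantom = dc_state_create_phantom_stream(dc, state, main_stream);
 *	dc_state_add_phantom_stream(dc, state, phantom, main_stream);
 *	// ... commit, run with SubVP active ...
 *	dc_state_remove_phantom_stream(dc, state, phantom);
 *	dc_state_release_phantom_stream(dc, state, phantom);
 *
 * dc_state_add_phantom_stream() also wires up the SubVP metadata in both
 * directions: the main stream gets SUBVP_MAIN with paired_stream = phantom,
 * and the phantom gets SUBVP_PHANTOM with paired_stream = main.
 */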
+ DC_LOGGER_INIT(dc->ctx->logger); + + if (!dc_state_untrack_phantom_stream(state, phantom_stream)) { + DC_LOG_ERROR("Failed to free phantom stream %p in dc state %p.\n", phantom_stream, state); + return; + } + + dc_stream_release(phantom_stream); +} + +struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc, + struct dc_state *state, + struct dc_plane_state *main_plane) +{ + struct dc_plane_state *phantom_plane = dc_create_plane_state(dc); + + DC_LOGGER_INIT(dc->ctx->logger); + + if (!phantom_plane) { + DC_LOG_ERROR("Failed to allocate phantom plane.\n"); + return NULL; + } + + /* track phantom inside dc_state */ + dc_state_track_phantom_plane(state, phantom_plane); + + phantom_plane->is_phantom = true; + + return phantom_plane; +} + +void dc_state_release_phantom_plane(const struct dc *dc, + struct dc_state *state, + struct dc_plane_state *phantom_plane) +{ + DC_LOGGER_INIT(dc->ctx->logger); + + if (!dc_state_untrack_phantom_plane(state, phantom_plane)) { + DC_LOG_ERROR("Failed to free phantom plane %p in dc state %p.\n", phantom_plane, state); + return; + } + + dc_plane_state_release(phantom_plane); +} + +/* add phantom streams to context and generate correct meta inside dc_state */ +enum dc_status dc_state_add_phantom_stream(struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream, + struct dc_stream_state *main_stream) +{ + struct dc_stream_status *main_stream_status; + struct dc_stream_status *phantom_stream_status; + enum dc_status res = dc_state_add_stream(dc, state, phantom_stream); + + /* check if stream is tracked */ + if (res == DC_OK && !dc_state_is_phantom_stream_tracked(state, phantom_stream)) { + /* stream must be tracked if added to state */ + dc_state_track_phantom_stream(state, phantom_stream); + } + + /* setup subvp meta */ + main_stream_status = dc_state_get_stream_status(state, main_stream); + phantom_stream_status = dc_state_get_stream_status(state, phantom_stream); + phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM; + phantom_stream_status->mall_stream_config.paired_stream = main_stream; + main_stream_status->mall_stream_config.type = SUBVP_MAIN; + main_stream_status->mall_stream_config.paired_stream = phantom_stream; + + return res; +} + +enum dc_status dc_state_remove_phantom_stream(struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream) +{ + struct dc_stream_status *main_stream_status; + struct dc_stream_status *phantom_stream_status; + + /* reset subvp meta */ + phantom_stream_status = dc_state_get_stream_status(state, phantom_stream); + main_stream_status = dc_state_get_stream_status(state, phantom_stream_status->mall_stream_config.paired_stream); + phantom_stream_status->mall_stream_config.type = SUBVP_NONE; + phantom_stream_status->mall_stream_config.paired_stream = NULL; + if (main_stream_status) { + main_stream_status->mall_stream_config.type = SUBVP_NONE; + main_stream_status->mall_stream_config.paired_stream = NULL; + } + + /* remove stream from state */ + return dc_state_remove_stream(dc, state, phantom_stream); +} + +bool dc_state_add_phantom_plane( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_plane_state *phantom_plane, + struct dc_state *state) +{ + bool res = dc_state_add_plane(dc, phantom_stream, phantom_plane, state); + + /* check if stream is tracked */ + if (res && !dc_state_is_phantom_plane_tracked(state, phantom_plane)) { + /* stream must be tracked if added to state */ + dc_state_track_phantom_plane(state, phantom_plane); + } + + return 
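/*
 * Both dc_state_add_phantom_stream() and dc_state_add_phantom_plane() above
 * re-check the tracking arrays after a successful add and register the object
 * if it is not already there, so anything reachable from the state is
 * guaranteed to be released by dc_state_release_phantom_streams_and_planes()
 * at teardown. Condensed, the guard is:
 *
 *	bool res = dc_state_add_plane(dc, phantom_stream, phantom_plane, state);
 *	if (res && !dc_state_is_phantom_plane_tracked(state, phantom_plane))
 *		dc_state_track_phantom_plane(state, phantom_plane);
 */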
res; +} + +bool dc_state_remove_phantom_plane( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_plane_state *phantom_plane, + struct dc_state *state) +{ + return dc_state_remove_plane(dc, phantom_stream, phantom_plane, state); +} + +bool dc_state_rem_all_phantom_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_state *state, + bool should_release_planes) +{ + int i, old_plane_count; + struct dc_stream_status *stream_status = NULL; + struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 }; + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == phantom_stream) { + stream_status = &state->stream_status[i]; + break; + } + + if (stream_status == NULL) { + dm_error("Existing stream %p not found!\n", phantom_stream); + return false; + } + + old_plane_count = stream_status->plane_count; + + for (i = 0; i < old_plane_count; i++) + del_planes[i] = stream_status->plane_states[i]; + + for (i = 0; i < old_plane_count; i++) { + if (!dc_state_remove_plane(dc, phantom_stream, del_planes[i], state)) + return false; + if (should_release_planes) + dc_state_release_phantom_plane(dc, state, del_planes[i]); + } + + return true; +} + +bool dc_state_add_all_phantom_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *phantom_stream, + struct dc_plane_state * const *phantom_planes, + int plane_count, + struct dc_state *state) +{ + return dc_state_add_all_planes_for_stream(dc, phantom_stream, phantom_planes, plane_count, state); +} + +bool dc_state_remove_phantom_streams_and_planes( + struct dc *dc, + struct dc_state *state) +{ + int i; + bool removed_phantom = false; + struct dc_stream_state *phantom_stream = NULL; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && pipe->stream && dc_state_get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) { + phantom_stream = pipe->stream; + + dc_state_rem_all_phantom_planes_for_stream(dc, phantom_stream, state, false); + dc_state_remove_phantom_stream(dc, state, phantom_stream); + removed_phantom = true; + } + } + return removed_phantom; +} + +void dc_state_release_phantom_streams_and_planes( + struct dc *dc, + struct dc_state *state) +{ + int i; + + for (i = 0; i < state->phantom_stream_count; i++) + dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]); + + for (i = 0; i < state->phantom_plane_count; i++) + dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 4bdf105d1d..51a970fcb5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -31,6 +31,8 @@ #include "ipp.h" #include "timing_generator.h" #include "dc_dmub_srv.h" +#include "dc_state_priv.h" +#include "dc_stream_priv.h" #define DC_LOGGER dc->ctx->logger @@ -54,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink) } } -static bool dc_stream_construct(struct dc_stream_state *stream, +bool dc_stream_construct(struct dc_stream_state *stream, struct dc_sink *dc_sink_data) { uint32_t i = 0; @@ -121,13 +123,12 @@ static bool dc_stream_construct(struct dc_stream_state *stream, } stream->out_transfer_func->type = TF_TYPE_BYPASS; - stream->stream_id = stream->ctx->dc_stream_id_count; - stream->ctx->dc_stream_id_count++; + dc_stream_assign_stream_id(stream); return true; } -static void 
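/*
 * dc_stream_construct()/dc_stream_destruct() lose their static qualifier here
 * (exposed via dc_stream_priv.h, included above), presumably so the new
 * dc_state code can manage stream lifetimes directly, and stream ID
 * assignment is folded into one helper instead of being open-coded in both
 * dc_stream_construct() and dc_copy_stream(). Condensed from the hunks below:
 *
 *	void dc_stream_assign_stream_id(struct dc_stream_state *stream)
 *	{
 *		// MSB is reserved to indicate phantoms
 *		stream->stream_id = stream->ctx->dc_stream_id_count;
 *		stream->ctx->dc_stream_id_count++;
 *	}
 */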
dc_stream_destruct(struct dc_stream_state *stream) +void dc_stream_destruct(struct dc_stream_state *stream) { dc_sink_release(stream->sink); if (stream->out_transfer_func != NULL) { @@ -136,6 +137,13 @@ static void dc_stream_destruct(struct dc_stream_state *stream) } } +void dc_stream_assign_stream_id(struct dc_stream_state *stream) +{ + /* MSB is reserved to indicate phantoms */ + stream->stream_id = stream->ctx->dc_stream_id_count; + stream->ctx->dc_stream_id_count++; +} + void dc_stream_retain(struct dc_stream_state *stream) { kref_get(&stream->refcount); @@ -196,8 +204,7 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) if (new_stream->out_transfer_func) dc_transfer_func_retain(new_stream->out_transfer_func); - new_stream->stream_id = new_stream->ctx->dc_stream_id_count; - new_stream->ctx->dc_stream_id_count++; + dc_stream_assign_stream_id(new_stream); /* If using dynamic encoder assignment, wait till stream committed to assign encoder. */ if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign) @@ -208,31 +215,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) return new_stream; } -/** - * dc_stream_get_status_from_state - Get stream status from given dc state - * @state: DC state to find the stream status in - * @stream: The stream to get the stream status for - * - * The given stream is expected to exist in the given dc state. Otherwise, NULL - * will be returned. - */ -struct dc_stream_status *dc_stream_get_status_from_state( - struct dc_state *state, - struct dc_stream_state *stream) -{ - uint8_t i; - - if (state == NULL) - return NULL; - - for (i = 0; i < state->stream_count; i++) { - if (stream == state->streams[i]) - return &state->stream_status[i]; - } - - return NULL; -} - /** * dc_stream_get_status() - Get current stream status of the given stream state * @stream: The stream to get the stream status for. 
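/*
 * [Editor's sketch, not part of the patch] The phantom helpers introduced
 * earlier in this series are intended to be used as a unit: create (which
 * already tracks the object inside the dc_state), then add (which pairs the
 * main and phantom streams via mall_stream_config), and on teardown remove
 * before releasing. A minimal, hypothetical usage sequence is sketched
 * below; the function name is illustrative and error handling is elided.
 */
static enum dc_status example_setup_subvp_phantom(struct dc *dc,
		struct dc_state *state,
		struct dc_stream_state *main_stream,
		struct dc_plane_state *main_plane)
{
	struct dc_stream_state *phantom_stream;
	struct dc_plane_state *phantom_plane;
	enum dc_status res;

	/* creation also tracks the phantom objects inside the dc_state */
	phantom_stream = dc_state_create_phantom_stream(dc, state, main_stream);
	if (!phantom_stream)
		return DC_ERROR_UNEXPECTED;

	phantom_plane = dc_state_create_phantom_plane(dc, state, main_plane);
	if (!phantom_plane) {
		dc_state_release_phantom_stream(dc, state, phantom_stream);
		return DC_ERROR_UNEXPECTED;
	}

	/* pairs main/phantom and marks them SUBVP_MAIN / SUBVP_PHANTOM */
	res = dc_state_add_phantom_stream(dc, state, phantom_stream, main_stream);
	if (res != DC_OK)
		return res;

	if (!dc_state_add_phantom_plane(dc, phantom_stream, phantom_plane, state))
		return DC_ERROR_UNEXPECTED;

	return DC_OK;
}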
@@ -244,7 +226,7 @@ struct dc_stream_status *dc_stream_get_status( struct dc_stream_state *stream) { struct dc *dc = stream->ctx->dc; - return dc_stream_get_status_from_state(dc->current_state, stream); + return dc_state_get_stream_status(dc->current_state, stream); } static void program_cursor_attributes( @@ -441,6 +423,8 @@ bool dc_stream_add_writeback(struct dc *dc, return false; } + dc_exit_ips_for_hw_access(dc); + wb_info->dwb_params.out_transfer_func = stream->out_transfer_func; dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; @@ -465,16 +449,37 @@ bool dc_stream_add_writeback(struct dc *dc, if (dc->hwss.enable_writeback) { struct dc_stream_status *stream_status = dc_stream_get_status(stream); struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; - dwb->otg_inst = stream_status->primary_otg_inst; + if (stream_status) + dwb->otg_inst = stream_status->primary_otg_inst; + } + + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { + dm_error("DC: update_bandwidth failed!\n"); + return false; + } + + /* enable writeback */ + if (dc->hwss.enable_writeback) { + struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + + if (dwb->funcs->is_enabled(dwb)) { + /* writeback pipe already enabled, only need to update */ + dc->hwss.update_writeback(dc, wb_info, dc->current_state); + } else { + /* Enable writeback pipe from scratch*/ + dc->hwss.enable_writeback(dc, wb_info, dc->current_state); + } } + return true; } -bool dc_stream_remove_writeback(struct dc *dc, +bool dc_stream_fc_disable_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst) { - int i = 0, j = 0; + struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst]; + if (stream == NULL) { dm_error("DC: dc_stream is NULL!\n"); return false; @@ -490,27 +495,67 @@ bool dc_stream_remove_writeback(struct dc *dc, return false; } -// stream->writeback_info[dwb_pipe_inst].wb_enabled = false; - for (i = 0; i < stream->num_wb_info; i++) { - /*dynamic update*/ - if (stream->writeback_info[i].wb_enabled && - stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) { - stream->writeback_info[i].wb_enabled = false; - } + dc_exit_ips_for_hw_access(dc); + + if (dwb->funcs->set_fc_enable) + dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE); + + return true; +} + +bool dc_stream_remove_writeback(struct dc *dc, + struct dc_stream_state *stream, + uint32_t dwb_pipe_inst) +{ + int i = 0, j = 0; + if (stream == NULL) { + dm_error("DC: dc_stream is NULL!\n"); + return false; + } + + if (dwb_pipe_inst >= MAX_DWB_PIPES) { + dm_error("DC: writeback pipe is invalid!\n"); + return false; + } + + if (stream->num_wb_info > MAX_DWB_PIPES) { + dm_error("DC: num_wb_info is invalid!\n"); + return false; } /* remove writeback info for disabled writeback pipes from stream */ for (i = 0, j = 0; i < stream->num_wb_info; i++) { if (stream->writeback_info[i].wb_enabled) { - if (j < i) - /* trim the array */ + + if (stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) + stream->writeback_info[i].wb_enabled = false; + + /* trim the array */ + if (j < i) { memcpy(&stream->writeback_info[j], &stream->writeback_info[i], sizeof(struct dc_writeback_info)); - j++; + j++; + } } } stream->num_wb_info = j; + /* recalculate and apply DML parameters */ + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { + dm_error("DC: update_bandwidth failed!\n"); + return false; + } + + dc_exit_ips_for_hw_access(dc); + + /* disable writeback */ + if (dc->hwss.disable_writeback) { + struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst]; + + if 
(dwb->funcs->is_enabled(dwb)) + dc->hwss.disable_writeback(dc, dwb_pipe_inst); + } + return true; } @@ -518,6 +563,8 @@ bool dc_stream_warmup_writeback(struct dc *dc, int num_dwb, struct dc_writeback_info *wb_info) { + dc_exit_ips_for_hw_access(dc); + if (dc->hwss.mmhubbub_warmup) return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info); else @@ -530,6 +577,8 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) struct resource_context *res_ctx = &dc->current_state->res_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -558,6 +607,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream, dc = stream->ctx->dc; res_ctx = &dc->current_state->res_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; @@ -589,6 +640,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, struct resource_context *res_ctx = &dc->current_state->res_ctx; + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < MAX_PIPES; i++) { struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; @@ -625,6 +678,8 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) if (i == MAX_PIPES) return true; + dc_exit_ips_for_hw_access(dc); + return dc->hwss.dmdata_status_done(pipe); } @@ -659,6 +714,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, pipe_ctx->stream->dmdata_address = attr->address; + dc_exit_ips_for_hw_access(dc); + dc->hwss.program_dmdata_engine(pipe_ctx); if (hubp->funcs->dmdata_set_attributes != NULL && diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index a80e453007..19140fb657 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -32,10 +32,12 @@ #include "transform.h" #include "dpp.h" +#include "dc_plane_priv.h" + /******************************************************************************* * Private functions ******************************************************************************/ -static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state) +void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state) { plane_state->ctx = ctx; @@ -63,7 +65,7 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl } -static void dc_plane_destruct(struct dc_plane_state *plane_state) +void dc_plane_destruct(struct dc_plane_state *plane_state) { if (plane_state->gamma_correction != NULL) { dc_gamma_release(&plane_state->gamma_correction); @@ -159,6 +161,8 @@ const struct dc_plane_status *dc_plane_get_status( break; } + dc_exit_ips_for_hw_access(dc); + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 8164a53404..fc60fa5814 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -27,6 +27,8 @@ #define DC_INTERFACE_H_ #include "dc_types.h" +#include "dc_state.h" +#include "dc_plane.h" #include "grph_object_defs.h" #include "logger_types.h" #include "hdcp_msg_types.h" @@ -49,7 +51,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.259" +#define DC_VER "3.2.266" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -432,6 +434,7 @@ 
struct dc_config { bool EnableMinDispClkODM; bool enable_auto_dpm_test_logs; unsigned int disable_ips; + unsigned int disable_ips_in_vpb; }; enum visual_confirm { @@ -461,6 +464,12 @@ enum dml_hostvm_override_opts { DML_HOSTVM_OVERRIDE_TRUE = 0x2, }; +enum dc_replay_power_opts { + replay_power_opt_invalid = 0x0, + replay_power_opt_smu_opt_static_screen = 0x1, + replay_power_opt_z10_static_screen = 0x10, +}; + enum dcc_option { DCC_ENABLE = 0, DCC_DISABLE = 1, @@ -956,7 +965,6 @@ struct dc_debug_options { unsigned int min_prefetch_in_strobe_ns; bool disable_unbounded_requesting; bool dig_fifo_off_in_blank; - bool temp_mst_deallocation_sequence; bool override_dispclk_programming; bool otg_crc_db; bool disallow_dispclk_dppclk_ds; @@ -979,6 +987,10 @@ struct dc_debug_options { bool psp_disabled_wa; unsigned int ips2_eval_delay_us; unsigned int ips2_entry_delay_us; + bool disable_dmub_reallow_idle; + bool disable_timeout; + bool disable_extblankadj; + unsigned int static_screen_wait_frames; }; struct gpu_info_soc_bounding_box_v1_0; @@ -1026,7 +1038,6 @@ struct dc { /* Require to optimize clocks and bandwidth for added/removed planes */ bool optimized_required; - bool wm_optimized_required; bool idle_optimizations_allowed; bool enable_c20_dtm_b0; @@ -1389,13 +1400,6 @@ struct dc_surface_update { /* * Create a new surface with default parameters; */ -struct dc_plane_state *dc_create_plane_state(struct dc *dc); -const struct dc_plane_status *dc_plane_get_status( - const struct dc_plane_state *plane_state); - -void dc_plane_state_retain(struct dc_plane_state *plane_state); -void dc_plane_state_release(struct dc_plane_state *plane_state); - void dc_gamma_retain(struct dc_gamma *dc_gamma); void dc_gamma_release(struct dc_gamma **dc_gamma); struct dc_gamma *dc_create_gamma(void); @@ -1459,37 +1463,20 @@ enum dc_status dc_validate_global_state( struct dc_state *new_ctx, bool fast_validate); - -void dc_resource_state_construct( - const struct dc *dc, - struct dc_state *dst_ctx); - bool dc_acquire_release_mpc_3dlut( struct dc *dc, bool acquire, struct dc_stream_state *stream, struct dc_3dlut **lut, struct dc_transfer_func **shaper); -void dc_resource_state_copy_construct( - const struct dc_state *src_ctx, - struct dc_state *dst_ctx); - -void dc_resource_state_copy_construct_current( - const struct dc *dc, - struct dc_state *dst_ctx); - -void dc_resource_state_destruct(struct dc_state *context); - bool dc_resource_is_dsc_encoding_supported(const struct dc *dc); +void get_audio_check(struct audio_info *aud_modes, + struct audio_check *aud_chk); enum dc_status dc_commit_streams(struct dc *dc, struct dc_stream_state *streams[], uint8_t stream_count); -struct dc_state *dc_create_state(struct dc *dc); -struct dc_state *dc_copy_state(struct dc_state *src_ctx); -void dc_retain_state(struct dc_state *context); -void dc_release_state(struct dc_state *context); struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc, struct dc_stream_state *stream, @@ -1541,7 +1528,13 @@ struct dc_link { bool is_dig_mapping_flexible; bool hpd_status; /* HPD status of link without physical HPD pin. */ bool is_hpd_pending; /* Indicates a new received hpd */ - bool is_automated; /* Indicates automated testing */ + + /* USB4 DPIA links skip verifying link cap, instead performing the fallback method + * for every link training. This is incompatible with DP LL compliance automation, + * which expects the same link settings to be used every retry on a link loss. 
+ * This flag is used to skip the fallback when link loss occurs during automation.
+ */
+	bool skip_fallback_on_link_loss;
 
 	bool edp_sink_present;
 
@@ -2092,6 +2085,20 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
 		const struct dc_stream_state *stream, struct psr_config *psr_config,
 		struct psr_context *psr_context);
 
+/*
+ * Communicate with DMUB to allow or disallow Panel Replay on the specified link:
+ *
+ * @link: pointer to the dc_link struct instance
+ * @enable: enable (active) or disable (inactive) Replay
+ * @wait: wait for the active state transition to complete
+ * @force_static: force Replay to be disabled (inactive)
+ * @power_opts: power optimization parameters to set in DMUB
+ *
+ * return: true if Replay active is allowed, otherwise false.
+ */
+bool dc_link_set_replay_allow_active(struct dc_link *dc_link, const bool *enable,
+		bool wait, bool force_static, const unsigned int *power_opts);
+
 bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state);
 
 /* On eDP links this function call will stall until T12 has elapsed.
@@ -2318,6 +2325,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_
 		struct dc_cursor_attributes *cursor_attr);
 
 void dc_allow_idle_optimizations(struct dc *dc, bool allow);
+void dc_exit_ips_for_hw_access(struct dc *dc);
 bool dc_dmub_is_ips_idle_state(struct dc *dc);
 
 /* set min and max memory clock to lowest and highest DPM level, respectively */
@@ -2336,6 +2344,9 @@ void dc_hardware_release(struct dc *dc);
 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc);
 
 bool dc_set_psr_allow_active(struct dc *dc, bool enable);
+
+bool dc_set_replay_allow_active(struct dc *dc, bool active);
+
 void dc_z10_restore(const struct dc *dc);
 void dc_z10_save_init(struct dc *dc);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 05b3433cbb..9084b32084 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -33,6 +33,7 @@
 #include "cursor_reg_cache.h"
 #include "resource.h"
 #include "clk_mgr.h"
+#include "dc_state_priv.h"
 
 #define CTX dc_dmub_srv->ctx
 #define DC_LOGGER CTX->logger
 
@@ -141,7 +142,10 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
 		if (status == DMUB_STATUS_QUEUE_FULL) {
 			/* Execute and wait for queue to become empty again. */
-			dmub_srv_cmd_execute(dmub);
+			status = dmub_srv_cmd_execute(dmub);
+			if (status == DMUB_STATUS_POWER_STATE_D3)
+				return false;
+
 			dmub_srv_wait_for_idle(dmub, 100000);
 
 			/* Requeue the command.
*/ @@ -149,16 +153,20 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, } if (status != DMUB_STATUS_OK) { - DC_ERROR("Error queueing DMUB command: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error queueing DMUB command: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } return false; } } status = dmub_srv_cmd_execute(dmub); if (status != DMUB_STATUS_OK) { - DC_ERROR("Error starting DMUB execution: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error starting DMUB execution: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } return false; } @@ -219,7 +227,10 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun if (status == DMUB_STATUS_QUEUE_FULL) { /* Execute and wait for queue to become empty again. */ - dmub_srv_cmd_execute(dmub); + status = dmub_srv_cmd_execute(dmub); + if (status == DMUB_STATUS_POWER_STATE_D3) + return false; + dmub_srv_wait_for_idle(dmub, 100000); /* Requeue the command. */ @@ -227,22 +238,31 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun } if (status != DMUB_STATUS_OK) { - DC_ERROR("Error queueing DMUB command: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error queueing DMUB command: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } return false; } } status = dmub_srv_cmd_execute(dmub); if (status != DMUB_STATUS_OK) { - DC_ERROR("Error starting DMUB execution: status=%d\n", status); - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + if (status != DMUB_STATUS_POWER_STATE_D3) { + DC_ERROR("Error starting DMUB execution: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } return false; } // Wait for DMUB to process command if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { - status = dmub_srv_wait_for_idle(dmub, 100000); + if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { + do { + status = dmub_srv_wait_for_idle(dmub, 100000); + } while (status != DMUB_STATUS_OK); + } else + status = dmub_srv_wait_for_idle(dmub, 100000); if (status != DMUB_STATUS_OK) { DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); @@ -500,10 +520,11 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi /** * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command * - * @dc: [in] current dc state + * @dc: [in] pointer to dc object * @subvp_pipe: [in] pipe_ctx for the SubVP pipe * @vblank_pipe: [in] pipe_ctx for the DRR pipe * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info + * @context: [in] DC state for access to phantom stream * * Populate the DMCUB SubVP command with DRR pipe info. All the information * required for calculating the SubVP + DRR microschedule is populated here. @@ -514,12 +535,14 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi * 3. 
Populate the drr_info with the min and max supported vtotal values */ static void populate_subvp_cmd_drr_info(struct dc *dc, + struct dc_state *context, struct pipe_ctx *subvp_pipe, struct pipe_ctx *vblank_pipe, struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data) { + struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; - struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; + struct dc_crtc_timing *phantom_timing = &phantom_stream->timing; struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing; uint16_t drr_frame_us = 0; uint16_t min_drr_supported_us = 0; @@ -607,7 +630,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc, continue; // Find the SubVP pipe - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) break; } @@ -624,7 +647,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc, if (vblank_pipe->stream->ignore_msa_timing_param && (vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed)) - populate_subvp_cmd_drr_info(dc, pipe, vblank_pipe, pipe_data); + populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data); } /** @@ -649,10 +672,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc, uint32_t subvp0_prefetch_us = 0; uint32_t subvp1_prefetch_us = 0; uint32_t prefetch_delta_us = 0; - struct dc_crtc_timing *phantom_timing0 = &subvp_pipes[0]->stream->mall_stream_config.paired_stream->timing; - struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing; + struct dc_stream_state *phantom_stream0 = NULL; + struct dc_stream_state *phantom_stream1 = NULL; + struct dc_crtc_timing *phantom_timing0 = NULL; + struct dc_crtc_timing *phantom_timing1 = NULL; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL; + phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream); + phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream); + phantom_timing0 = &phantom_stream0->timing; + phantom_timing1 = &phantom_stream1->timing; + subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) * (uint64_t)phantom_timing0->h_total * 1000000), (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us)); @@ -702,8 +732,9 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc, uint32_t j; struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index]; + struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; - struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; + struct dc_crtc_timing *phantom_timing = &phantom_stream->timing; uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den; pipe_data->mode = SUBVP; @@ -757,7 +788,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc, for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j]; - if (phantom_pipe->stream == 
subvp_pipe->stream->mall_stream_config.paired_stream) { + if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) { pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst; if (phantom_pipe->bottom_pipe) { pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst; @@ -791,6 +822,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, union dmub_rb_cmd cmd; struct pipe_ctx *subvp_pipes[2]; uint32_t wm_val_refclk = 0; + enum mall_stream_type pipe_mall_type; memset(&cmd, 0, sizeof(cmd)); // FW command for SUBVP @@ -806,7 +838,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, */ if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) subvp_pipes[subvp_count++] = pipe; } @@ -814,6 +846,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (!pipe->stream) continue; @@ -824,12 +857,11 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, */ if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && - pipe->stream->mall_stream_config.paired_stream && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + pipe_mall_type == SUBVP_MAIN) { populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); } else if (resource_is_pipe_type(pipe, OTG_MASTER) && resource_is_pipe_type(pipe, DPP_PIPE) && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + pipe_mall_type == SUBVP_NONE) { // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where // we run through DML without calculating "natural" P-state support populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); @@ -1142,10 +1174,16 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) dc_ctx = dc_dmub_srv->ctx; if (wait) { - status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000); - if (status != DMUB_STATUS_OK) { - DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status); - return false; + if (dc_dmub_srv->ctx->dc->debug.disable_timeout) { + do { + status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000); + } while (status != DMUB_STATUS_OK); + } else { + status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status); + return false; + } } } else return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub); @@ -1175,22 +1213,18 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) } /* NOTE: This does not use the "wake" interface since this is part of the wake path. */ - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + /* We also do not perform a wait since DMCUB could enter idle after the notification. 
*/ + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); } static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { - const uint32_t max_num_polls = 10000; uint32_t allow_state = 0; uint32_t commit_state = 0; - uint32_t i; if (dc->debug.dmcub_emulation) return; - if (!dc->idle_optimizations_allowed) - return; - if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub) return; @@ -1203,8 +1237,16 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) { // Wait for evaluation time - udelay(dc->debug.ips2_eval_delay_us); - commit_state = dc->hwss.get_idle_state(dc); + for (;;) { + udelay(dc->debug.ips2_eval_delay_us); + commit_state = dc->hwss.get_idle_state(dc); + if (commit_state & DMUB_IPS2_ALLOW_MASK) + break; + + /* allow was still set, retry eval delay */ + dc->hwss.set_idle_state(dc, false); + } + if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) { // Tell PMFW to exit low power state dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); @@ -1213,14 +1255,13 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) udelay(dc->debug.ips2_entry_delay_us); dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); - for (i = 0; i < max_num_polls; ++i) { + for (;;) { commit_state = dc->hwss.get_idle_state(dc); if (commit_state & DMUB_IPS2_COMMIT_MASK) break; udelay(1); } - ASSERT(i < max_num_polls); if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); @@ -1235,14 +1276,13 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) dc_dmub_srv_notify_idle(dc, false); if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) { - for (i = 0; i < max_num_polls; ++i) { + for (;;) { commit_state = dc->hwss.get_idle_state(dc); if (commit_state & DMUB_IPS1_COMMIT_MASK) break; udelay(1); } - ASSERT(i < max_num_polls); } } @@ -1324,7 +1364,7 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in else result = dm_execute_dmub_cmd(ctx, cmd, wait_type); - if (result && reallow_idle) + if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); return result; @@ -1373,7 +1413,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type); - if (result && reallow_idle) + if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); return result; diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h new file mode 100644 index 0000000000..ef380cae81 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h @@ -0,0 +1,38 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DC_PLANE_H_ +#define _DC_PLANE_H_ + +#include "dc.h" +#include "dc_hw_types.h" + +struct dc_plane_state *dc_create_plane_state(struct dc *dc); +const struct dc_plane_status *dc_plane_get_status( + const struct dc_plane_state *plane_state); +void dc_plane_state_retain(struct dc_plane_state *plane_state); +void dc_plane_state_release(struct dc_plane_state *plane_state); + +#endif /* _DC_PLANE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h new file mode 100644 index 0000000000..9ee184c1df --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h @@ -0,0 +1,34 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DC_PLANE_PRIV_H_ +#define _DC_PLANE_PRIV_H_ + +#include "dc_plane.h" + +void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state); +void dc_plane_destruct(struct dc_plane_state *plane_state); + +#endif /* _DC_PLANE_PRIV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h new file mode 100644 index 0000000000..d167fdbfa8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_state.h @@ -0,0 +1,78 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DC_STATE_H_ +#define _DC_STATE_H_ + +#include "dc.h" +#include "inc/core_status.h" + +struct dc_state *dc_state_create(struct dc *dc); +void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state); +struct dc_state *dc_state_create_copy(struct dc_state *src_state); +void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state); +struct dc_state *dc_state_create_current_copy(struct dc *dc); +void dc_state_construct(struct dc *dc, struct dc_state *state); +void dc_state_destruct(struct dc_state *state); +void dc_state_retain(struct dc_state *state); +void dc_state_release(struct dc_state *state); + +enum dc_status dc_state_add_stream(struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream); + +enum dc_status dc_state_remove_stream( + struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream); + +bool dc_state_add_plane( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *state); + +bool dc_state_remove_plane( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *state); + +bool dc_state_rem_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_state *state); + +bool dc_state_add_all_planes_for_stream( + const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state * const *plane_states, + int plane_count, + struct dc_state *state); + +struct dc_stream_status *dc_state_get_stream_status( + struct dc_state *state, + struct dc_stream_state *stream); +#endif /* _DC_STATE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h new file mode 100644 index 0000000000..c1f44e09a6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h @@ -0,0 +1,102 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STATE_PRIV_H_
+#define _DC_STATE_PRIV_H_
+
+#include "dc_state.h"
+#include "dc_stream.h"
+
+/* Get the type of the provided resource (none, phantom, main) based on the provided
+ * context. If the context is unavailable, determine only if phantom or not.
+ */
+enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state,
+		const struct pipe_ctx *pipe_ctx);
+enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state,
+		const struct dc_stream_state *stream);
+
+/* Gets the phantom stream if main is provided, gets the main if phantom is provided. */
+struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state,
+		const struct dc_stream_state *stream);
+
+/* Allocates a phantom stream or plane and returns a pointer to the object */
+struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
+		struct dc_state *state,
+		struct dc_stream_state *main_stream);
+struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+		struct dc_state *state,
+		struct dc_plane_state *main_plane);
+
+/* Deallocates a phantom stream or plane */
+void dc_state_release_phantom_stream(const struct dc *dc,
+		struct dc_state *state,
+		struct dc_stream_state *phantom_stream);
+void dc_state_release_phantom_plane(const struct dc *dc,
+		struct dc_state *state,
+		struct dc_plane_state *phantom_plane);
+
+/* Add/remove a phantom stream to/from the context and generate SubVP metadata */
+enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+		struct dc_state *state,
+		struct dc_stream_state *phantom_stream,
+		struct dc_stream_state *main_stream);
+enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+		struct dc_state *state,
+		struct dc_stream_state *phantom_stream);
+
+bool dc_state_add_phantom_plane(
+		const struct dc *dc,
+		struct dc_stream_state *phantom_stream,
+		struct dc_plane_state *phantom_plane,
+		struct dc_state *state);
+
+bool dc_state_remove_phantom_plane(
+		const struct dc *dc,
+		struct dc_stream_state *phantom_stream,
+		struct dc_plane_state *phantom_plane,
+		struct dc_state *state);
+
+bool dc_state_rem_all_phantom_planes_for_stream(
+		const struct dc *dc,
+		struct dc_stream_state *phantom_stream,
+		struct dc_state *state,
+		bool should_release_planes);
+
+bool dc_state_add_all_phantom_planes_for_stream(
+		const struct dc *dc,
+		struct dc_stream_state *phantom_stream,
+		struct dc_plane_state * const *phantom_planes,
+		int plane_count,
+		struct dc_state *state);
+
+bool dc_state_remove_phantom_streams_and_planes(
+		struct dc *dc,
+		struct dc_state *state);
+
+void dc_state_release_phantom_streams_and_planes(
+		struct dc *dc,
+		struct dc_state *state);
+
+#endif /* _DC_STATE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e61eea6db2..a23eebd993 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -38,6 +38,14 @@ struct timing_sync_info {
 	bool master;
 };
 
+struct mall_stream_config {
+	/* MALL stream config to indicate if the stream is phantom or not.
+	 * We will use a phantom stream to indicate that the pipe is phantom.
+ */ + enum mall_stream_type type; + struct dc_stream_state *paired_stream; // master / slave stream +}; + struct dc_stream_status { int primary_otg_inst; int stream_enc_inst; @@ -50,6 +58,7 @@ struct dc_stream_status { struct timing_sync_info timing_sync_info; struct dc_plane_state *plane_states[MAX_SURFACE_NUM]; bool is_abm_supported; + struct mall_stream_config mall_stream_config; }; enum hubp_dmdata_mode { @@ -130,7 +139,6 @@ union stream_update_flags { uint32_t wb_update:1; uint32_t dsc_changed : 1; uint32_t mst_bw : 1; - uint32_t crtc_timing_adjust : 1; uint32_t fams_changed : 1; } bits; @@ -147,31 +155,6 @@ struct test_pattern { #define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR) -enum mall_stream_type { - SUBVP_NONE, // subvp not in use - SUBVP_MAIN, // subvp in use, this stream is main stream - SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream -}; - -struct mall_stream_config { - /* MALL stream config to indicate if the stream is phantom or not. - * We will use a phantom stream to indicate that the pipe is phantom. - */ - enum mall_stream_type type; - struct dc_stream_state *paired_stream; // master / slave stream -}; - -/* Temp struct used to save and restore MALL config - * during validation. - * - * TODO: Move MALL config into dc_state instead of stream struct - * to avoid needing to save/restore. - */ -struct mall_temp_config { - struct mall_stream_config mall_stream_config[MAX_PIPES]; - bool is_phantom_plane[MAX_PIPES]; -}; - struct dc_stream_debug_options { char force_odm_combine_segments; }; @@ -301,7 +284,7 @@ struct dc_stream_state { bool has_non_synchronizable_pclk; bool vblank_synchronized; bool fpo_in_use; - struct mall_stream_config mall_stream_config; + bool is_phantom; }; #define ABM_LEVEL_IMMEDIATE_DISABLE 255 @@ -342,7 +325,6 @@ struct dc_stream_update { struct dc_3dlut *lut3d_func; struct test_pattern *pending_test_pattern; - struct dc_crtc_timing_adjust *crtc_timing_adjust; }; bool dc_is_stream_unchanged( @@ -415,45 +397,14 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, uint32_t *h_position, uint32_t *v_position); -enum dc_status dc_add_stream_to_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *stream); - -enum dc_status dc_remove_stream_from_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *stream); - - -bool dc_add_plane_to_context( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state *plane_state, - struct dc_state *context); - -bool dc_remove_plane_from_context( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state *plane_state, - struct dc_state *context); - -bool dc_rem_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_state *context); - -bool dc_add_all_planes_for_stream( - const struct dc *dc, - struct dc_stream_state *stream, - struct dc_plane_state * const *plane_states, - int plane_count, - struct dc_state *context); - bool dc_stream_add_writeback(struct dc *dc, struct dc_stream_state *stream, struct dc_writeback_info *wb_info); +bool dc_stream_fc_disable_writeback(struct dc *dc, + struct dc_stream_state *stream, + uint32_t dwb_pipe_inst); + bool dc_stream_remove_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst); @@ -514,9 +465,6 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink); void dc_stream_retain(struct dc_stream_state *dc_stream); void dc_stream_release(struct dc_stream_state 
*dc_stream);
 
-struct dc_stream_status *dc_stream_get_status_from_state(
-	struct dc_state *state,
-	struct dc_stream_state *stream);
 struct dc_stream_status *dc_stream_get_status(
 	struct dc_stream_state *dc_stream);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h
new file mode 100644
index 0000000000..7476fd52ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STREAM_PRIV_H_
+#define _DC_STREAM_PRIV_H_
+
+#include "dc_stream.h"
+
+bool dc_stream_construct(struct dc_stream_state *stream,
+	struct dc_sink *dc_sink_data);
+void dc_stream_destruct(struct dc_stream_state *stream);
+
+void dc_stream_assign_stream_id(struct dc_stream_state *stream);
+
+#endif // _DC_STREAM_PRIV_H_
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 66d0774bef..be2ac5c442 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1018,6 +1018,25 @@ enum replay_coasting_vtotal_type {
 	PR_COASTING_TYPE_NUM,
 };
 
+enum replay_link_off_frame_count_level {
+	PR_LINK_OFF_FRAME_COUNT_FAIL = 0x0,
+	PR_LINK_OFF_FRAME_COUNT_GOOD = 0x2,
+	PR_LINK_OFF_FRAME_COUNT_BEST = 0x6,
+};
+
+/*
+ * This is a general interface for Replay to
+ * set a 32-bit variable to DMUB.
+ * The Message_type indicates which variable
+ * is passed to DMUB.
+ */
+enum replay_FW_Message_type {
+	Replay_Msg_Not_Support = -1,
+	Replay_Set_Timing_Sync_Supported,
+	Replay_Set_Residency_Frameupdate_Timer,
+	Replay_Set_Pseudo_VTotal,
+};
+
 union replay_error_status {
 	struct {
 		unsigned char STATE_TRANSITION_ERROR :1;
@@ -1029,26 +1048,52 @@ union replay_error_status {
 };
 
 struct replay_config {
-	bool replay_supported; // Replay feature is supported
-	unsigned int replay_power_opt_supported; // Power opt flags that are supported
-	bool replay_smu_opt_supported; // SMU optimization is supported
-	unsigned int replay_enable_option; // Replay enablement option
-	uint32_t debug_flags; // Replay debug flags
-	bool replay_timing_sync_supported; // Replay desync is supported
-	bool force_disable_desync_error_check; // Replay desync is supported
-	bool received_desync_error_hpd; //Replay Received Desync Error HPD.
-	union replay_error_status replay_error_status; // Replay error status
-};
-
-/* Replay feature flags */
+	/* Replay feature is supported */
+	bool replay_supported;
+	/* Power opt flags that are supported */
+	unsigned int replay_power_opt_supported;
+	/* SMU optimization is supported */
+	bool replay_smu_opt_supported;
+	/* Replay enablement option */
+	unsigned int replay_enable_option;
+	/* Replay debug flags */
+	uint32_t debug_flags;
+	/* Replay sync is supported */
+	bool replay_timing_sync_supported;
+	/* Replay Disable desync error check. */
+	bool force_disable_desync_error_check;
+	/* Replay Received Desync Error HPD. */
+	bool received_desync_error_hpd;
+	/* Replay fast resync in ultra sleep mode (long vblank) is supported */
+	bool replay_support_fast_resync_in_ultra_sleep_mode;
+	/* Replay error status */
+	union replay_error_status replay_error_status;
+};
+
+/* Replay feature flags */
 struct replay_settings {
-	struct replay_config config; // Replay configuration
-	bool replay_feature_enabled; // Replay feature is ready for activating
-	bool replay_allow_active; // Replay is currently active
-	unsigned int replay_power_opt_active; // Power opt flags that are activated currently
-	bool replay_smu_opt_enable; // SMU optimization is enabled
-	uint16_t coasting_vtotal; // Current Coasting vtotal
-	uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; // Coasting vtotal table
+	/* Replay configuration */
+	struct replay_config config;
+	/* Replay feature is ready for activating */
+	bool replay_feature_enabled;
+	/* Replay is currently active */
+	bool replay_allow_active;
+	/* Replay with long vblank is allowed */
+	bool replay_allow_long_vblank;
+	/* Power opt flags that are activated currently */
+	unsigned int replay_power_opt_active;
+	/* SMU optimization is enabled */
+	bool replay_smu_opt_enable;
+	/* Current Coasting vtotal */
+	uint32_t coasting_vtotal;
+	/* Coasting vtotal table */
+	uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+	/* Maximum link off frame count */
+	enum replay_link_off_frame_count_level link_off_frame_count_level;
+	/* Replay pseudo vtotal for ABM + IPS on full-screen video, which can improve IPS residency */
+	uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal;
+	/* Replay last pseudo vtotal set to DMUB */
+	uint16_t last_pseudo_vtotal;
 };
 
 /* To split out "global" and "per-panel" config settings.
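/*
 * [Editor's sketch, not part of the patch] A hypothetical caller of the
 * replay_FW_Message_type interface defined above, dispatched by the
 * dmub_replay_send_cmd() hook added later in this patch. It fills the
 * shared union dmub_replay_cmd_set for the
 * Replay_Set_Residency_Frameupdate_Timer message; the wrapper name and
 * the funcs-pointer plumbing are assumptions for illustration, while the
 * field names match those used in dmub_replay_send_cmd().
 */
static void example_set_replay_frameupdate_timer(struct dmub_replay *replay,
		uint8_t panel_inst, uint16_t frameupdate_count)
{
	union dmub_replay_cmd_set cmd_element = { 0 };

	/* identify the panel and configure the residency frame-update timer */
	cmd_element.panel_inst = panel_inst;
	cmd_element.timer_data.enable = true;
	cmd_element.timer_data.frameupdate_count = frameupdate_count;

	replay->funcs->replay_send_cmd(replay,
			Replay_Set_Residency_Frameupdate_Timer, &cmd_element);
}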
@@ -1125,4 +1170,9 @@ enum dc_hpd_enable_select { HPD_EN_FOR_SECONDARY_EDP_ONLY, }; +enum mall_stream_type { + SUBVP_NONE, // subvp not in use + SUBVP_MAIN, // subvp in use, this stream is main stream + SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream +}; #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 874b132fe1..a600677633 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -135,7 +135,7 @@ static void dmcu_set_backlight_level( 0, 1, 80000); } -static void dce_abm_init(struct abm *abm, uint32_t backlight) +static void dce_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level) { struct dce_abm *abm_dce = TO_DCE_ABM(abm); @@ -162,7 +162,7 @@ static void dce_abm_init(struct abm *abm, uint32_t backlight) BL1_PWM_TARGET_ABM_LEVEL, backlight); REG_UPDATE(BL1_PWM_USER_LEVEL, - BL1_PWM_USER_LEVEL, backlight); + BL1_PWM_USER_LEVEL, user_level); REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM1_LS_MIN_PIXEL_VALUE_THRES, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index 930fd929e9..ccc154b028 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -57,18 +57,22 @@ static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst return ret; } -static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight) +static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight, uint32_t user_level) { - dmub_abm_init(abm, backlight); + dmub_abm_init(abm, backlight, user_level); } static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm) { + dc_allow_idle_optimizations(abm->ctx->dc, false); + return dmub_abm_get_current_backlight(abm); } static unsigned int dmub_abm_get_target_backlight_ex(struct abm *abm) { + dc_allow_idle_optimizations(abm->ctx->dc, false); + return dmub_abm_get_target_backlight(abm); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c index 4cff36351f..f9d6a18116 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -79,7 +79,7 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc) dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } -void dmub_abm_init(struct abm *abm, uint32_t backlight) +void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level) { struct dce_abm *dce_abm = TO_DMUB_ABM(abm); @@ -106,7 +106,7 @@ void dmub_abm_init(struct abm *abm, uint32_t backlight) BL1_PWM_TARGET_ABM_LEVEL, backlight); REG_UPDATE(BL1_PWM_USER_LEVEL, - BL1_PWM_USER_LEVEL, backlight); + BL1_PWM_USER_LEVEL, user_level); REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM1_LS_MIN_PIXEL_VALUE_THRES, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h index 07ea6c8d41..761685e5b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h @@ -30,7 +30,7 @@ struct abm_save_restore; -void dmub_abm_init(struct abm *abm, uint32_t backlight); +void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level); bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask); unsigned int dmub_abm_get_current_backlight(struct abm *abm); unsigned int 
dmub_abm_get_target_backlight(struct abm *abm); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index 28149e53c2..38e4797e94 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -258,13 +258,97 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, *residency = 0; } +/** + * Set REPLAY power optimization flags and coasting vtotal. + */ +static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub, + unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.replay_set_power_opt_and_coasting_vtotal.header.type = DMUB_CMD__REPLAY; + cmd.replay_set_power_opt_and_coasting_vtotal.header.sub_type = + DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL; + cmd.replay_set_power_opt_and_coasting_vtotal.header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal); + cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.power_opt = power_opt; + cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.panel_inst = panel_inst; + cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal; + + dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +/** + * send Replay general cmd to DMUB. + */ +static void dmub_replay_send_cmd(struct dmub_replay *dmub, + enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element) +{ + union dmub_rb_cmd cmd; + struct dc_context *ctx = NULL; + + if (dmub == NULL || cmd_element == NULL) + return; + + ctx = dmub->ctx; + if (ctx != NULL) { + + if (msg != Replay_Msg_Not_Support) { + memset(&cmd, 0, sizeof(cmd)); + //Header + cmd.replay_set_timing_sync.header.type = DMUB_CMD__REPLAY; + } else + return; + } else + return; + + switch (msg) { + case Replay_Set_Timing_Sync_Supported: + //Header + cmd.replay_set_timing_sync.header.sub_type = + DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED; + cmd.replay_set_timing_sync.header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_timing_sync); + //Cmd Body + cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst = + cmd_element->sync_data.panel_inst; + cmd.replay_set_timing_sync.replay_set_timing_sync_data.timing_sync_supported = + cmd_element->sync_data.timing_sync_supported; + break; + case Replay_Set_Residency_Frameupdate_Timer: + //Header + cmd.replay_set_frameupdate_timer.header.sub_type = + DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER; + cmd.replay_set_frameupdate_timer.header.payload_bytes = + sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer); + //Cmd Body + cmd.replay_set_frameupdate_timer.data.panel_inst = + cmd_element->panel_inst; + cmd.replay_set_frameupdate_timer.data.enable = + cmd_element->timer_data.enable; + cmd.replay_set_frameupdate_timer.data.frameupdate_count = + cmd_element->timer_data.frameupdate_count; + break; + case Replay_Msg_Not_Support: + default: + return; + break; + } + + dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + static const struct dmub_replay_funcs replay_funcs = { - .replay_copy_settings = dmub_replay_copy_settings, - .replay_enable = dmub_replay_enable, - .replay_get_state = dmub_replay_get_state, - .replay_set_power_opt = dmub_replay_set_power_opt, - .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal, - 
.replay_residency = dmub_replay_residency, + .replay_copy_settings = dmub_replay_copy_settings, + .replay_enable = dmub_replay_enable, + .replay_get_state = dmub_replay_get_state, + .replay_set_power_opt = dmub_replay_set_power_opt, + .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal, + .replay_residency = dmub_replay_residency, + .replay_set_power_opt_and_coasting_vtotal = dmub_replay_set_power_opt_and_coasting_vtotal, + .replay_send_cmd = dmub_replay_send_cmd, }; /* diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h index e8385bbf51..3613aff994 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h @@ -45,10 +45,14 @@ struct dmub_replay_funcs { struct replay_context *replay_context, uint8_t panel_inst); void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt, uint8_t panel_inst); + void (*replay_send_cmd)(struct dmub_replay *dmub, + enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element); void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint16_t coasting_vtotal, uint8_t panel_inst); void (*replay_residency)(struct dmub_replay *dmub, uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm); + void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub, + unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal); }; struct dmub_replay *dmub_replay_create(struct dc_context *ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile deleted file mode 100644 index 0d2f6bbf75..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dce100/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -# -# Copyright 2017 Advanced Micro Devices, Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# -# Makefile for the 'controller' sub-component of DAL. -# It provides the control and status of HW CRTC block. 
- -CFLAGS_$(AMDDALPATH)/dc/dce100/dce100_resource.o = $(call cc-disable-warning, override-init) - -DCE100 = dce100_resource.o - -AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCE100) - - -############################################################################### -# DCE 10x -############################################################################### -ifdef 0#CONFIG_DRM_AMD_DC_DCE11_0 -TG_DCE100 = dce100_resource.o - -AMD_DAL_TG_DCE100 = $(addprefix \ - $(AMDDALPATH)/dc/dce100/,$(TG_DCE100)) - -AMD_DISPLAY_FILES += $(AMD_DAL_TG_DCE100) -endif - diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c deleted file mode 100644 index 53a5f4cb64..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ /dev/null @@ -1,1179 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "link_encoder.h" -#include "stream_encoder.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "virtual/virtual_stream_encoder.h" -#include "dce110/dce110_resource.h" -#include "dce110/dce110_timing_generator.h" -#include "irq/dce110/irq_service_dce110.h" -#include "dce/dce_link_encoder.h" -#include "dce/dce_stream_encoder.h" -#include "dce/dce_mem_input.h" -#include "dce/dce_ipp.h" -#include "dce/dce_transform.h" -#include "dce/dce_opp.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "dce100/dce100_hwseq.h" -#include "dce/dce_panel_cntl.h" - -#include "reg_helper.h" - -#include "dce/dce_10_0_d.h" -#include "dce/dce_10_0_sh_mask.h" - -#include "dce/dce_dmcu.h" -#include "dce/dce_aux.h" -#include "dce/dce_abm.h" -#include "dce/dce_i2c.h" - -#include "dce100_resource.h" - -#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT -#include "gmc/gmc_8_2_d.h" -#include "gmc/gmc_8_2_sh_mask.h" -#endif - -#ifndef mmDP_DPHY_INTERNAL_CTRL - #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 - #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 - #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 - #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 - #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 - #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 - #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 - #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 - #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 - #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 -#endif - -#ifndef mmBIOS_SCRATCH_2 - #define mmBIOS_SCRATCH_2 0x05CB - #define mmBIOS_SCRATCH_3 0x05CC - #define mmBIOS_SCRATCH_6 0x05CF -#endif - -#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL - #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC - #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC - #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC - #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC - #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC - #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC - #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC - #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC -#endif - -#ifndef mmDP_DPHY_FAST_TRAINING - #define mmDP_DPHY_FAST_TRAINING 0x4ABC - #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC - #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC - #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC - #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC - #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC - #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC - #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC -#endif - -static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = { - { - .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), - } -}; - -/* set register offset */ -#define SR(reg_name)\ - .reg_name = mm ## reg_name - -/* set register offset with instance */ -#define SRI(reg_name, block, id)\ - .reg_name = mm ## block ## id ## _ ## reg_name - -#define ipp_regs(id)\ -[id] = {\ - IPP_DCE100_REG_LIST_DCE_BASE(id)\ -} - -static const struct dce_ipp_registers ipp_regs[] 
= { - ipp_regs(0), - ipp_regs(1), - ipp_regs(2), - ipp_regs(3), - ipp_regs(4), - ipp_regs(5) -}; - -static const struct dce_ipp_shift ipp_shift = { - IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) -}; - -static const struct dce_ipp_mask ipp_mask = { - IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) -}; - -#define transform_regs(id)\ -[id] = {\ - XFM_COMMON_REG_LIST_DCE100(id)\ -} - -static const struct dce_transform_registers xfm_regs[] = { - transform_regs(0), - transform_regs(1), - transform_regs(2), - transform_regs(3), - transform_regs(4), - transform_regs(5) -}; - -static const struct dce_transform_shift xfm_shift = { - XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_transform_mask xfm_mask = { - XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) -}; - -#define aux_regs(id)\ -[id] = {\ - AUX_REG_LIST(id)\ -} - -static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4), - hpd_regs(5) -}; - -#define link_regs(id)\ -[id] = {\ - LE_DCE100_REG_LIST(id)\ -} - -static const struct dce110_link_enc_registers link_enc_regs[] = { - link_regs(0), - link_regs(1), - link_regs(2), - link_regs(3), - link_regs(4), - link_regs(5), - link_regs(6), -}; - -#define stream_enc_regs(id)\ -[id] = {\ - SE_COMMON_REG_LIST_DCE_BASE(id),\ - .AFMT_CNTL = 0,\ -} - -static const struct dce110_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), - stream_enc_regs(4), - stream_enc_regs(5), - stream_enc_regs(6) -}; - -static const struct dce_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT) -}; - -static const struct dce_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) -}; - -static const struct dce_panel_cntl_registers panel_cntl_regs[] = { - { DCE_PANEL_CNTL_REG_LIST() } -}; - -static const struct dce_panel_cntl_shift panel_cntl_shift = { - DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_panel_cntl_mask panel_cntl_mask = { - DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_DCE_100_REG_LIST(id),\ -} - -static const struct dce_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3), - opp_regs(4), - opp_regs(5) -}; - -static const struct dce_opp_shift opp_shift = { - OPP_COMMON_MASK_SH_LIST_DCE_100(__SHIFT) -}; - -static const struct dce_opp_mask opp_mask = { - OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK) -}; -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST(id), \ - .AUX_RESET_MASK = 0 \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4), - aux_engine_regs(5) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6), -}; - -static const struct dce_audio_shift audio_shift = { - AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define clk_src_regs(id)\ -[id] = {\ - 
CS_COMMON_REG_LIST_DCE_100_110(id),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0), - clk_src_regs(1), - clk_src_regs(2) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) -}; - -static const struct dce_dmcu_registers dmcu_regs = { - DMCU_DCE110_COMMON_REG_LIST() -}; - -static const struct dce_dmcu_shift dmcu_shift = { - DMCU_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_dmcu_mask dmcu_mask = { - DMCU_MASK_SH_LIST_DCE110(_MASK) -}; - -static const struct dce_abm_registers abm_regs = { - ABM_DCE110_COMMON_REG_LIST() -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCE110(_MASK) -}; - -#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 - -static const struct bios_registers bios_regs = { - .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, - .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 -}; - -static const struct resource_caps res_cap = { - .num_timing_generator = 6, - .num_audio = 6, - .num_stream_encoder = 6, - .num_pll = 3, - .num_ddc = 6, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCE_RGB, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = false, - .fp16 = true - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 1, - .fp16 = 1 - }, - - .max_downscale_factor = { - .argb8888 = 250, - .nv12 = 1, - .fp16 = 1 - } -}; - -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update = true, -}; - -#define CTX ctx -#define REG(reg) mm ## reg - -#ifndef mmCC_DC_HDMI_STRAPS -#define mmCC_DC_HDMI_STRAPS 0x1918 -#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 -#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 -#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 -#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 -#endif - -static int map_transmitter_id_to_phy_instance( - enum transmitter transmitter) -{ - switch (transmitter) { - case TRANSMITTER_UNIPHY_A: - return 0; - case TRANSMITTER_UNIPHY_B: - return 1; - case TRANSMITTER_UNIPHY_C: - return 2; - case TRANSMITTER_UNIPHY_D: - return 3; - case TRANSMITTER_UNIPHY_E: - return 4; - case TRANSMITTER_UNIPHY_F: - return 5; - case TRANSMITTER_UNIPHY_G: - return 6; - default: - ASSERT(0); - return 0; - } -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - REG_GET_2(CC_DC_HDMI_STRAPS, - HDMI_DISABLE, &straps->hdmi_disable, - AUDIO_STREAM_NUMBER, &straps->audio_stream_number); - - REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); -} - -static struct audio *create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct timing_generator *dce100_timing_generator_create( - struct dc_context *ctx, - uint32_t instance, - const struct dce110_timing_generator_offsets *offsets) -{ - struct dce110_timing_generator *tg110 = - kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); - - if (!tg110) - return NULL; - - dce110_timing_generator_construct(tg110, ctx, instance, offsets); - return &tg110->base; -} - -static struct stream_encoder *dce100_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dce110_stream_encoder *enc110 = - kzalloc(sizeof(struct dce110_stream_encoder), 
GFP_KERNEL); - - if (!enc110) - return NULL; - - dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, - &stream_enc_regs[eng_id], &se_shift, &se_mask); - return &enc110->base; -} - -#define SRII(reg_name, block, id)\ - .reg_name[id] = mm ## block ## id ## _ ## reg_name - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCE10_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCE10_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCE10_MASK_SH_LIST(_MASK) -}; - -static struct dce_hwseq *dce100_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} - -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = create_audio, - .create_stream_encoder = dce100_stream_encoder_create, - .create_hwseq = dce100_hwseq_create, -}; - -#define mi_inst_regs(id) { \ - MI_DCE8_REG_LIST(id), \ - .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ -} -static const struct dce_mem_input_registers mi_regs[] = { - mi_inst_regs(0), - mi_inst_regs(1), - mi_inst_regs(2), - mi_inst_regs(3), - mi_inst_regs(4), - mi_inst_regs(5), -}; - -static const struct dce_mem_input_shift mi_shifts = { - MI_DCE8_MASK_SH_LIST(__SHIFT), - .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT -}; - -static const struct dce_mem_input_mask mi_masks = { - MI_DCE8_MASK_SH_LIST(_MASK), - .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK -}; - -static const struct dce110_aux_registers_shift aux_shift = { - DCE10_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCE10_AUX_MASK_SH_LIST(_MASK) -}; - -static struct mem_input *dce100_mem_input_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), - GFP_KERNEL); - - if (!dce_mi) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); - dce_mi->wa.single_head_rdreq_dmif_limit = 2; - return &dce_mi->base; -} - -static void dce100_transform_destroy(struct transform **xfm) -{ - kfree(TO_DCE_TRANSFORM(*xfm)); - *xfm = NULL; -} - -static struct transform *dce100_transform_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_transform *transform = - kzalloc(sizeof(struct dce_transform), GFP_KERNEL); - - if (!transform) - return NULL; - - dce_transform_construct(transform, ctx, inst, - &xfm_regs[inst], &xfm_shift, &xfm_mask); - return &transform->base; -} - -static struct input_pixel_processor *dce100_ipp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); - - if (!ipp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dce_ipp_construct(ipp, ctx, inst, - &ipp_regs[inst], &ipp_shift, &ipp_mask); - return &ipp->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 300000, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true -}; - -static struct link_encoder *dce100_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dce110_link_encoder *enc110 = - kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); - int link_regs_id; - - if 
(!enc110) - return NULL; - - link_regs_id = - map_transmitter_id_to_phy_instance(enc_init_data->transmitter); - - dce110_link_encoder_construct(enc110, - enc_init_data, - &link_enc_feature, - &link_enc_regs[link_regs_id], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source]); - return &enc110->base; -} - -static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dce_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dce_panel_cntl_construct(panel_cntl, - init_data, - &panel_cntl_regs[init_data->inst], - &panel_cntl_shift, - &panel_cntl_mask); - - return &panel_cntl->base; -} - -static struct output_pixel_processor *dce100_opp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce110_opp *opp = - kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); - - if (!opp) - return NULL; - - dce110_opp_construct(opp, - ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -static struct dce_aux *dce100_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5), - i2c_inst_regs(6), -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) -}; - -static struct dce_i2c_hw *dce100_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} -static struct clock_source *dce100_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dce110_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static void dce100_clock_source_destroy(struct clock_source **clk_src) -{ - kfree(TO_DCE110_CLK_SRC(*clk_src)); - *clk_src = NULL; -} - -static void dce100_resource_destruct(struct dce110_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.opps[i] != NULL) - dce110_opp_destroy(&pool->base.opps[i]); - - if (pool->base.transforms[i] != NULL) - dce100_transform_destroy(&pool->base.transforms[i]); - - if (pool->base.ipps[i] != NULL) - dce_ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.mis[i] != NULL) { - kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); - pool->base.mis[i] = NULL; - } - - if 
(pool->base.timing_generators[i] != NULL) { - kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) - kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) - dce100_clock_source_destroy(&pool->base.clock_sources[i]); - } - - if (pool->base.dp_clock_source != NULL) - dce100_clock_source_destroy(&pool->base.dp_clock_source); - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i] != NULL) - dce_aud_destroy(&pool->base.audios[i]); - } - - if (pool->base.abm != NULL) - dce_abm_destroy(&pool->base.abm); - - if (pool->base.dmcu != NULL) - dce_dmcu_destroy(&pool->base.dmcu); - - if (pool->base.irqs != NULL) - dal_irq_service_destroy(&pool->base.irqs); -} - -static enum dc_status build_mapped_resource( - const struct dc *dc, - struct dc_state *context, - struct dc_stream_state *stream) -{ - struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); - - if (!pipe_ctx) - return DC_ERROR_UNEXPECTED; - - dce110_resource_build_pipe_hw_param(pipe_ctx); - - resource_build_info_frame(pipe_ctx); - - return DC_OK; -} - -static bool dce100_validate_bandwidth( - struct dc *dc, - struct dc_state *context, - bool fast_validate) -{ - int i; - bool at_least_one_pipe = false; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (context->res_ctx.pipe_ctx[i].stream) - at_least_one_pipe = true; - } - - if (at_least_one_pipe) { - /* TODO implement when needed but for now hardcode max value*/ - context->bw_ctx.bw.dce.dispclk_khz = 681000; - context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; - } else { - context->bw_ctx.bw.dce.dispclk_khz = 0; - context->bw_ctx.bw.dce.yclk_khz = 0; - } - - return true; -} - -static bool dce100_validate_surface_sets( - struct dc_state *context) -{ - int i; - - for (i = 0; i < context->stream_count; i++) { - if (context->stream_status[i].plane_count == 0) - continue; - - if (context->stream_status[i].plane_count > 1) - return false; - - if (context->stream_status[i].plane_states[0]->format - >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) - return false; - } - - return true; -} - -static enum dc_status dce100_validate_global( - struct dc *dc, - struct dc_state *context) -{ - if (!dce100_validate_surface_sets(context)) - return DC_FAIL_SURFACE_VALIDATE; - - return DC_OK; -} - -enum dc_status dce100_add_stream_to_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *dc_stream) -{ - enum dc_status result = DC_ERROR_UNEXPECTED; - - result = resource_map_pool_resources(dc, new_ctx, dc_stream); - - if (result == DC_OK) - result = resource_map_clock_resources(dc, new_ctx, dc_stream); - - if (result == DC_OK) - result = build_mapped_resource(dc, new_ctx, dc_stream); - - return result; -} - -static void dce100_destroy_resource_pool(struct resource_pool **pool) -{ - struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - - dce100_resource_destruct(dce110_pool); - kfree(dce110_pool); - *pool 
= NULL; -} - -enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) -{ - - if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) - return DC_OK; - - return DC_FAIL_SURFACE_VALIDATE; -} - -struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( - struct resource_context *res_ctx, - const struct resource_pool *pool, - struct dc_stream_state *stream) -{ - int i; - int j = -1; - struct dc_link *link = stream->link; - - for (i = 0; i < pool->stream_enc_count; i++) { - if (!res_ctx->is_stream_enc_acquired[i] && - pool->stream_enc[i]) { - /* Store first available for MST second display - * in daisy chain use case - */ - j = i; - if (pool->stream_enc[i]->id == - link->link_enc->preferred_engine) - return pool->stream_enc[i]; - } - } - - /* - * below can happen in cases when stream encoder is acquired: - * 1) for second MST display in chain, so preferred engine already - * acquired; - * 2) for another link, which preferred engine already acquired by any - * MST configuration. - * - * If signal is of DP type and preferred engine not found, return last available - * - * TODO - This is just a patch up and a generic solution is - * required for non DP connectors. - */ - - if (j >= 0 && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) - return pool->stream_enc[j]; - - return NULL; -} - -static const struct resource_funcs dce100_res_pool_funcs = { - .destroy = dce100_destroy_resource_pool, - .link_enc_create = dce100_link_encoder_create, - .panel_cntl_create = dce100_panel_cntl_create, - .validate_bandwidth = dce100_validate_bandwidth, - .validate_plane = dce100_validate_plane, - .add_stream_to_ctx = dce100_add_stream_to_ctx, - .validate_global = dce100_validate_global, - .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link -}; - -static bool dce100_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dce110_resource_pool *pool) -{ - unsigned int i; - struct dc_context *ctx = dc->ctx; - struct dc_bios *bp; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap; - pool->base.funcs = &dce100_res_pool_funcs; - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - - bp = ctx->dc_bios; - - if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { - pool->base.dp_clock_source = - dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); - - pool->base.clock_sources[0] = - dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); - pool->base.clock_sources[1] = - dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); - pool->base.clock_sources[2] = - dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); - pool->base.clk_src_count = 3; - - } else { - pool->base.dp_clock_source = - dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); - - pool->base.clock_sources[0] = - dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); - pool->base.clock_sources[1] = - dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); - pool->base.clk_src_count = 2; - } - - if (pool->base.dp_clock_source == NULL) { - dm_error("DC: failed to create dp clock source!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - 
BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - } - - pool->base.dmcu = dce_dmcu_create(ctx, - &dmcu_regs, - &dmcu_shift, - &dmcu_mask); - if (pool->base.dmcu == NULL) { - dm_error("DC: failed to create dmcu!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - pool->base.abm = dce_abm_create(ctx, - &abm_regs, - &abm_shift, - &abm_mask); - if (pool->base.abm == NULL) { - dm_error("DC: failed to create abm!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - { - struct irq_service_init_data init_data; - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dce110_create(&init_data); - if (!pool->base.irqs) - goto res_create_fail; - } - - /************************************************* - * Resource + asic cap harcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = res_cap.num_timing_generator; - pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; - dc->caps.max_downscale_ratio = 200; - dc->caps.i2c_speed_in_khz = 40; - dc->caps.i2c_speed_in_khz = 40; - dc->caps.max_cursor_size = 128; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dual_link_dvi = true; - dc->caps.disable_dp_clk_share = true; - dc->caps.extended_aux_timeout_support = false; - dc->debug = debug_defaults; - - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.timing_generators[i] = - dce100_timing_generator_create( - ctx, - i, - &dce100_tg_offsets[i]); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto res_create_fail; - } - - pool->base.mis[i] = dce100_mem_input_create(ctx, i); - if (pool->base.mis[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create memory input!\n"); - goto res_create_fail; - } - - pool->base.ipps[i] = dce100_ipp_create(ctx, i); - if (pool->base.ipps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create input pixel processor!\n"); - goto res_create_fail; - } - - pool->base.transforms[i] = dce100_transform_create(ctx, i); - if (pool->base.transforms[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create transform!\n"); - goto res_create_fail; - } - - pool->base.opps[i] = dce100_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto res_create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dce100_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto res_create_fail; - } - pool->base.hw_i2cs[i] = dce100_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create i2c engine!!\n"); - goto res_create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto res_create_fail; - - /* Create hardware sequencer */ - dce100_hw_sequencer_construct(dc); - return true; - -res_create_fail: - dce100_resource_destruct(pool); - - return false; -} - -struct resource_pool *dce100_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc) -{ - struct dce110_resource_pool *pool = - kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); 
- - if (!pool) - return NULL; - - if (dce100_resource_construct(num_virtual_links, dc, pool)) - return &pool->base; - - kfree(pool); - BREAK_TO_DEBUGGER(); - return NULL; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h deleted file mode 100644 index fecab7c560..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * - */ -/* - * dce100_resource.h - * - * Created on: 2016-01-20 - * Author: qyang - */ - -#ifndef DCE100_RESOURCE_H_ -#define DCE100_RESOURCE_H_ - -struct dc; -struct resource_pool; -struct dc_validation_set; - -struct resource_pool *dce100_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc); - -enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps); - -enum dc_status dce100_add_stream_to_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *dc_stream); - -struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( - struct resource_context *res_ctx, - const struct resource_pool *pool, - struct dc_stream_state *stream); - -#endif /* DCE100_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile index 695a50ed5a..c307f040e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile @@ -23,11 +23,11 @@ # Makefile for the 'controller' sub-component of DAL. # It provides the control and status of HW CRTC block. 
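[Aside on the dce100 removal above, not part of the patch.] One non-obvious piece of the deleted code is dce100_find_first_free_match_stream_enc_for_link, still declared in the header removed above: it returns the encoder matching link->link_enc->preferred_engine when free, remembers a free encoder as a fallback, and trusts that fallback only for DP links (the MST daisy-chain case, where the preferred engine is already taken). Note that the original comment says "store first available", but the loop overwrites the index on every free encoder, so the fallback is actually the last free one. Distilled to its decision structure (a sketch mirroring the deleted code, not the verbatim kernel source):

/* Sketch of the selection policy; type and field names are from the deleted code. */
static struct stream_encoder *pick_stream_enc(struct resource_context *res_ctx,
					      const struct resource_pool *pool,
					      struct dc_stream_state *stream)
{
	int i, fallback = -1;

	for (i = 0; i < pool->stream_enc_count; i++) {
		if (res_ctx->is_stream_enc_acquired[i] || !pool->stream_enc[i])
			continue;
		fallback = i;	/* overwritten each time: ends as the last free encoder */
		if (pool->stream_enc[i]->id ==
		    stream->link->link_enc->preferred_engine)
			return pool->stream_enc[i];	/* exact match wins */
	}

	/* Fallback is only used for DP (e.g. second MST display in a chain). */
	if (fallback >= 0 &&
	    stream->link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT)
		return pool->stream_enc[fallback];

	return NULL;
}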
-CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = -Wno-override-init DCE110 = dce110_timing_generator.o \ -dce110_compressor.o dce110_resource.o \ -dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \ +dce110_compressor.o dce110_opp_regamma_v.o \ +dce110_opp_csc_v.o dce110_timing_generator_v.o \ dce110_mem_input_v.o dce110_opp_v.o dce110_transform_v.o AMD_DAL_DCE110 = $(addprefix $(AMDDALPATH)/dc/dce110/,$(DCE110)) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c deleted file mode 100644 index fe518fd27b..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ /dev/null @@ -1,1551 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
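[Aside on the Makefile hunk above, not part of the patch.] The change swaps $(call cc-disable-warning, override-init) for a bare -Wno-override-init, presumably because every compiler the kernel now supports understands the flag, making the capability probe unnecessary. The warning itself fires when designated initializers set the same field more than once, which the stacked register-list macros in these files do by design (e.g. stream_enc_regs() expanding SE_COMMON_REG_LIST_DCE_BASE(id) and then forcing .AFMT_CNTL = 0). A minimal reproduction:

struct regs { int a; int b; };

/* Under -Wextra (or -Woverride-init) this warns "initialized field
 * overwritten". The behavior is well defined in C: the last initializer
 * wins, so r.a == 3. */
static const struct regs r = {
	.a = 1,
	.b = 2,
	.a = 3,
};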
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "link_encoder.h" -#include "stream_encoder.h" - -#include "resource.h" -#include "dce110/dce110_resource.h" -#include "include/irq_service_interface.h" -#include "dce/dce_audio.h" -#include "dce110/dce110_timing_generator.h" -#include "irq/dce110/irq_service_dce110.h" -#include "dce110/dce110_timing_generator_v.h" -#include "dce/dce_link_encoder.h" -#include "dce/dce_stream_encoder.h" -#include "dce/dce_mem_input.h" -#include "dce110/dce110_mem_input_v.h" -#include "dce/dce_ipp.h" -#include "dce/dce_transform.h" -#include "dce110/dce110_transform_v.h" -#include "dce/dce_opp.h" -#include "dce110/dce110_opp_v.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dce/dce_aux.h" -#include "dce/dce_abm.h" -#include "dce/dce_dmcu.h" -#include "dce/dce_i2c.h" -#include "dce/dce_panel_cntl.h" - -#define DC_LOGGER \ - dc->ctx->logger - -#include "dce110/dce110_compressor.h" - -#include "reg_helper.h" - -#include "dce/dce_11_0_d.h" -#include "dce/dce_11_0_sh_mask.h" - -#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT -#include "gmc/gmc_8_2_d.h" -#include "gmc/gmc_8_2_sh_mask.h" -#endif - -#ifndef mmDP_DPHY_INTERNAL_CTRL - #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 - #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 - #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 - #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 - #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 - #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 - #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 - #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 - #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 - #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 -#endif - -#ifndef mmBIOS_SCRATCH_2 - #define mmBIOS_SCRATCH_2 0x05CB - #define mmBIOS_SCRATCH_3 0x05CC - #define mmBIOS_SCRATCH_6 0x05CF -#endif - -#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL - #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC - #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC - #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC - #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC - #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC - #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC - #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC - #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC -#endif - -#ifndef mmDP_DPHY_FAST_TRAINING - #define mmDP_DPHY_FAST_TRAINING 0x4ABC - #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC - #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC - #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC - #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC - #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC - #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC - #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC -#endif - -#ifndef DPHY_RX_FAST_TRAINING_CAPABLE - #define DPHY_RX_FAST_TRAINING_CAPABLE 0x1 -#endif - -static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = { - { - .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), - } -}; - -/* set register offset */ -#define SR(reg_name)\ - .reg_name = mm ## 
reg_name - -/* set register offset with instance */ -#define SRI(reg_name, block, id)\ - .reg_name = mm ## block ## id ## _ ## reg_name - -static const struct dce_dmcu_registers dmcu_regs = { - DMCU_DCE110_COMMON_REG_LIST() -}; - -static const struct dce_dmcu_shift dmcu_shift = { - DMCU_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_dmcu_mask dmcu_mask = { - DMCU_MASK_SH_LIST_DCE110(_MASK) -}; - -static const struct dce_abm_registers abm_regs = { - ABM_DCE110_COMMON_REG_LIST() -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCE110(_MASK) -}; - -#define ipp_regs(id)\ -[id] = {\ - IPP_DCE110_REG_LIST_DCE_BASE(id)\ -} - -static const struct dce_ipp_registers ipp_regs[] = { - ipp_regs(0), - ipp_regs(1), - ipp_regs(2) -}; - -static const struct dce_ipp_shift ipp_shift = { - IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) -}; - -static const struct dce_ipp_mask ipp_mask = { - IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) -}; - -#define transform_regs(id)\ -[id] = {\ - XFM_COMMON_REG_LIST_DCE110(id)\ -} - -static const struct dce_transform_registers xfm_regs[] = { - transform_regs(0), - transform_regs(1), - transform_regs(2) -}; - -static const struct dce_transform_shift xfm_shift = { - XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_transform_mask xfm_mask = { - XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) -}; - -#define aux_regs(id)\ -[id] = {\ - AUX_REG_LIST(id)\ -} - -static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4), - hpd_regs(5) -}; - - -#define link_regs(id)\ -[id] = {\ - LE_DCE110_REG_LIST(id)\ -} - -static const struct dce110_link_enc_registers link_enc_regs[] = { - link_regs(0), - link_regs(1), - link_regs(2), - link_regs(3), - link_regs(4), - link_regs(5), - link_regs(6), -}; - -#define stream_enc_regs(id)\ -[id] = {\ - SE_COMMON_REG_LIST(id),\ - .TMDS_CNTL = 0,\ -} - -static const struct dce110_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2) -}; - -static const struct dce_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCE110(_MASK) -}; - -static const struct dce_panel_cntl_registers panel_cntl_regs[] = { - { DCE_PANEL_CNTL_REG_LIST() } -}; - -static const struct dce_panel_cntl_shift panel_cntl_shift = { - DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_panel_cntl_mask panel_cntl_mask = { - DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) -}; - -static const struct dce110_aux_registers_shift aux_shift = { - DCE_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCE_AUX_MASK_SH_LIST(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_DCE_110_REG_LIST(id),\ -} - -static const struct dce_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3), - opp_regs(4), - opp_regs(5) -}; - -static const struct dce_opp_shift opp_shift = { - OPP_COMMON_MASK_SH_LIST_DCE_110(__SHIFT) -}; - -static const struct dce_opp_mask opp_mask = { - OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK) -}; - 
-#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST(id), \ - .AUX_RESET_MASK = 0 \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4), - aux_engine_regs(5) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6), -}; - -static const struct dce_audio_shift audio_shift = { - AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -/* AG TBD Needs to be reduced back to 3 pipes once dce10 hw sequencer implemented. */ - - -#define clk_src_regs(id)\ -[id] = {\ - CS_COMMON_REG_LIST_DCE_100_110(id),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0), - clk_src_regs(1), - clk_src_regs(2) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) -}; - -static const struct bios_registers bios_regs = { - .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, - .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 -}; - -static const struct resource_caps carrizo_resource_cap = { - .num_timing_generator = 3, - .num_video_plane = 1, - .num_audio = 3, - .num_stream_encoder = 3, - .num_pll = 2, - .num_ddc = 3, -}; - -static const struct resource_caps stoney_resource_cap = { - .num_timing_generator = 2, - .num_video_plane = 1, - .num_audio = 3, - .num_stream_encoder = 3, - .num_pll = 2, - .num_ddc = 3, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCE_RGB, - .per_pixel_alpha = 1, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = false, - .fp16 = true - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 1, - .fp16 = 1 - }, - - .max_downscale_factor = { - .argb8888 = 250, - .nv12 = 1, - .fp16 = 1 - }, - 64, - 64 -}; - -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update = true, -}; - -static const struct dc_plane_cap underlay_plane_cap = { - .type = DC_PLANE_TYPE_DCE_UNDERLAY, - .per_pixel_alpha = 1, - - .pixel_format_support = { - .argb8888 = false, - .nv12 = true, - .fp16 = false - }, - - .max_upscale_factor = { - .argb8888 = 1, - .nv12 = 16000, - .fp16 = 1 - }, - - .max_downscale_factor = { - .argb8888 = 1, - .nv12 = 250, - .fp16 = 1 - }, - 64, - 64 -}; - -#define CTX ctx -#define REG(reg) mm ## reg - -#ifndef mmCC_DC_HDMI_STRAPS -#define mmCC_DC_HDMI_STRAPS 0x4819 -#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 -#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 -#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 -#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 -#endif - -static int map_transmitter_id_to_phy_instance( - enum transmitter transmitter) -{ - switch (transmitter) { - case TRANSMITTER_UNIPHY_A: - return 0; - case TRANSMITTER_UNIPHY_B: - return 1; - case TRANSMITTER_UNIPHY_C: - return 2; - case TRANSMITTER_UNIPHY_D: - return 3; - case TRANSMITTER_UNIPHY_E: - return 4; - case TRANSMITTER_UNIPHY_F: - return 5; - case TRANSMITTER_UNIPHY_G: - return 6; - default: - ASSERT(0); - return 0; - } -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - REG_GET_2(CC_DC_HDMI_STRAPS, - HDMI_DISABLE, 
&straps->hdmi_disable, - AUDIO_STREAM_NUMBER, &straps->audio_stream_number); - - REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); -} - -static struct audio *create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct timing_generator *dce110_timing_generator_create( - struct dc_context *ctx, - uint32_t instance, - const struct dce110_timing_generator_offsets *offsets) -{ - struct dce110_timing_generator *tg110 = - kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); - - if (!tg110) - return NULL; - - dce110_timing_generator_construct(tg110, ctx, instance, offsets); - return &tg110->base; -} - -static struct stream_encoder *dce110_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dce110_stream_encoder *enc110 = - kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); - - if (!enc110) - return NULL; - - dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - return &enc110->base; -} - -#define SRII(reg_name, block, id)\ - .reg_name[id] = mm ## block ## id ## _ ## reg_name - -static const struct dce_hwseq_registers hwseq_stoney_reg = { - HWSEQ_ST_REG_LIST() -}; - -static const struct dce_hwseq_registers hwseq_cz_reg = { - HWSEQ_CZ_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCE11_MASK_SH_LIST(__SHIFT), -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCE11_MASK_SH_LIST(_MASK), -}; - -static struct dce_hwseq *dce110_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = ASIC_REV_IS_STONEY(ctx->asic_id.hw_internal_rev) ? 
- &hwseq_stoney_reg : &hwseq_cz_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - hws->wa.blnd_crtc_trigger = true; - } - return hws; -} - -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = create_audio, - .create_stream_encoder = dce110_stream_encoder_create, - .create_hwseq = dce110_hwseq_create, -}; - -#define mi_inst_regs(id) { \ - MI_DCE11_REG_LIST(id), \ - .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ -} -static const struct dce_mem_input_registers mi_regs[] = { - mi_inst_regs(0), - mi_inst_regs(1), - mi_inst_regs(2), -}; - -static const struct dce_mem_input_shift mi_shifts = { - MI_DCE11_MASK_SH_LIST(__SHIFT), - .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT -}; - -static const struct dce_mem_input_mask mi_masks = { - MI_DCE11_MASK_SH_LIST(_MASK), - .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK -}; - - -static struct mem_input *dce110_mem_input_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), - GFP_KERNEL); - - if (!dce_mi) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); - dce_mi->wa.single_head_rdreq_dmif_limit = 3; - return &dce_mi->base; -} - -static void dce110_transform_destroy(struct transform **xfm) -{ - kfree(TO_DCE_TRANSFORM(*xfm)); - *xfm = NULL; -} - -static struct transform *dce110_transform_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_transform *transform = - kzalloc(sizeof(struct dce_transform), GFP_KERNEL); - - if (!transform) - return NULL; - - dce_transform_construct(transform, ctx, inst, - &xfm_regs[inst], &xfm_shift, &xfm_mask); - return &transform->base; -} - -static struct input_pixel_processor *dce110_ipp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); - - if (!ipp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dce_ipp_construct(ipp, ctx, inst, - &ipp_regs[inst], &ipp_shift, &ipp_mask); - return &ipp->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 300000, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true -}; - -static struct link_encoder *dce110_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dce110_link_encoder *enc110 = - kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); - int link_regs_id; - - if (!enc110) - return NULL; - - link_regs_id = - map_transmitter_id_to_phy_instance(enc_init_data->transmitter); - - dce110_link_encoder_construct(enc110, - enc_init_data, - &link_enc_feature, - &link_enc_regs[link_regs_id], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source]); - return &enc110->base; -} - -static struct panel_cntl *dce110_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dce_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dce_panel_cntl_construct(panel_cntl, - init_data, - &panel_cntl_regs[init_data->inst], - &panel_cntl_shift, - &panel_cntl_mask); - - return &panel_cntl->base; -} - -static struct output_pixel_processor *dce110_opp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce110_opp *opp = - kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); - - if (!opp) - 
return NULL; - - dce110_opp_construct(opp, - ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -static struct dce_aux *dce110_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5), - i2c_inst_regs(6), -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) -}; - -static struct dce_i2c_hw *dce110_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} -static struct clock_source *dce110_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dce110_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static void dce110_clock_source_destroy(struct clock_source **clk_src) -{ - struct dce110_clk_src *dce110_clk_src; - - if (!clk_src) - return; - - dce110_clk_src = TO_DCE110_CLK_SRC(*clk_src); - - kfree(dce110_clk_src->dp_ss_params); - kfree(dce110_clk_src->hdmi_ss_params); - kfree(dce110_clk_src->dvi_ss_params); - - kfree(dce110_clk_src); - *clk_src = NULL; -} - -static void dce110_resource_destruct(struct dce110_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.opps[i] != NULL) - dce110_opp_destroy(&pool->base.opps[i]); - - if (pool->base.transforms[i] != NULL) - dce110_transform_destroy(&pool->base.transforms[i]); - - if (pool->base.ipps[i] != NULL) - dce_ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.mis[i] != NULL) { - kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); - pool->base.mis[i] = NULL; - } - - if (pool->base.timing_generators[i] != NULL) { - kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) - kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if 
(pool->base.clock_sources[i] != NULL) { - dce110_clock_source_destroy(&pool->base.clock_sources[i]); - } - } - - if (pool->base.dp_clock_source != NULL) - dce110_clock_source_destroy(&pool->base.dp_clock_source); - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i] != NULL) { - dce_aud_destroy(&pool->base.audios[i]); - } - } - - if (pool->base.abm != NULL) - dce_abm_destroy(&pool->base.abm); - - if (pool->base.dmcu != NULL) - dce_dmcu_destroy(&pool->base.dmcu); - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } -} - - -static void get_pixel_clock_parameters( - const struct pipe_ctx *pipe_ctx, - struct pixel_clk_params *pixel_clk_params) -{ - const struct dc_stream_state *stream = pipe_ctx->stream; - - /*TODO: is this halved for YCbCr 420? in that case we might want to move - * the pixel clock normalization for hdmi up to here instead of doing it - * in pll_adjust_pix_clk - */ - pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; - pixel_clk_params->encoder_object_id = stream->link->link_enc->id; - pixel_clk_params->signal_type = pipe_ctx->stream->signal; - pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; - /* TODO: un-hardcode*/ - pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * - LINK_RATE_REF_FREQ_IN_KHZ; - pixel_clk_params->flags.ENABLE_SS = 0; - pixel_clk_params->color_depth = - stream->timing.display_color_depth; - pixel_clk_params->flags.DISPLAY_BLANKED = 1; - pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding == - PIXEL_ENCODING_YCBCR420); - pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; - if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) { - pixel_clk_params->color_depth = COLOR_DEPTH_888; - } - if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { - pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2; - } - if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) - pixel_clk_params->requested_pix_clk_100hz *= 2; - -} - -void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx) -{ - get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); - pipe_ctx->clock_source->funcs->get_pix_clk_dividers( - pipe_ctx->clock_source, - &pipe_ctx->stream_res.pix_clk_params, - &pipe_ctx->pll_settings); - resource_build_bit_depth_reduction_params(pipe_ctx->stream, - &pipe_ctx->stream->bit_depth_params); - pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; -} - -static bool is_surface_pixel_format_supported(struct pipe_ctx *pipe_ctx, unsigned int underlay_idx) -{ - if (pipe_ctx->pipe_idx != underlay_idx) - return true; - if (!pipe_ctx->plane_state) - return false; - if (pipe_ctx->plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) - return false; - return true; -} - -static enum dc_status build_mapped_resource( - const struct dc *dc, - struct dc_state *context, - struct dc_stream_state *stream) -{ - struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); - - if (!pipe_ctx) - return DC_ERROR_UNEXPECTED; - - if (!is_surface_pixel_format_supported(pipe_ctx, - dc->res_pool->underlay_pipe_index)) - return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED; - - dce110_resource_build_pipe_hw_param(pipe_ctx); - - /* TODO: validate audio ASIC caps, encoder */ - - resource_build_info_frame(pipe_ctx); - - return DC_OK; -} - -static bool dce110_validate_bandwidth( - struct dc *dc, - struct dc_state *context, 
- bool fast_validate) -{ - bool result = false; - - DC_LOG_BANDWIDTH_CALCS( - "%s: start", - __func__); - - if (bw_calcs( - dc->ctx, - dc->bw_dceip, - dc->bw_vbios, - context->res_ctx.pipe_ctx, - dc->res_pool->pipe_count, - &context->bw_ctx.bw.dce)) - result = true; - - if (!result) - DC_LOG_BANDWIDTH_VALIDATION("%s: %dx%d@%d Bandwidth validation failed!\n", - __func__, - context->streams[0]->timing.h_addressable, - context->streams[0]->timing.v_addressable, - context->streams[0]->timing.pix_clk_100hz / 10); - - if (memcmp(&dc->current_state->bw_ctx.bw.dce, - &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) { - - DC_LOG_BANDWIDTH_CALCS( - "%s: finish,\n" - "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" - "stutMark_b: %d stutMark_a: %d\n" - "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" - "stutMark_b: %d stutMark_a: %d\n" - "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" - "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n" - "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n" - "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n" - , - __func__, - context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark, - context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark, - context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark, - context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark, - context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark, - context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark, - context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark, - context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark, - context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark, - context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark, - context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark, - context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark, - context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark, - context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark, - context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark, - context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark, - context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark, - context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark, - context->bw_ctx.bw.dce.stutter_mode_enable, - context->bw_ctx.bw.dce.cpuc_state_change_enable, - context->bw_ctx.bw.dce.cpup_state_change_enable, - context->bw_ctx.bw.dce.nbp_state_change_enable, - context->bw_ctx.bw.dce.all_displays_in_sync, - context->bw_ctx.bw.dce.dispclk_khz, - context->bw_ctx.bw.dce.sclk_khz, - context->bw_ctx.bw.dce.sclk_deep_sleep_khz, - context->bw_ctx.bw.dce.yclk_khz, - context->bw_ctx.bw.dce.blackout_recovery_time_us); - } - return result; -} - -static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state, - struct dc_caps *caps) -{ - if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) || - ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height)) - return DC_FAIL_SURFACE_VALIDATE; - - return DC_OK; -} - -static bool dce110_validate_surface_sets( - struct dc_state *context) -{ - int i, j; - - for (i = 0; i < context->stream_count; i++) { - if (context->stream_status[i].plane_count == 0) - continue; - - if (context->stream_status[i].plane_count > 2) - return false; - - for (j = 0; j < context->stream_status[i].plane_count; j++) { - struct dc_plane_state *plane = - context->stream_status[i].plane_states[j]; - - /* underlay validation */ - if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { - - if ((plane->src_rect.width > 1920 || - plane->src_rect.height > 1080)) - return false; - - /* we don't have the logic to 
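/*
 * dce110_validate_plane() above caps scaling at 2:1 downscale per
 * axis. A minimal standalone sketch of that predicate, with a
 * simplified rect type standing in for the DC one:
 */
#include <stdbool.h>

struct rect_sketch {
        int width;
        int height;
};

/* true when dst is no smaller than half of src on both axes */
static bool downscale_within_2x(struct rect_sketch src, struct rect_sketch dst)
{
        return dst.width * 2 >= src.width &&
               dst.height * 2 >= src.height;
}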
support underlay - * only yet so block the use case where we get - * NV12 plane as top layer - */ - if (j == 0) - return false; - - /* irrespective of plane format, - * stream should be RGB encoded - */ - if (context->streams[i]->timing.pixel_encoding - != PIXEL_ENCODING_RGB) - return false; - - } - - } - } - - return true; -} - -static enum dc_status dce110_validate_global( - struct dc *dc, - struct dc_state *context) -{ - if (!dce110_validate_surface_sets(context)) - return DC_FAIL_SURFACE_VALIDATE; - - return DC_OK; -} - -static enum dc_status dce110_add_stream_to_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *dc_stream) -{ - enum dc_status result = DC_ERROR_UNEXPECTED; - - result = resource_map_pool_resources(dc, new_ctx, dc_stream); - - if (result == DC_OK) - result = resource_map_clock_resources(dc, new_ctx, dc_stream); - - - if (result == DC_OK) - result = build_mapped_resource(dc, new_ctx, dc_stream); - - return result; -} - -static struct pipe_ctx *dce110_acquire_underlay( - const struct dc_state *cur_ctx, - struct dc_state *new_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *opp_head_pipe) -{ - struct dc_stream_state *stream = opp_head_pipe->stream; - struct dc *dc = stream->ctx->dc; - struct dce_hwseq *hws = dc->hwseq; - struct resource_context *res_ctx = &new_ctx->res_ctx; - unsigned int underlay_idx = pool->underlay_pipe_index; - struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx]; - - if (res_ctx->pipe_ctx[underlay_idx].stream) - return NULL; - - pipe_ctx->stream_res.tg = pool->timing_generators[underlay_idx]; - pipe_ctx->plane_res.mi = pool->mis[underlay_idx]; - /*pipe_ctx->plane_res.ipp = res_ctx->pool->ipps[underlay_idx];*/ - pipe_ctx->plane_res.xfm = pool->transforms[underlay_idx]; - pipe_ctx->stream_res.opp = pool->opps[underlay_idx]; - pipe_ctx->pipe_idx = underlay_idx; - - pipe_ctx->stream = stream; - - if (!dc->current_state->res_ctx.pipe_ctx[underlay_idx].stream) { - struct tg_color black_color = {0}; - struct dc_bios *dcb = dc->ctx->dc_bios; - - hws->funcs.enable_display_power_gating( - dc, - pipe_ctx->stream_res.tg->inst, - dcb, PIPE_GATING_CONTROL_DISABLE); - - /* - * This is for powering on underlay, so crtc does not - * need to be enabled - */ - - pipe_ctx->stream_res.tg->funcs->program_timing(pipe_ctx->stream_res.tg, - &stream->timing, - 0, - 0, - 0, - 0, - pipe_ctx->stream->signal, - false); - - pipe_ctx->stream_res.tg->funcs->enable_advanced_request( - pipe_ctx->stream_res.tg, - true, - &stream->timing); - - pipe_ctx->plane_res.mi->funcs->allocate_mem_input(pipe_ctx->plane_res.mi, - stream->timing.h_total, - stream->timing.v_total, - stream->timing.pix_clk_100hz / 10, - new_ctx->stream_count); - - color_space_to_black_color(dc, - COLOR_SPACE_YCBCR601, &black_color); - pipe_ctx->stream_res.tg->funcs->set_blank_color( - pipe_ctx->stream_res.tg, - &black_color); - } - - return pipe_ctx; -} - -static void dce110_destroy_resource_pool(struct resource_pool **pool) -{ - struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - - dce110_resource_destruct(dce110_pool); - kfree(dce110_pool); - *pool = NULL; -} - -struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link( - struct resource_context *res_ctx, - const struct resource_pool *pool, - struct dc_stream_state *stream) -{ - int i; - int j = -1; - struct dc_link *link = stream->link; - - for (i = 0; i < pool->stream_enc_count; i++) { - if (!res_ctx->is_stream_enc_acquired[i] && - pool->stream_enc[i]) { - /* Store first available 
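/*
 * The surface set rules enforced above, restated as one standalone
 * predicate with simplified stand-in types. The limits (at most two
 * planes, a 1920x1080 cap on the video source rect, video never as the
 * first plane, an RGB encoded stream) are taken from the code being
 * removed:
 */
#include <stdbool.h>

struct plane_sketch {
        bool is_video;  /* format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN */
        int src_width;
        int src_height;
};

static bool underlay_surface_set_ok(const struct plane_sketch *planes,
                                    int plane_count, bool stream_is_rgb)
{
        int j;

        if (plane_count > 2)
                return false;

        for (j = 0; j < plane_count; j++) {
                if (!planes[j].is_video)
                        continue;
                if (planes[j].src_width > 1920 || planes[j].src_height > 1080)
                        return false;
                if (j == 0)
                        return false;   /* underlay-only is not supported */
                if (!stream_is_rgb)
                        return false;   /* stream must stay RGB encoded */
        }
        return true;
}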
for MST second display - * in daisy chain use case - */ - j = i; - if (pool->stream_enc[i]->id == - link->link_enc->preferred_engine) - return pool->stream_enc[i]; - } - } - - /* - * For CZ and later, we can allow DIG FE and BE to differ for all display types - */ - - if (j >= 0) - return pool->stream_enc[j]; - - return NULL; -} - - -static const struct resource_funcs dce110_res_pool_funcs = { - .destroy = dce110_destroy_resource_pool, - .link_enc_create = dce110_link_encoder_create, - .panel_cntl_create = dce110_panel_cntl_create, - .validate_bandwidth = dce110_validate_bandwidth, - .validate_plane = dce110_validate_plane, - .acquire_free_pipe_as_secondary_dpp_pipe = dce110_acquire_underlay, - .add_stream_to_ctx = dce110_add_stream_to_ctx, - .validate_global = dce110_validate_global, - .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link -}; - -static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool) -{ - struct dce110_timing_generator *dce110_tgv = kzalloc(sizeof(*dce110_tgv), - GFP_KERNEL); - struct dce_transform *dce110_xfmv = kzalloc(sizeof(*dce110_xfmv), - GFP_KERNEL); - struct dce_mem_input *dce110_miv = kzalloc(sizeof(*dce110_miv), - GFP_KERNEL); - struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv), - GFP_KERNEL); - - if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) { - kfree(dce110_tgv); - kfree(dce110_xfmv); - kfree(dce110_miv); - kfree(dce110_oppv); - return false; - } - - dce110_opp_v_construct(dce110_oppv, ctx); - - dce110_timing_generator_v_construct(dce110_tgv, ctx); - dce110_mem_input_v_construct(dce110_miv, ctx); - dce110_transform_v_construct(dce110_xfmv, ctx); - - pool->opps[pool->pipe_count] = &dce110_oppv->base; - pool->timing_generators[pool->pipe_count] = &dce110_tgv->base; - pool->mis[pool->pipe_count] = &dce110_miv->base; - pool->transforms[pool->pipe_count] = &dce110_xfmv->base; - pool->pipe_count++; - - /* update the public caps to indicate an underlay is available */ - ctx->dc->caps.max_slave_planes = 1; - ctx->dc->caps.max_slave_yuv_planes = 1; - ctx->dc->caps.max_slave_rgb_planes = 0; - - return true; -} - -static void bw_calcs_data_update_from_pplib(struct dc *dc) -{ - struct dm_pp_clock_levels clks = {0}; - - /* do system clock */ - dm_pp_get_clock_levels_by_type( - dc->ctx, - DM_PP_CLOCK_TYPE_ENGINE_CLK, - &clks); - /* convert all the clocks from kHz to fixed point MHz */ - dc->bw_vbios->high_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels-1], 1000); - dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels/8], 1000); - dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels*2/8], 1000); - dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels*3/8], 1000); - dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels*4/8], 1000); - dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels*5/8], 1000); - dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels*6/8], 1000); - dc->bw_vbios->low_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[0], 1000); - dc->sclk_lvls = clks; - - /* do display clock */ - dm_pp_get_clock_levels_by_type( - dc->ctx, - DM_PP_CLOCK_TYPE_DISPLAY_CLK, - &clks); - dc->bw_vbios->high_voltage_max_dispclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels-1], 1000); - dc->bw_vbios->mid_voltage_max_dispclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels>>1], 1000); -
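/*
 * bw_calcs_data_update_from_pplib() above fills the six mid sclk slots
 * by sampling the PPLib level table at fixed fractions, k/8 of
 * num_levels for k = 1..6, with low and high taken from the ends. A
 * standalone sketch of that sampling; plain doubles stand in for the
 * driver's bw_frc_to_fixed() fixed-point values (kHz / 1000 = MHz):
 */
static void sample_sclk_levels(const unsigned int *levels_khz, int num_levels,
                               double out_mhz[8])
{
        int k;

        out_mhz[0] = levels_khz[0] / 1000.0;                    /* low */
        for (k = 1; k <= 6; k++)                                /* mid1..mid6 */
                out_mhz[k] = levels_khz[num_levels * k / 8] / 1000.0;
        out_mhz[7] = levels_khz[num_levels - 1] / 1000.0;       /* high */
}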
dc->bw_vbios->low_voltage_max_dispclk = bw_frc_to_fixed( - clks.clocks_in_khz[0], 1000); - - /* do memory clock */ - dm_pp_get_clock_levels_by_type( - dc->ctx, - DM_PP_CLOCK_TYPE_MEMORY_CLK, - &clks); - - dc->bw_vbios->low_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); - dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, - 1000); - dc->bw_vbios->high_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, - 1000); -} - -static const struct resource_caps *dce110_resource_cap( - struct hw_asic_id *asic_id) -{ - if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev)) - return &stoney_resource_cap; - else - return &carrizo_resource_cap; -} - -static bool dce110_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dce110_resource_pool *pool, - struct hw_asic_id asic_id) -{ - unsigned int i; - struct dc_context *ctx = dc->ctx; - struct dc_bios *bp; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = dce110_resource_cap(&ctx->asic_id); - pool->base.funcs = &dce110_res_pool_funcs; - - /************************************************* - * Resource + asic cap hardcoding * - *************************************************/ - - pool->base.pipe_count = pool->base.res_cap->num_timing_generator; - pool->base.underlay_pipe_index = pool->base.pipe_count; - pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; - dc->caps.max_downscale_ratio = 150; - dc->caps.i2c_speed_in_khz = 40; - dc->caps.i2c_speed_in_khz_hdcp = 40; - dc->caps.max_cursor_size = 128; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.is_apu = true; - dc->caps.extended_aux_timeout_support = false; - dc->debug = debug_defaults; - - /************************************************* - * Create resources * - *************************************************/ - - bp = ctx->dc_bios; - - if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { - pool->base.dp_clock_source = - dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); - - pool->base.clock_sources[0] = - dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[1] = - dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, - &clk_src_regs[1], false); - - pool->base.clk_src_count = 2; - - /* TODO: find out if CZ supports 3 PLLs */ - } - - if (pool->base.dp_clock_source == NULL) { - dm_error("DC: failed to create dp clock source!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - } - - pool->base.dmcu = dce_dmcu_create(ctx, - &dmcu_regs, - &dmcu_shift, - &dmcu_mask); - if (pool->base.dmcu == NULL) { - dm_error("DC: failed to create dmcu!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - pool->base.abm = dce_abm_create(ctx, - &abm_regs, - &abm_shift, - &abm_mask); - if (pool->base.abm == NULL) { - dm_error("DC: failed to create abm!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - { - struct irq_service_init_data init_data; - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dce110_create(&init_data); - if (!pool->base.irqs) - goto res_create_fail; - } - - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.timing_generators[i] = 
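/*
 * Every failure path in dce110_resource_construct() above jumps to the
 * single res_create_fail label, which runs the full destructor; that is
 * safe only because dce110_resource_destruct() NULL-checks each member,
 * so it copes with a partially built pool. A minimal userspace sketch
 * of the idiom with stand-in types:
 */
#include <stdlib.h>

struct two_part_pool {
        int *first;
        int *second;
};

static void two_part_destruct(struct two_part_pool *p)
{
        /* free(NULL) is a no-op, mirroring the driver's NULL checks */
        free(p->first);
        free(p->second);
        p->first = NULL;
        p->second = NULL;
}

static int two_part_construct(struct two_part_pool *p)
{
        p->first = calloc(1, sizeof(*p->first));
        if (!p->first)
                goto create_fail;
        p->second = calloc(1, sizeof(*p->second));
        if (!p->second)
                goto create_fail;
        return 1;

create_fail:
        two_part_destruct(p);   /* one cleanup path for every failure */
        return 0;
}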
dce110_timing_generator_create( - ctx, i, &dce110_tg_offsets[i]); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto res_create_fail; - } - - pool->base.mis[i] = dce110_mem_input_create(ctx, i); - if (pool->base.mis[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create memory input!\n"); - goto res_create_fail; - } - - pool->base.ipps[i] = dce110_ipp_create(ctx, i); - if (pool->base.ipps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create input pixel processor!\n"); - goto res_create_fail; - } - - pool->base.transforms[i] = dce110_transform_create(ctx, i); - if (pool->base.transforms[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create transform!\n"); - goto res_create_fail; - } - - pool->base.opps[i] = dce110_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto res_create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dce110_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto res_create_fail; - } - pool->base.hw_i2cs[i] = dce110_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create i2c engine!!\n"); - goto res_create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - if (dc->config.fbc_support) - dc->fbc_compressor = dce110_compressor_create(ctx); - - if (!underlay_create(ctx, &pool->base)) - goto res_create_fail; - - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto res_create_fail; - - /* Create hardware sequencer */ - dce110_hw_sequencer_construct(dc); - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < pool->base.underlay_pipe_index; ++i) - dc->caps.planes[i] = plane_cap; - - dc->caps.planes[pool->base.underlay_pipe_index] = underlay_plane_cap; - - bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); - - bw_calcs_data_update_from_pplib(dc); - - return true; - -res_create_fail: - dce110_resource_destruct(pool); - return false; -} - -struct resource_pool *dce110_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc, - struct hw_asic_id asic_id) -{ - struct dce110_resource_pool *pool = - kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dce110_resource_construct(num_virtual_links, dc, pool, asic_id)) - return &pool->base; - - kfree(pool); - BREAK_TO_DEBUGGER(); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h deleted file mode 100644 index aa4531e080..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h +++ /dev/null @@ -1,54 +0,0 @@ -/* -* Copyright 2012-15 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_RESOURCE_DCE110_H__ -#define __DC_RESOURCE_DCE110_H__ - -#include "core_types.h" - -struct dc; -struct resource_pool; - -#define TO_DCE110_RES_POOL(pool)\ - container_of(pool, struct dce110_resource_pool, base) - -struct dce110_resource_pool { - struct resource_pool base; -}; - -void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx); - -struct resource_pool *dce110_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc, - struct hw_asic_id asic_id); - -struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link( - struct resource_context *res_ctx, - const struct resource_pool *pool, - struct dc_stream_state *stream); - -#endif /* __DC_RESOURCE_DCE110_H__ */ - diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile index e846ef58ca..6838667977 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile @@ -23,10 +23,9 @@ # Makefile for the 'controller' sub-component of DAL. # It provides the control and status of HW CRTC block. -CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = -Wno-override-init -DCE112 = dce112_compressor.o \ -dce112_resource.o +DCE112 = dce112_compressor.o AMD_DAL_DCE112 = $(addprefix $(AMDDALPATH)/dc/dce112/,$(DCE112)) diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c deleted file mode 100644 index d1edac46c9..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ /dev/null @@ -1,1431 +0,0 @@ -/* -* Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
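/*
 * TO_DCE110_RES_POOL() in the header above is the usual container_of()
 * downcast: given a pointer to the embedded base, recover the wrapper
 * struct that contains it. A standalone userspace sketch of the same
 * idiom, with simplified stand-in types:
 */
#include <stddef.h>

struct base_pool_sketch { int pipe_count; };

struct wrapped_pool_sketch {
        int private_data;
        struct base_pool_sketch base;
};

#define container_of_sketch(ptr, type, member)\
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct wrapped_pool_sketch *to_wrapped(struct base_pool_sketch *base)
{
        return container_of_sketch(base, struct wrapped_pool_sketch, base);
}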
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "link_encoder.h" -#include "stream_encoder.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dce110/dce110_resource.h" -#include "dce110/dce110_timing_generator.h" - -#include "irq/dce110/irq_service_dce110.h" -#include "dce/dce_mem_input.h" -#include "dce/dce_transform.h" -#include "dce/dce_link_encoder.h" -#include "dce/dce_stream_encoder.h" -#include "dce/dce_audio.h" -#include "dce/dce_opp.h" -#include "dce/dce_ipp.h" -#include "dce/dce_clock_source.h" - -#include "dce/dce_hwseq.h" -#include "dce112/dce112_hwseq.h" -#include "dce/dce_abm.h" -#include "dce/dce_dmcu.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" -#include "dce/dce_panel_cntl.h" - -#include "reg_helper.h" - -#include "dce/dce_11_2_d.h" -#include "dce/dce_11_2_sh_mask.h" - -#include "dce100/dce100_resource.h" -#include "dce112_resource.h" - -#define DC_LOGGER \ - dc->ctx->logger - -#ifndef mmDP_DPHY_INTERNAL_CTRL - #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 - #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 - #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 - #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 - #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 - #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 - #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 - #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 - #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 - #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 -#endif - -#ifndef mmBIOS_SCRATCH_2 - #define mmBIOS_SCRATCH_2 0x05CB - #define mmBIOS_SCRATCH_3 0x05CC - #define mmBIOS_SCRATCH_6 0x05CF -#endif - -#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL - #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC - #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC - #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC - #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC - #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC - #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC - #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC - #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC -#endif - -#ifndef mmDP_DPHY_FAST_TRAINING - #define mmDP_DPHY_FAST_TRAINING 0x4ABC - #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC - #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC - #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC - #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC - #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC - #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC - #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC -#endif - -enum dce112_clk_src_array_id { - DCE112_CLK_SRC_PLL0, - DCE112_CLK_SRC_PLL1, - DCE112_CLK_SRC_PLL2, - DCE112_CLK_SRC_PLL3, - DCE112_CLK_SRC_PLL4, - DCE112_CLK_SRC_PLL5, - - DCE112_CLK_SRC_TOTAL -}; - -static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = { - { - .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC3_CRTC_CONTROL - 
mmCRTC_CONTROL), - .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), - }, - { - .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), - } -}; - -/* set register offset */ -#define SR(reg_name)\ - .reg_name = mm ## reg_name - -/* set register offset with instance */ -#define SRI(reg_name, block, id)\ - .reg_name = mm ## block ## id ## _ ## reg_name - -static const struct dce_dmcu_registers dmcu_regs = { - DMCU_DCE110_COMMON_REG_LIST() -}; - -static const struct dce_dmcu_shift dmcu_shift = { - DMCU_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_dmcu_mask dmcu_mask = { - DMCU_MASK_SH_LIST_DCE110(_MASK) -}; - -static const struct dce_abm_registers abm_regs = { - ABM_DCE110_COMMON_REG_LIST() -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCE110(_MASK) -}; - -static const struct dce110_aux_registers_shift aux_shift = { - DCE_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCE_AUX_MASK_SH_LIST(_MASK) -}; - -#define ipp_regs(id)\ -[id] = {\ - IPP_DCE110_REG_LIST_DCE_BASE(id)\ -} - -static const struct dce_ipp_registers ipp_regs[] = { - ipp_regs(0), - ipp_regs(1), - ipp_regs(2), - ipp_regs(3), - ipp_regs(4), - ipp_regs(5) -}; - -static const struct dce_ipp_shift ipp_shift = { - IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) -}; - -static const struct dce_ipp_mask ipp_mask = { - IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) -}; - -#define transform_regs(id)\ -[id] = {\ - XFM_COMMON_REG_LIST_DCE110(id)\ -} - -static const struct dce_transform_registers xfm_regs[] = { - transform_regs(0), - transform_regs(1), - transform_regs(2), - transform_regs(3), - transform_regs(4), - transform_regs(5) -}; - -static const struct dce_transform_shift xfm_shift = { - XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_transform_mask xfm_mask = { - XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) -}; - -#define aux_regs(id)\ -[id] = {\ - AUX_REG_LIST(id)\ -} - -static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5) -}; - -static const struct dce_panel_cntl_registers panel_cntl_regs[] = { - { DCE_PANEL_CNTL_REG_LIST() } -}; - -static const struct dce_panel_cntl_shift panel_cntl_shift = { - DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_panel_cntl_mask panel_cntl_mask = { - DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4), - hpd_regs(5) -}; - -#define link_regs(id)\ -[id] = {\ - LE_DCE110_REG_LIST(id)\ -} - -static const struct dce110_link_enc_registers link_enc_regs[] = { - link_regs(0), - link_regs(1), - link_regs(2), - link_regs(3), - link_regs(4), - link_regs(5), - link_regs(6), -}; - -#define stream_enc_regs(id)\ -[id] = {\ - SE_COMMON_REG_LIST(id),\ - .TMDS_CNTL = 0,\ -} - -static const struct dce110_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), - stream_enc_regs(4), - stream_enc_regs(5) -}; - -static const struct dce_stream_encoder_shift se_shift = { - 
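/*
 * The SR()/SRI() macros above build designated initializers by token
 * pasting register names onto the mm prefix (plus block and instance
 * for SRI). A standalone sketch with a hypothetical FOO register
 * showing what the expansion produces; mmFOO and mmCRTC2_FOO are
 * illustrative values, not real register offsets:
 */
#define mmFOO           0x0100  /* hypothetical flat register */
#define mmCRTC2_FOO     0x0210  /* hypothetical instanced register */

#define SR_SKETCH(reg_name)\
        .reg_name = mm ## reg_name
#define SRI_SKETCH(reg_name, block, id)\
        .reg_name = mm ## block ## id ## _ ## reg_name

struct foo_regs_sketch {
        unsigned int FOO;
};

/* expands to { .FOO = mmFOO } */
static const struct foo_regs_sketch flat_regs = { SR_SKETCH(FOO) };
/* expands to { .FOO = mmCRTC2_FOO } */
static const struct foo_regs_sketch crtc2_regs = { SRI_SKETCH(FOO, CRTC, 2) };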
SE_COMMON_MASK_SH_LIST_DCE112(__SHIFT) -}; - -static const struct dce_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCE112(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_DCE_112_REG_LIST(id),\ -} - -static const struct dce_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3), - opp_regs(4), - opp_regs(5) -}; - -static const struct dce_opp_shift opp_shift = { - OPP_COMMON_MASK_SH_LIST_DCE_112(__SHIFT) -}; - -static const struct dce_opp_mask opp_mask = { - OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK) -}; - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST(id), \ - .AUX_RESET_MASK = 0 \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4), - aux_engine_regs(5) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5) -}; - -static const struct dce_audio_shift audio_shift = { - AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define clk_src_regs(index, id)\ -[index] = {\ - CS_COMMON_REG_LIST_DCE_112(id),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, C), - clk_src_regs(3, D), - clk_src_regs(4, E), - clk_src_regs(5, F) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCE_112(_MASK) -}; - -static const struct bios_registers bios_regs = { - .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, - .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 -}; - -static const struct resource_caps polaris_10_resource_cap = { - .num_timing_generator = 6, - .num_audio = 6, - .num_stream_encoder = 6, - .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */ - .num_ddc = 6, -}; - -static const struct resource_caps polaris_11_resource_cap = { - .num_timing_generator = 5, - .num_audio = 5, - .num_stream_encoder = 5, - .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? 
*/ - .num_ddc = 5, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCE_RGB, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = false, - .fp16 = true - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 1, - .fp16 = 1 - }, - - .max_downscale_factor = { - .argb8888 = 250, - .nv12 = 1, - .fp16 = 1 - }, - 64, - 64 -}; - -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update = true, -}; - -#define CTX ctx -#define REG(reg) mm ## reg - -#ifndef mmCC_DC_HDMI_STRAPS -#define mmCC_DC_HDMI_STRAPS 0x4819 -#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 -#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 -#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 -#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 -#endif - -static int map_transmitter_id_to_phy_instance( - enum transmitter transmitter) -{ - switch (transmitter) { - case TRANSMITTER_UNIPHY_A: - return 0; - case TRANSMITTER_UNIPHY_B: - return 1; - case TRANSMITTER_UNIPHY_C: - return 2; - case TRANSMITTER_UNIPHY_D: - return 3; - case TRANSMITTER_UNIPHY_E: - return 4; - case TRANSMITTER_UNIPHY_F: - return 5; - case TRANSMITTER_UNIPHY_G: - return 6; - default: - ASSERT(0); - return 0; - } -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - REG_GET_2(CC_DC_HDMI_STRAPS, - HDMI_DISABLE, &straps->hdmi_disable, - AUDIO_STREAM_NUMBER, &straps->audio_stream_number); - - REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); -} - -static struct audio *create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - - -static struct timing_generator *dce112_timing_generator_create( - struct dc_context *ctx, - uint32_t instance, - const struct dce110_timing_generator_offsets *offsets) -{ - struct dce110_timing_generator *tg110 = - kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); - - if (!tg110) - return NULL; - - dce110_timing_generator_construct(tg110, ctx, instance, offsets); - return &tg110->base; -} - -static struct stream_encoder *dce112_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dce110_stream_encoder *enc110 = - kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); - - if (!enc110) - return NULL; - - dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - return &enc110->base; -} - -#define SRII(reg_name, block, id)\ - .reg_name[id] = mm ## block ## id ## _ ## reg_name - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCE112_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCE112_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCE112_MASK_SH_LIST(_MASK) -}; - -static struct dce_hwseq *dce112_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} - -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = create_audio, - .create_stream_encoder = dce112_stream_encoder_create, - .create_hwseq = dce112_hwseq_create, -}; - -#define mi_inst_regs(id) { MI_DCE11_2_REG_LIST(id) } -static const struct dce_mem_input_registers mi_regs[] = { - 
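/*
 * map_transmitter_id_to_phy_instance() above is a straight enum to
 * index switch used to select the per-PHY register bank. An equivalent
 * standalone sketch; it assumes the UNIPHY values are consecutive,
 * which the switch form deliberately does not rely on:
 */
enum uniphy_sketch {
        UNIPHY_SKETCH_A, UNIPHY_SKETCH_B, UNIPHY_SKETCH_C, UNIPHY_SKETCH_D,
        UNIPHY_SKETCH_E, UNIPHY_SKETCH_F, UNIPHY_SKETCH_G
};

static int uniphy_to_phy_instance(enum uniphy_sketch t)
{
        /* fall back to instance 0 for anything unexpected, as the
         * driver's default case does after its ASSERT */
        if (t > UNIPHY_SKETCH_G)
                return 0;
        return (int)t - UNIPHY_SKETCH_A;
}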
mi_inst_regs(0), - mi_inst_regs(1), - mi_inst_regs(2), - mi_inst_regs(3), - mi_inst_regs(4), - mi_inst_regs(5), -}; - -static const struct dce_mem_input_shift mi_shifts = { - MI_DCE11_2_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_mem_input_mask mi_masks = { - MI_DCE11_2_MASK_SH_LIST(_MASK) -}; - -static struct mem_input *dce112_mem_input_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), - GFP_KERNEL); - - if (!dce_mi) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); - return &dce_mi->base; -} - -static void dce112_transform_destroy(struct transform **xfm) -{ - kfree(TO_DCE_TRANSFORM(*xfm)); - *xfm = NULL; -} - -static struct transform *dce112_transform_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_transform *transform = - kzalloc(sizeof(struct dce_transform), GFP_KERNEL); - - if (!transform) - return NULL; - - dce_transform_construct(transform, ctx, inst, - &xfm_regs[inst], &xfm_shift, &xfm_mask); - transform->lb_memory_size = 0x1404; /*5124*/ - return &transform->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = false, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -static struct link_encoder *dce112_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dce110_link_encoder *enc110 = - kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); - int link_regs_id; - - if (!enc110) - return NULL; - - link_regs_id = - map_transmitter_id_to_phy_instance(enc_init_data->transmitter); - - dce110_link_encoder_construct(enc110, - enc_init_data, - &link_enc_feature, - &link_enc_regs[link_regs_id], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source]); - return &enc110->base; -} - -static struct panel_cntl *dce112_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dce_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dce_panel_cntl_construct(panel_cntl, - init_data, - &panel_cntl_regs[init_data->inst], - &panel_cntl_shift, - &panel_cntl_mask); - - return &panel_cntl->base; -} - -static struct input_pixel_processor *dce112_ipp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); - - if (!ipp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dce_ipp_construct(ipp, ctx, inst, - &ipp_regs[inst], &ipp_shift, &ipp_mask); - return &ipp->base; -} - -static struct output_pixel_processor *dce112_opp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce110_opp *opp = - kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); - - if (!opp) - return NULL; - - dce110_opp_construct(opp, - ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -static struct dce_aux *dce112_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * 
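/*
 * The creator helpers above (timing generator, stream encoder, mem
 * input, transform, link encoder, panel cntl, ipp, opp) all share one
 * shape: allocate the wrapper, run the construct helper, hand back a
 * pointer to the embedded base as the public handle (the inverse of
 * the container_of downcast). A minimal sketch of that shape with
 * stand-in types:
 */
#include <stdlib.h>

struct base_obj_sketch { int id; };

struct wrapper_obj_sketch {
        struct base_obj_sketch base;
        int private_state;
};

static void wrapper_construct(struct wrapper_obj_sketch *w, int id)
{
        w->base.id = id;
}

static struct base_obj_sketch *wrapper_create(int id)
{
        struct wrapper_obj_sketch *w = calloc(1, sizeof(*w));   /* kzalloc stand-in */

        if (!w)
                return NULL;    /* caller treats NULL as create failure */
        wrapper_construct(w, id);
        return &w->base;        /* callers only ever see the base */
}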
AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5), - i2c_inst_regs(6), -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) -}; - -static struct dce_i2c_hw *dce112_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} -static struct clock_source *dce112_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dce112_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static void dce112_clock_source_destroy(struct clock_source **clk_src) -{ - kfree(TO_DCE110_CLK_SRC(*clk_src)); - *clk_src = NULL; -} - -static void dce112_resource_destruct(struct dce110_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.opps[i] != NULL) - dce110_opp_destroy(&pool->base.opps[i]); - - if (pool->base.transforms[i] != NULL) - dce112_transform_destroy(&pool->base.transforms[i]); - - if (pool->base.ipps[i] != NULL) - dce_ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.mis[i] != NULL) { - kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); - pool->base.mis[i] = NULL; - } - - if (pool->base.timing_generators[i] != NULL) { - kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) - kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dce112_clock_source_destroy(&pool->base.clock_sources[i]); - } - } - - if (pool->base.dp_clock_source != NULL) - dce112_clock_source_destroy(&pool->base.dp_clock_source); - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i] != NULL) { - dce_aud_destroy(&pool->base.audios[i]); - } - } - - if (pool->base.abm != NULL) - dce_abm_destroy(&pool->base.abm); - - if (pool->base.dmcu != NULL) - dce_dmcu_destroy(&pool->base.dmcu); - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } -} - -static struct clock_source *find_matching_pll( - struct resource_context *res_ctx, - const struct 
resource_pool *pool, - const struct dc_stream_state *const stream) -{ - switch (stream->link->link_enc->transmitter) { - case TRANSMITTER_UNIPHY_A: - return pool->clock_sources[DCE112_CLK_SRC_PLL0]; - case TRANSMITTER_UNIPHY_B: - return pool->clock_sources[DCE112_CLK_SRC_PLL1]; - case TRANSMITTER_UNIPHY_C: - return pool->clock_sources[DCE112_CLK_SRC_PLL2]; - case TRANSMITTER_UNIPHY_D: - return pool->clock_sources[DCE112_CLK_SRC_PLL3]; - case TRANSMITTER_UNIPHY_E: - return pool->clock_sources[DCE112_CLK_SRC_PLL4]; - case TRANSMITTER_UNIPHY_F: - return pool->clock_sources[DCE112_CLK_SRC_PLL5]; - default: - return NULL; - } - - return NULL; -} - -static enum dc_status build_mapped_resource( - const struct dc *dc, - struct dc_state *context, - struct dc_stream_state *stream) -{ - struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); - - if (!pipe_ctx) - return DC_ERROR_UNEXPECTED; - - dce110_resource_build_pipe_hw_param(pipe_ctx); - - resource_build_info_frame(pipe_ctx); - - return DC_OK; -} - -bool dce112_validate_bandwidth( - struct dc *dc, - struct dc_state *context, - bool fast_validate) -{ - bool result = false; - - DC_LOG_BANDWIDTH_CALCS( - "%s: start", - __func__); - - if (bw_calcs( - dc->ctx, - dc->bw_dceip, - dc->bw_vbios, - context->res_ctx.pipe_ctx, - dc->res_pool->pipe_count, - &context->bw_ctx.bw.dce)) - result = true; - - if (!result) - DC_LOG_BANDWIDTH_VALIDATION( - "%s: Bandwidth validation failed!", - __func__); - - if (memcmp(&dc->current_state->bw_ctx.bw.dce, - &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) { - - DC_LOG_BANDWIDTH_CALCS( - "%s: finish,\n" - "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" - "stutMark_b: %d stutMark_a: %d\n" - "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" - "stutMark_b: %d stutMark_a: %d\n" - "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" - "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n" - "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n" - "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n" - , - __func__, - context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark, - context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark, - context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark, - context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark, - context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark, - context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark, - context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark, - context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark, - context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark, - context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark, - context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark, - context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark, - context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark, - context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark, - context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark, - context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark, - context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark, - context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark, - context->bw_ctx.bw.dce.stutter_mode_enable, - context->bw_ctx.bw.dce.cpuc_state_change_enable, - context->bw_ctx.bw.dce.cpup_state_change_enable, - context->bw_ctx.bw.dce.nbp_state_change_enable, - context->bw_ctx.bw.dce.all_displays_in_sync, - context->bw_ctx.bw.dce.dispclk_khz, - context->bw_ctx.bw.dce.sclk_khz, - context->bw_ctx.bw.dce.sclk_deep_sleep_khz, - context->bw_ctx.bw.dce.yclk_khz, - 
context->bw_ctx.bw.dce.blackout_recovery_time_us); - } - return result; -} - -enum dc_status resource_map_phy_clock_resources( - const struct dc *dc, - struct dc_state *context, - struct dc_stream_state *stream) -{ - - /* acquire new resources */ - struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream( - &context->res_ctx, stream); - - if (!pipe_ctx) - return DC_ERROR_UNEXPECTED; - - if (dc_is_dp_signal(pipe_ctx->stream->signal) - || dc_is_virtual_signal(pipe_ctx->stream->signal)) - pipe_ctx->clock_source = - dc->res_pool->dp_clock_source; - else { - if (stream && stream->link && stream->link->link_enc) - pipe_ctx->clock_source = find_matching_pll( - &context->res_ctx, dc->res_pool, - stream); - } - - if (pipe_ctx->clock_source == NULL) - return DC_NO_CLOCK_SOURCE_RESOURCE; - - resource_reference_clock_source( - &context->res_ctx, - dc->res_pool, - pipe_ctx->clock_source); - - return DC_OK; -} - -static bool dce112_validate_surface_sets( - struct dc_state *context) -{ - int i; - - for (i = 0; i < context->stream_count; i++) { - if (context->stream_status[i].plane_count == 0) - continue; - - if (context->stream_status[i].plane_count > 1) - return false; - - if (context->stream_status[i].plane_states[0]->format - >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) - return false; - } - - return true; -} - -enum dc_status dce112_add_stream_to_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *dc_stream) -{ - enum dc_status result; - - result = resource_map_pool_resources(dc, new_ctx, dc_stream); - - if (result == DC_OK) - result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); - - - if (result == DC_OK) - result = build_mapped_resource(dc, new_ctx, dc_stream); - - return result; -} - -static enum dc_status dce112_validate_global( - struct dc *dc, - struct dc_state *context) -{ - if (!dce112_validate_surface_sets(context)) - return DC_FAIL_SURFACE_VALIDATE; - - return DC_OK; -} - -static void dce112_destroy_resource_pool(struct resource_pool **pool) -{ - struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - - dce112_resource_destruct(dce110_pool); - kfree(dce110_pool); - *pool = NULL; -} - -static const struct resource_funcs dce112_res_pool_funcs = { - .destroy = dce112_destroy_resource_pool, - .link_enc_create = dce112_link_encoder_create, - .panel_cntl_create = dce112_panel_cntl_create, - .validate_bandwidth = dce112_validate_bandwidth, - .validate_plane = dce100_validate_plane, - .add_stream_to_ctx = dce112_add_stream_to_ctx, - .validate_global = dce112_validate_global, - .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link -}; - -static void bw_calcs_data_update_from_pplib(struct dc *dc) -{ - struct dm_pp_clock_levels_with_latency eng_clks = {0}; - struct dm_pp_clock_levels_with_latency mem_clks = {0}; - struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0}; - struct dm_pp_clock_levels clks = {0}; - int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; - - if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm) - memory_type_multiplier = MEMORY_TYPE_HBM; - - /*do system clock TODO PPLIB: after PPLIB implement, - * then remove old way - */ - if (!dm_pp_get_clock_levels_by_type_with_latency( - dc->ctx, - DM_PP_CLOCK_TYPE_ENGINE_CLK, - &eng_clks)) { - - /* This is only for temporary */ - dm_pp_get_clock_levels_by_type( - dc->ctx, - DM_PP_CLOCK_TYPE_ENGINE_CLK, - &clks); - /* convert all the clock fro kHz to fix point mHz */ - dc->bw_vbios->high_sclk = bw_frc_to_fixed( - 
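/*
 * resource_map_phy_clock_resources() above routes DP and virtual
 * signals to the shared DP DTO clock source and everything else to the
 * PLL matched to the stream's transmitter (find_matching_pll()). A
 * standalone sketch of that selection with simplified types; returning
 * -1 stands in for DC_NO_CLOCK_SOURCE_RESOURCE:
 */
enum signal_kind_sketch {
        SIGNAL_KIND_DP, SIGNAL_KIND_VIRTUAL, SIGNAL_KIND_OTHER
};

static int pick_clock_source_idx(enum signal_kind_sketch sig, int phy_idx,
                                 int dp_dto_idx,
                                 const int *pll_idx_by_phy, int num_phys)
{
        if (sig == SIGNAL_KIND_DP || sig == SIGNAL_KIND_VIRTUAL)
                return dp_dto_idx;      /* DP streams share one DTO source */
        if (phy_idx < 0 || phy_idx >= num_phys)
                return -1;              /* no matching PLL */
        return pll_idx_by_phy[phy_idx]; /* one PLL per transmitter */
}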
clks.clocks_in_khz[clks.num_levels-1], 1000); - dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels/8], 1000); - dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels*2/8], 1000); - dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels*3/8], 1000); - dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels*4/8], 1000); - dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels*5/8], 1000); - dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels*6/8], 1000); - dc->bw_vbios->low_sclk = bw_frc_to_fixed( - clks.clocks_in_khz[0], 1000); - - /*do memory clock*/ - dm_pp_get_clock_levels_by_type( - dc->ctx, - DM_PP_CLOCK_TYPE_MEMORY_CLK, - &clks); - - dc->bw_vbios->low_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[0] * memory_type_multiplier, 1000); - dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier, - 1000); - dc->bw_vbios->high_yclk = bw_frc_to_fixed( - clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier, - 1000); - - return; - } - - /* convert all the clock fro kHz to fix point mHz TODO: wloop data */ - dc->bw_vbios->high_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000); - dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000); - dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000); - dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000); - dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000); - dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000); - dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000); - dc->bw_vbios->low_sclk = bw_frc_to_fixed( - eng_clks.data[0].clocks_in_khz, 1000); - - /*do memory clock*/ - dm_pp_get_clock_levels_by_type_with_latency( - dc->ctx, - DM_PP_CLOCK_TYPE_MEMORY_CLK, - &mem_clks); - - /* we don't need to call PPLIB for validation clock since they - * also give us the highest sclk and highest mclk (UMA clock). - * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula): - * YCLK = UMACLK*m_memoryTypeMultiplier - */ - dc->bw_vbios->low_yclk = bw_frc_to_fixed( - mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); - dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, - 1000); - dc->bw_vbios->high_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, - 1000); - - /* Now notify PPLib/SMU about which Watermarks sets they should select - * depending on DPM state they are in. 
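/*
 * The comment above gives the conversion the code just applied:
 * YCLK = UMACLK * memory type multiplier, with the multiplier chosen
 * earlier (MEMORY_TYPE_MULTIPLIER_CZ, or MEMORY_TYPE_HBM for HBM
 * parts). A one-line standalone sketch of that conversion, using a
 * plain double instead of the driver's bw_frc_to_fixed() fixed-point:
 */
static double uma_khz_to_yclk_mhz(unsigned int uma_clk_khz, int mem_multiplier)
{
        /* scale by the memory type multiplier, then kHz -> MHz */
        return (double)uma_clk_khz * mem_multiplier / 1000.0;
}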
And update BW MGR GFX Engine and - * Memory clock member variables for Watermarks calculations for each - * Watermark Set - */ - clk_ranges.num_wm_sets = 4; - clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A; - clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz = - eng_clks.data[0].clocks_in_khz; - clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz = - eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; - clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz = - mem_clks.data[0].clocks_in_khz; - clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz = - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; - - clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B; - clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz = - eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; - /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ - clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000; - clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz = - mem_clks.data[0].clocks_in_khz; - clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz = - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; - - clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C; - clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz = - eng_clks.data[0].clocks_in_khz; - clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz = - eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; - clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz = - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; - /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ - clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000; - - clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D; - clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz = - eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; - /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ - clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000; - clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz = - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; - /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ - clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000; - - /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ - dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges); -} - -static const struct resource_caps *dce112_resource_cap( - struct hw_asic_id *asic_id) -{ - if (ASIC_REV_IS_POLARIS11_M(asic_id->hw_internal_rev) || - ASIC_REV_IS_POLARIS12_V(asic_id->hw_internal_rev)) - return &polaris_11_resource_cap; - else - return &polaris_10_resource_cap; -} - -static bool dce112_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dce110_resource_pool *pool) -{ - unsigned int i; - struct dc_context *ctx = dc->ctx; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = dce112_resource_cap(&ctx->asic_id); - pool->base.funcs = &dce112_res_pool_funcs; - - /************************************************* - * Resource + asic cap harcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = pool->base.res_cap->num_timing_generator; - pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; - dc->caps.max_downscale_ratio = 200; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/ - dc->caps.max_cursor_size = 128; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dual_link_dvi = true; - dc->caps.extended_aux_timeout_support = false; - dc->debug = debug_defaults; - - 
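/*
 * The four watermark sets set up above cover the cross product of
 * {low, high} engine clock ranges and {low, high} memory clock ranges,
 * with 5 GHz as an open-ended cap to cover overdrive. A standalone
 * sketch of the same quadrant layout; the split points correspond to
 * the 3/8 engine level and the middle memory level used in the code:
 */
struct wm_range_sketch {
        int id;                                 /* 0..3 maps to sets A..D */
        unsigned int eng_min_khz, eng_max_khz;
        unsigned int mem_min_khz, mem_max_khz;
};

static void fill_wm_quadrants(struct wm_range_sketch r[4],
                              unsigned int eng_min_khz, unsigned int eng_split_khz,
                              unsigned int mem_min_khz, unsigned int mem_split_khz)
{
        const unsigned int cap_khz = 5000000;   /* 5 GHz, covers overdrive */
        int i;

        for (i = 0; i < 4; i++) {
                r[i].id = i;
                r[i].eng_min_khz = (i & 1) ? eng_split_khz : eng_min_khz;
                r[i].eng_max_khz = (i & 1) ? cap_khz : eng_split_khz - 1;
                r[i].mem_min_khz = (i & 2) ? mem_split_khz : mem_min_khz;
                r[i].mem_max_khz = (i & 2) ? cap_khz : mem_split_khz - 1;
        }
}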
/************************************************* - * Create resources * - *************************************************/ - - pool->base.clock_sources[DCE112_CLK_SRC_PLL0] = - dce112_clock_source_create( - ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCE112_CLK_SRC_PLL1] = - dce112_clock_source_create( - ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCE112_CLK_SRC_PLL2] = - dce112_clock_source_create( - ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCE112_CLK_SRC_PLL3] = - dce112_clock_source_create( - ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - pool->base.clock_sources[DCE112_CLK_SRC_PLL4] = - dce112_clock_source_create( - ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - pool->base.clock_sources[DCE112_CLK_SRC_PLL5] = - dce112_clock_source_create( - ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL5, - &clk_src_regs[5], false); - pool->base.clk_src_count = DCE112_CLK_SRC_TOTAL; - - pool->base.dp_clock_source = dce112_clock_source_create( - ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true); - - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - } - - pool->base.dmcu = dce_dmcu_create(ctx, - &dmcu_regs, - &dmcu_shift, - &dmcu_mask); - if (pool->base.dmcu == NULL) { - dm_error("DC: failed to create dmcu!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - pool->base.abm = dce_abm_create(ctx, - &abm_regs, - &abm_shift, - &abm_mask); - if (pool->base.abm == NULL) { - dm_error("DC: failed to create abm!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - { - struct irq_service_init_data init_data; - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dce110_create(&init_data); - if (!pool->base.irqs) - goto res_create_fail; - } - - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.timing_generators[i] = - dce112_timing_generator_create( - ctx, - i, - &dce112_tg_offsets[i]); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto res_create_fail; - } - - pool->base.mis[i] = dce112_mem_input_create(ctx, i); - if (pool->base.mis[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create memory input!\n"); - goto res_create_fail; - } - - pool->base.ipps[i] = dce112_ipp_create(ctx, i); - if (pool->base.ipps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create input pixel processor!\n"); - goto res_create_fail; - } - - pool->base.transforms[i] = dce112_transform_create(ctx, i); - if (pool->base.transforms[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create transform!\n"); - goto res_create_fail; - } - - pool->base.opps[i] = dce112_opp_create( - ctx, - i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create output pixel processor!\n"); - goto res_create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dce112_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto res_create_fail; - } - pool->base.hw_i2cs[i] = dce112_i2c_hw_create(ctx, i); - if 
(pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create i2c engine!!\n"); - goto res_create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto res_create_fail; - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - /* Create hardware sequencer */ - dce112_hw_sequencer_construct(dc); - - bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); - - bw_calcs_data_update_from_pplib(dc); - - return true; - -res_create_fail: - dce112_resource_destruct(pool); - return false; -} - -struct resource_pool *dce112_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc) -{ - struct dce110_resource_pool *pool = - kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dce112_resource_construct(num_virtual_links, dc, pool)) - return &pool->base; - - kfree(pool); - BREAK_TO_DEBUGGER(); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h deleted file mode 100644 index 1f57ebc6f9..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h +++ /dev/null @@ -1,57 +0,0 @@ -/* -* Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_RESOURCE_DCE112_H__ -#define __DC_RESOURCE_DCE112_H__ - -#include "core_types.h" - -struct dc; -struct resource_pool; - -struct resource_pool *dce112_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc); - -enum dc_status dce112_validate_with_context( - struct dc *dc, - const struct dc_validation_set set[], - int set_count, - struct dc_state *context, - struct dc_state *old_context); - -bool dce112_validate_bandwidth( - struct dc *dc, - struct dc_state *context, - bool fast_validate); - -enum dc_status dce112_add_stream_to_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *dc_stream); - - -#endif /* __DC_RESOURCE_DCE112_H__ */ - diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile index 097cf407a1..8f508e6627 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile @@ -24,9 +24,9 @@ # It provides the control and status of HW CRTC block. 
-CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = -Wno-override-init -DCE120 = dce120_resource.o dce120_timing_generator.o \ +DCE120 = dce120_timing_generator.o AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120)) diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c deleted file mode 100644 index 962de79be1..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ /dev/null @@ -1,1288 +0,0 @@ -/* -* Copyright 2012-15 Advanced Micro Devices, Inc.cls -* - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - - -#include "stream_encoder.h" -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dce120_resource.h" - -#include "dce112/dce112_resource.h" - -#include "dce110/dce110_resource.h" -#include "virtual/virtual_stream_encoder.h" -#include "dce120_timing_generator.h" -#include "irq/dce120/irq_service_dce120.h" -#include "dce/dce_opp.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_ipp.h" -#include "dce/dce_mem_input.h" -#include "dce/dce_panel_cntl.h" - -#include "dce110/dce110_hwseq.h" -#include "dce120/dce120_hwseq.h" -#include "dce/dce_transform.h" -#include "clk_mgr.h" -#include "dce/dce_audio.h" -#include "dce/dce_link_encoder.h" -#include "dce/dce_stream_encoder.h" -#include "dce/dce_hwseq.h" -#include "dce/dce_abm.h" -#include "dce/dce_dmcu.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" - -#include "dce/dce_12_0_offset.h" -#include "dce/dce_12_0_sh_mask.h" -#include "soc15_hw_ip.h" -#include "vega10_ip_offset.h" -#include "nbio/nbio_6_1_offset.h" -#include "mmhub/mmhub_1_0_offset.h" -#include "mmhub/mmhub_1_0_sh_mask.h" -#include "reg_helper.h" - -#include "dce100/dce100_resource.h" - -#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL - #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f - #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f - #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f - #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f - #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f - #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f - #define 
mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f - #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 -#endif - -enum dce120_clk_src_array_id { - DCE120_CLK_SRC_PLL0, - DCE120_CLK_SRC_PLL1, - DCE120_CLK_SRC_PLL2, - DCE120_CLK_SRC_PLL3, - DCE120_CLK_SRC_PLL4, - DCE120_CLK_SRC_PLL5, - - DCE120_CLK_SRC_TOTAL -}; - -static const struct dce110_timing_generator_offsets dce120_tg_offsets[] = { - { - .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), - }, - { - .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), - }, - { - .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), - }, - { - .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), - }, - { - .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), - }, - { - .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL), - } -}; - -/* begin ********************* - * macros to expend register list macro defined in HW object header file */ - -#define BASE_INNER(seg) \ - DCE_BASE__INST0_SEG ## seg - -#define NBIO_BASE_INNER(seg) \ - NBIF_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -/* compile time expand base address. */ -#define BASE(seg) \ - BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -/* MMHUB */ -#define MMHUB_BASE_INNER(seg) \ - MMHUB_BASE__INST0_SEG ## seg - -#define MMHUB_BASE(seg) \ - MMHUB_BASE_INNER(seg) - -#define MMHUB_SR(reg_name)\ - .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -/* macros to expend register list macro defined in HW object header file - * end *********************/ - - -static const struct dce_dmcu_registers dmcu_regs = { - DMCU_DCE110_COMMON_REG_LIST() -}; - -static const struct dce_dmcu_shift dmcu_shift = { - DMCU_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_dmcu_mask dmcu_mask = { - DMCU_MASK_SH_LIST_DCE110(_MASK) -}; - -static const struct dce_abm_registers abm_regs = { - ABM_DCE110_COMMON_REG_LIST() -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCE110(_MASK) -}; - -#define ipp_regs(id)\ -[id] = {\ - IPP_DCE110_REG_LIST_DCE_BASE(id)\ -} - -static const struct dce_ipp_registers ipp_regs[] = { - ipp_regs(0), - ipp_regs(1), - ipp_regs(2), - ipp_regs(3), - ipp_regs(4), - ipp_regs(5) -}; - -static const struct dce_ipp_shift ipp_shift = { - IPP_DCE120_MASK_SH_LIST_SOC_BASE(__SHIFT) -}; - -static const struct dce_ipp_mask ipp_mask = { - IPP_DCE120_MASK_SH_LIST_SOC_BASE(_MASK) -}; - -#define transform_regs(id)\ -[id] = {\ - XFM_COMMON_REG_LIST_DCE110(id)\ -} - -static const struct dce_transform_registers xfm_regs[] = { - transform_regs(0), - transform_regs(1), - transform_regs(2), - transform_regs(3), - transform_regs(4), - transform_regs(5) -}; - -static const struct dce_transform_shift xfm_shift = { - XFM_COMMON_MASK_SH_LIST_SOC_BASE(__SHIFT) -}; - -static const struct dce_transform_mask xfm_mask = { - XFM_COMMON_MASK_SH_LIST_SOC_BASE(_MASK) -}; - -#define aux_regs(id)\ -[id] = {\ - AUX_REG_LIST(id)\ -} - -static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dce110_link_enc_hpd_registers 
link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4), - hpd_regs(5) -}; - -#define link_regs(id)\ -[id] = {\ - LE_DCE120_REG_LIST(id), \ - SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ -} - -static const struct dce110_link_enc_registers link_enc_regs[] = { - link_regs(0), - link_regs(1), - link_regs(2), - link_regs(3), - link_regs(4), - link_regs(5), - link_regs(6), -}; - - -#define stream_enc_regs(id)\ -[id] = {\ - SE_COMMON_REG_LIST(id),\ - .TMDS_CNTL = 0,\ -} - -static const struct dce110_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), - stream_enc_regs(4), - stream_enc_regs(5) -}; - -static const struct dce_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCE120(__SHIFT) -}; - -static const struct dce_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCE120(_MASK) -}; - -static const struct dce_panel_cntl_registers panel_cntl_regs[] = { - { DCE_PANEL_CNTL_REG_LIST() } -}; - -static const struct dce_panel_cntl_shift panel_cntl_shift = { - DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_panel_cntl_mask panel_cntl_mask = { - DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) -}; - -static const struct dce110_aux_registers_shift aux_shift = { - DCE12_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCE12_AUX_MASK_SH_LIST(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_DCE_120_REG_LIST(id),\ -} - -static const struct dce_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3), - opp_regs(4), - opp_regs(5) -}; - -static const struct dce_opp_shift opp_shift = { - OPP_COMMON_MASK_SH_LIST_DCE_120(__SHIFT) -}; - -static const struct dce_opp_mask opp_mask = { - OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK) -}; - #define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST(id), \ - .AUX_RESET_MASK = 0 \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4), - aux_engine_regs(5) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6), -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -static int map_transmitter_id_to_phy_instance( - enum transmitter transmitter) -{ - switch (transmitter) { - case TRANSMITTER_UNIPHY_A: - return 0; - case TRANSMITTER_UNIPHY_B: - return 1; - case TRANSMITTER_UNIPHY_C: - return 2; - case TRANSMITTER_UNIPHY_D: - return 3; - case TRANSMITTER_UNIPHY_E: - return 4; - case TRANSMITTER_UNIPHY_F: - return 5; - case TRANSMITTER_UNIPHY_G: - return 6; - default: - ASSERT(0); - return 0; - } -} - -#define clk_src_regs(index, id)\ -[index] = {\ - CS_COMMON_REG_LIST_DCE_112(id),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, C), - clk_src_regs(3, D), - 
clk_src_regs(4, E), - clk_src_regs(5, F) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCE_112(_MASK) -}; - -static struct output_pixel_processor *dce120_opp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce110_opp *opp = - kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); - - if (!opp) - return NULL; - - dce110_opp_construct(opp, - ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} -static struct dce_aux *dce120_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5), - i2c_inst_regs(6), -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) -}; - -static struct dce_i2c_hw *dce120_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} -static const struct bios_registers bios_regs = { - .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX), - .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX) -}; - -static const struct resource_caps res_cap = { - .num_timing_generator = 6, - .num_audio = 7, - .num_stream_encoder = 6, - .num_pll = 6, - .num_ddc = 6, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCE_RGB, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = false, - .fp16 = true - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 1, - .fp16 = 1 - }, - - .max_downscale_factor = { - .argb8888 = 250, - .nv12 = 1, - .fp16 = 1 - } -}; - -static const struct dc_debug_options debug_defaults = { - .disable_clock_gate = true, - .enable_legacy_fast_update = true, -}; - -static struct clock_source *dce120_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(*clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dce112_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static void dce120_clock_source_destroy(struct clock_source **clk_src) -{ - kfree(TO_DCE110_CLK_SRC(*clk_src)); - *clk_src = NULL; -} - - -static bool dce120_hw_sequencer_create(struct dc *dc) -{ - /* All registers used by dce11.2 match those in dce11 in offset and - * structure - */ - dce120_hw_sequencer_construct(dc); - - /*TODO Move to separate file and Override 
what is needed */ - - return true; -} - -static struct timing_generator *dce120_timing_generator_create( - struct dc_context *ctx, - uint32_t instance, - const struct dce110_timing_generator_offsets *offsets) -{ - struct dce110_timing_generator *tg110 = - kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); - - if (!tg110) - return NULL; - - dce120_timing_generator_construct(tg110, ctx, instance, offsets); - return &tg110->base; -} - -static void dce120_transform_destroy(struct transform **xfm) -{ - kfree(TO_DCE_TRANSFORM(*xfm)); - *xfm = NULL; -} - -static void dce120_resource_destruct(struct dce110_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.opps[i] != NULL) - dce110_opp_destroy(&pool->base.opps[i]); - - if (pool->base.transforms[i] != NULL) - dce120_transform_destroy(&pool->base.transforms[i]); - - if (pool->base.ipps[i] != NULL) - dce_ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.mis[i] != NULL) { - kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); - pool->base.mis[i] = NULL; - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } - - if (pool->base.timing_generators[i] != NULL) { - kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) - kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) - dce120_clock_source_destroy( - &pool->base.clock_sources[i]); - } - - if (pool->base.dp_clock_source != NULL) - dce120_clock_source_destroy(&pool->base.dp_clock_source); - - if (pool->base.abm != NULL) - dce_abm_destroy(&pool->base.abm); - - if (pool->base.dmcu != NULL) - dce_dmcu_destroy(&pool->base.dmcu); -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - uint32_t reg_val = dm_read_reg_soc15(ctx, mmCC_DC_MISC_STRAPS, 0); - - straps->audio_stream_number = get_reg_field_value(reg_val, - CC_DC_MISC_STRAPS, - AUDIO_STREAM_NUMBER); - straps->hdmi_disable = get_reg_field_value(reg_val, - CC_DC_MISC_STRAPS, - HDMI_DISABLE); - - reg_val = dm_read_reg_soc15(ctx, mmDC_PINSTRAPS, 0); - straps->dc_pinstraps_audio = get_reg_field_value(reg_val, - DC_PINSTRAPS, - DC_PINSTRAPS_AUDIO); -} - -static struct audio *create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = false, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true, -}; - -static struct link_encoder *dce120_link_encoder_create( - struct dc_context *ctx, - const struct 
encoder_init_data *enc_init_data) -{ - struct dce110_link_encoder *enc110 = - kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); - int link_regs_id; - - if (!enc110) - return NULL; - - link_regs_id = - map_transmitter_id_to_phy_instance(enc_init_data->transmitter); - - dce110_link_encoder_construct(enc110, - enc_init_data, - &link_enc_feature, - &link_enc_regs[link_regs_id], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source]); - - return &enc110->base; -} - -static struct panel_cntl *dce120_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dce_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dce_panel_cntl_construct(panel_cntl, - init_data, - &panel_cntl_regs[init_data->inst], - &panel_cntl_shift, - &panel_cntl_mask); - - return &panel_cntl->base; -} - -static struct input_pixel_processor *dce120_ipp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); - - if (!ipp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dce_ipp_construct(ipp, ctx, inst, - &ipp_regs[inst], &ipp_shift, &ipp_mask); - return &ipp->base; -} - -static struct stream_encoder *dce120_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dce110_stream_encoder *enc110 = - kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); - - if (!enc110) - return NULL; - - dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - return &enc110->base; -} - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCE120_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCE12_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCE12_MASK_SH_LIST(_MASK) -}; - -/* HWSEQ regs for VG20 */ -static const struct dce_hwseq_registers dce121_hwseq_reg = { - HWSEQ_VG20_REG_LIST() -}; - -static const struct dce_hwseq_shift dce121_hwseq_shift = { - HWSEQ_VG20_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask dce121_hwseq_mask = { - HWSEQ_VG20_MASK_SH_LIST(_MASK) -}; - -static struct dce_hwseq *dce120_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} - -static struct dce_hwseq *dce121_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &dce121_hwseq_reg; - hws->shifts = &dce121_hwseq_shift; - hws->masks = &dce121_hwseq_mask; - } - return hws; -} - -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = create_audio, - .create_stream_encoder = dce120_stream_encoder_create, - .create_hwseq = dce120_hwseq_create, -}; - -static const struct resource_create_funcs dce121_res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = create_audio, - .create_stream_encoder = dce120_stream_encoder_create, - .create_hwseq = dce121_hwseq_create, -}; - - -#define mi_inst_regs(id) { MI_DCE12_REG_LIST(id) } -static const struct 
dce_mem_input_registers mi_regs[] = { - mi_inst_regs(0), - mi_inst_regs(1), - mi_inst_regs(2), - mi_inst_regs(3), - mi_inst_regs(4), - mi_inst_regs(5), -}; - -static const struct dce_mem_input_shift mi_shifts = { - MI_DCE12_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_mem_input_mask mi_masks = { - MI_DCE12_MASK_SH_LIST(_MASK) -}; - -static struct mem_input *dce120_mem_input_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), - GFP_KERNEL); - - if (!dce_mi) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dce120_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); - return &dce_mi->base; -} - -static struct transform *dce120_transform_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_transform *transform = - kzalloc(sizeof(struct dce_transform), GFP_KERNEL); - - if (!transform) - return NULL; - - dce_transform_construct(transform, ctx, inst, - &xfm_regs[inst], &xfm_shift, &xfm_mask); - transform->lb_memory_size = 0x1404; /*5124*/ - return &transform->base; -} - -static void dce120_destroy_resource_pool(struct resource_pool **pool) -{ - struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - - dce120_resource_destruct(dce110_pool); - kfree(dce110_pool); - *pool = NULL; -} - -static const struct resource_funcs dce120_res_pool_funcs = { - .destroy = dce120_destroy_resource_pool, - .link_enc_create = dce120_link_encoder_create, - .panel_cntl_create = dce120_panel_cntl_create, - .validate_bandwidth = dce112_validate_bandwidth, - .validate_plane = dce100_validate_plane, - .add_stream_to_ctx = dce112_add_stream_to_ctx, - .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link -}; - -static void bw_calcs_data_update_from_pplib(struct dc *dc) -{ - struct dm_pp_clock_levels_with_latency eng_clks = {0}; - struct dm_pp_clock_levels_with_latency mem_clks = {0}; - struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0}; - int i; - unsigned int clk; - unsigned int latency; - /*original logic in dal3*/ - int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; - - /*do system clock*/ - if (!dm_pp_get_clock_levels_by_type_with_latency( - dc->ctx, - DM_PP_CLOCK_TYPE_ENGINE_CLK, - &eng_clks) || eng_clks.num_levels == 0) { - - eng_clks.num_levels = 8; - clk = 300000; - - for (i = 0; i < eng_clks.num_levels; i++) { - eng_clks.data[i].clocks_in_khz = clk; - clk += 100000; - } - } - - /* convert all the clock fro kHz to fix point mHz TODO: wloop data */ - dc->bw_vbios->high_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000); - dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000); - dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000); - dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000); - dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000); - dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000); - dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( - eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000); - dc->bw_vbios->low_sclk = bw_frc_to_fixed( - eng_clks.data[0].clocks_in_khz, 1000); - - /*do memory clock*/ - if (!dm_pp_get_clock_levels_by_type_with_latency( - dc->ctx, - DM_PP_CLOCK_TYPE_MEMORY_CLK, - &mem_clks) || mem_clks.num_levels == 0) { - - 
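/*
 * Reviewer note (editorial, not part of the original file): when PPLib
 * cannot report memory DPM levels, the block below synthesizes a
 * three-level ladder starting at 250 MHz in 500 MHz steps, with latency
 * stepping down from 45 us in 5 us decrements -- mirroring the 8-level /
 * 300 MHz engine-clock fallback above. Note that the fill loop is bounded
 * by eng_clks.num_levels rather than mem_clks.num_levels; that looks
 * unintended, though it appears benign in practice because the data[]
 * arrays are sized DM_PP_MAX_CLOCK_LEVELS.
 */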
mem_clks.num_levels = 3; - clk = 250000; - latency = 45; - - for (i = 0; i < eng_clks.num_levels; i++) { - mem_clks.data[i].clocks_in_khz = clk; - mem_clks.data[i].latency_in_us = latency; - clk += 500000; - latency -= 5; - } - - } - - /* we don't need to call PPLIB for validation clock since they - * also give us the highest sclk and highest mclk (UMA clock). - * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula): - * YCLK = UMACLK*m_memoryTypeMultiplier - */ - if (dc->bw_vbios->memory_type == bw_def_hbm) - memory_type_multiplier = MEMORY_TYPE_HBM; - - dc->bw_vbios->low_yclk = bw_frc_to_fixed( - mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); - dc->bw_vbios->mid_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, - 1000); - dc->bw_vbios->high_yclk = bw_frc_to_fixed( - mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, - 1000); - - /* Now notify PPLib/SMU about which Watermarks sets they should select - * depending on DPM state they are in. And update BW MGR GFX Engine and - * Memory clock member variables for Watermarks calculations for each - * Watermark Set - */ - clk_ranges.num_wm_sets = 4; - clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A; - clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz = - eng_clks.data[0].clocks_in_khz; - clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz = - eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; - clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz = - mem_clks.data[0].clocks_in_khz; - clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz = - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; - - clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B; - clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz = - eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; - /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ - clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000; - clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz = - mem_clks.data[0].clocks_in_khz; - clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz = - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; - - clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C; - clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz = - eng_clks.data[0].clocks_in_khz; - clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz = - eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; - clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz = - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; - /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ - clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000; - - clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D; - clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz = - eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; - /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ - clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000; - clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz = - mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; - /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ - clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000; - - /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ - dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges); -} - -static uint32_t read_pipe_fuses(struct dc_context *ctx) -{ - uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); - /* VG20 support max 6 pipes */ - value = value & 0x3f; - return value; -} - -static bool 
dce120_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dce110_resource_pool *pool) -{ - unsigned int i; - int j; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data irq_init_data; - static const struct resource_create_funcs *res_funcs; - bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev); - uint32_t pipe_fuses; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap; - pool->base.funcs = &dce120_res_pool_funcs; - - /* TODO: Fill more data from GreenlandAsicCapability.cpp */ - pool->base.pipe_count = res_cap.num_timing_generator; - pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - - dc->caps.max_downscale_ratio = 200; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/ - dc->caps.max_cursor_size = 128; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dual_link_dvi = true; - dc->caps.psp_setup_panel_mode = true; - dc->caps.extended_aux_timeout_support = false; - dc->debug = debug_defaults; - - /************************************************* - * Create resources * - *************************************************/ - - pool->base.clock_sources[DCE120_CLK_SRC_PLL0] = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCE120_CLK_SRC_PLL1] = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCE120_CLK_SRC_PLL2] = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCE120_CLK_SRC_PLL3] = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - pool->base.clock_sources[DCE120_CLK_SRC_PLL4] = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - pool->base.clock_sources[DCE120_CLK_SRC_PLL5] = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL5, - &clk_src_regs[5], false); - pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL; - - pool->base.dp_clock_source = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto clk_src_create_fail; - } - } - - pool->base.dmcu = dce_dmcu_create(ctx, - &dmcu_regs, - &dmcu_shift, - &dmcu_mask); - if (pool->base.dmcu == NULL) { - dm_error("DC: failed to create dmcu!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - pool->base.abm = dce_abm_create(ctx, - &abm_regs, - &abm_shift, - &abm_mask); - if (pool->base.abm == NULL) { - dm_error("DC: failed to create abm!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - - irq_init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data); - if (!pool->base.irqs) - goto irqs_create_fail; - - /* VG20: Pipe harvesting enabled, retrieve valid pipe fuses */ - if (is_vg20) - pipe_fuses = read_pipe_fuses(ctx); - - /* index to valid pipe resource */ - j = 0; - for (i = 0; i < pool->base.pipe_count; i++) { - if (is_vg20) { - if ((pipe_fuses & (1 << i)) != 0) { - dm_error("DC: skip invalid pipe %d!\n", i); - continue; - } - } - - pool->base.timing_generators[j] = - 
dce120_timing_generator_create( - ctx, - i, - &dce120_tg_offsets[i]); - if (pool->base.timing_generators[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto controller_create_fail; - } - - pool->base.mis[j] = dce120_mem_input_create(ctx, i); - - if (pool->base.mis[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create memory input!\n"); - goto controller_create_fail; - } - - pool->base.ipps[j] = dce120_ipp_create(ctx, i); - if (pool->base.ipps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create input pixel processor!\n"); - goto controller_create_fail; - } - - pool->base.transforms[j] = dce120_transform_create(ctx, i); - if (pool->base.transforms[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create transform!\n"); - goto res_create_fail; - } - - pool->base.opps[j] = dce120_opp_create( - ctx, - i); - if (pool->base.opps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - } - - /* check next valid pipe */ - j++; - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dce120_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto res_create_fail; - } - pool->base.hw_i2cs[i] = dce120_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create i2c engine!!\n"); - goto res_create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - /* valid pipe num */ - pool->base.pipe_count = j; - pool->base.timing_generator_count = j; - - if (is_vg20) - res_funcs = &dce121_res_create_funcs; - else - res_funcs = &res_create_funcs; - - if (!resource_construct(num_virtual_links, dc, &pool->base, res_funcs)) - goto res_create_fail; - - /* Create hardware sequencer */ - if (!dce120_hw_sequencer_create(dc)) - goto controller_create_fail; - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); - - bw_calcs_data_update_from_pplib(dc); - - return true; - -irqs_create_fail: -controller_create_fail: -clk_src_create_fail: -res_create_fail: - - dce120_resource_destruct(pool); - - return false; -} - -struct resource_pool *dce120_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc) -{ - struct dce110_resource_pool *pool = - kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dce120_resource_construct(num_virtual_links, dc, pool)) - return &pool->base; - - kfree(pool); - BREAK_TO_DEBUGGER(); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h deleted file mode 100644 index 3d1f3cf012..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h +++ /dev/null @@ -1,39 +0,0 @@ -/* -* Copyright 2012-15 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_RESOURCE_DCE120_H__ -#define __DC_RESOURCE_DCE120_H__ - -#include "core_types.h" - -struct dc; -struct resource_pool; - -struct resource_pool *dce120_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc); - -#endif /* __DC_RESOURCE_DCE120_H__ */ - diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile index fee331accc..eede83ad91 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile @@ -23,7 +23,7 @@ # Makefile for the 'controller' sub-component of DAL. # It provides the control and status of HW CRTC block. -CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \ dce60_resource.o diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile index 93dd68c312..fba189d266 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile @@ -23,10 +23,9 @@ # Makefile for the 'controller' sub-component of DAL. # It provides the control and status of HW CRTC block. -CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init) +CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = -Wno-override-init -DCE80 = dce80_timing_generator.o \ - dce80_resource.o +DCE80 = dce80_timing_generator.o AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80)) diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c deleted file mode 100644 index 35a2cce0c2..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ /dev/null @@ -1,1544 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dce/dce_8_0_d.h" -#include "dce/dce_8_0_sh_mask.h" - -#include "dm_services.h" - -#include "link_encoder.h" -#include "stream_encoder.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "irq/dce80/irq_service_dce80.h" -#include "dce110/dce110_timing_generator.h" -#include "dce110/dce110_resource.h" -#include "dce80/dce80_timing_generator.h" -#include "dce/dce_mem_input.h" -#include "dce/dce_link_encoder.h" -#include "dce/dce_stream_encoder.h" -#include "dce/dce_ipp.h" -#include "dce/dce_transform.h" -#include "dce/dce_opp.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "dce80/dce80_hwseq.h" -#include "dce100/dce100_resource.h" -#include "dce/dce_panel_cntl.h" - -#include "reg_helper.h" - -#include "dce/dce_dmcu.h" -#include "dce/dce_aux.h" -#include "dce/dce_abm.h" -#include "dce/dce_i2c.h" -/* TODO remove this include */ - -#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT -#include "gmc/gmc_7_1_d.h" -#include "gmc/gmc_7_1_sh_mask.h" -#endif - -#include "dce80/dce80_resource.h" - -#ifndef mmDP_DPHY_INTERNAL_CTRL -#define mmDP_DPHY_INTERNAL_CTRL 0x1CDE -#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE -#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x1FDE -#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x42DE -#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x45DE -#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x48DE -#define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4BDE -#define mmDP6_DP_DPHY_INTERNAL_CTRL 0x4EDE -#endif - - -#ifndef mmBIOS_SCRATCH_2 - #define mmBIOS_SCRATCH_2 0x05CB - #define mmBIOS_SCRATCH_3 0x05CC - #define mmBIOS_SCRATCH_6 0x05CF -#endif - -#ifndef mmDP_DPHY_FAST_TRAINING - #define mmDP_DPHY_FAST_TRAINING 0x1CCE - #define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE - #define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE - #define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE - #define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE - #define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE - #define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE - #define mmDP6_DP_DPHY_FAST_TRAINING 0x4ECE -#endif - - -#ifndef mmHPD_DC_HPD_CONTROL - #define mmHPD_DC_HPD_CONTROL 0x189A - #define mmHPD0_DC_HPD_CONTROL 0x189A - #define mmHPD1_DC_HPD_CONTROL 0x18A2 - #define mmHPD2_DC_HPD_CONTROL 0x18AA - #define mmHPD3_DC_HPD_CONTROL 0x18B2 - #define mmHPD4_DC_HPD_CONTROL 0x18BA - #define mmHPD5_DC_HPD_CONTROL 0x18C2 -#endif - -#define DCE11_DIG_FE_CNTL 0x4a00 -#define DCE11_DIG_BE_CNTL 0x4a47 -#define DCE11_DP_SEC 0x4ac3 - 
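The SR/SRI helpers defined just below build these register tables by preprocessor token pasting: SRI(reg, block, id) concatenates into the single identifier mm<block><id>_<reg>, so one macro invocation per instance picks up that instance's register address at compile time. Here is a self-contained toy reduction; struct hpd_registers and the table name are invented for illustration, and the two addresses are the ones supplied by the HPD fallback block above.

#include <stdio.h>

#define mmHPD0_DC_HPD_CONTROL 0x189A /* values from the #ifndef fallback block above */
#define mmHPD1_DC_HPD_CONTROL 0x18A2

struct hpd_registers {
	unsigned int DC_HPD_CONTROL;
};

/* set register offset with instance: pastes mm ## block ## id ## _ ## reg
 * into one identifier, e.g. SRI(DC_HPD_CONTROL, HPD, 0) becomes
 * .DC_HPD_CONTROL = mmHPD0_DC_HPD_CONTROL */
#define SRI(reg_name, block, id)\
	.reg_name = mm ## block ## id ## _ ## reg_name

static const struct hpd_registers hpd_regs[] = {
	[0] = { SRI(DC_HPD_CONTROL, HPD, 0) },
	[1] = { SRI(DC_HPD_CONTROL, HPD, 1) },
};

int main(void)
{
	printf("HPD0 control reg: 0x%X\n", hpd_regs[0].DC_HPD_CONTROL); /* 0x189A */
	printf("HPD1 control reg: 0x%X\n", hpd_regs[1].DC_HPD_CONTROL); /* 0x18A2 */
	return 0;
}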
-static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = { - { - .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmGRPH_CONTROL - mmGRPH_CONTROL), - .dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL - - mmDPG_WATERMARK_MASK_CONTROL), - }, - { - .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), - .dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL - - mmDPG_WATERMARK_MASK_CONTROL), - }, - { - .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), - .dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL - - mmDPG_WATERMARK_MASK_CONTROL), - }, - { - .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), - .dmif = (mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL - - mmDPG_WATERMARK_MASK_CONTROL), - }, - { - .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), - .dmif = (mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL - - mmDPG_WATERMARK_MASK_CONTROL), - }, - { - .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), - .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), - .dmif = (mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL - - mmDPG_WATERMARK_MASK_CONTROL), - } -}; - -/* set register offset */ -#define SR(reg_name)\ - .reg_name = mm ## reg_name - -/* set register offset with instance */ -#define SRI(reg_name, block, id)\ - .reg_name = mm ## block ## id ## _ ## reg_name - -#define ipp_regs(id)\ -[id] = {\ - IPP_COMMON_REG_LIST_DCE_BASE(id)\ -} - -static const struct dce_ipp_registers ipp_regs[] = { - ipp_regs(0), - ipp_regs(1), - ipp_regs(2), - ipp_regs(3), - ipp_regs(4), - ipp_regs(5) -}; - -static const struct dce_ipp_shift ipp_shift = { - IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) -}; - -static const struct dce_ipp_mask ipp_mask = { - IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) -}; - -#define transform_regs(id)\ -[id] = {\ - XFM_COMMON_REG_LIST_DCE80(id)\ -} - -static const struct dce_transform_registers xfm_regs[] = { - transform_regs(0), - transform_regs(1), - transform_regs(2), - transform_regs(3), - transform_regs(4), - transform_regs(5) -}; - -static const struct dce_transform_shift xfm_shift = { - XFM_COMMON_MASK_SH_LIST_DCE80(__SHIFT) -}; - -static const struct dce_transform_mask xfm_mask = { - XFM_COMMON_MASK_SH_LIST_DCE80(_MASK) -}; - -#define aux_regs(id)\ -[id] = {\ - AUX_REG_LIST(id)\ -} - -static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4), - hpd_regs(5) -}; - -#define link_regs(id)\ -[id] = {\ - LE_DCE80_REG_LIST(id)\ -} - -static const struct dce110_link_enc_registers link_enc_regs[] = { - link_regs(0), - link_regs(1), - link_regs(2), - link_regs(3), - link_regs(4), - link_regs(5), - link_regs(6), -}; - -#define stream_enc_regs(id)\ -[id] = {\ - SE_COMMON_REG_LIST_DCE_BASE(id),\ - .AFMT_CNTL = 0,\ -} - -static const struct dce110_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), - stream_enc_regs(4), - stream_enc_regs(5), - stream_enc_regs(6) -}; - -static const struct dce_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT) -}; - -static const struct dce_stream_encoder_mask se_mask = { - 
SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) -}; - -static const struct dce_panel_cntl_registers panel_cntl_regs[] = { - { DCE_PANEL_CNTL_REG_LIST() } -}; - -static const struct dce_panel_cntl_shift panel_cntl_shift = { - DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_panel_cntl_mask panel_cntl_mask = { - DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_DCE_80_REG_LIST(id),\ -} - -static const struct dce_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3), - opp_regs(4), - opp_regs(5) -}; - -static const struct dce_opp_shift opp_shift = { - OPP_COMMON_MASK_SH_LIST_DCE_80(__SHIFT) -}; - -static const struct dce_opp_mask opp_mask = { - OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK) -}; - -static const struct dce110_aux_registers_shift aux_shift = { - DCE10_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCE10_AUX_MASK_SH_LIST(_MASK) -}; - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST(id), \ - .AUX_RESET_MASK = 0 \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4), - aux_engine_regs(5) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6), -}; - -static const struct dce_audio_shift audio_shift = { - AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define clk_src_regs(id)\ -[id] = {\ - CS_COMMON_REG_LIST_DCE_80(id),\ -} - - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0), - clk_src_regs(1), - clk_src_regs(2) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) -}; - -static const struct bios_registers bios_regs = { - .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, - .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 -}; - -static const struct resource_caps res_cap = { - .num_timing_generator = 6, - .num_audio = 6, - .num_stream_encoder = 6, - .num_pll = 3, - .num_ddc = 6, -}; - -static const struct resource_caps res_cap_81 = { - .num_timing_generator = 4, - .num_audio = 7, - .num_stream_encoder = 7, - .num_pll = 3, - .num_ddc = 6, -}; - -static const struct resource_caps res_cap_83 = { - .num_timing_generator = 2, - .num_audio = 6, - .num_stream_encoder = 6, - .num_pll = 2, - .num_ddc = 2, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCE_RGB, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = false, - .fp16 = true - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 1, - .fp16 = 1 - }, - - .max_downscale_factor = { - .argb8888 = 250, - .nv12 = 1, - .fp16 = 1 - } -}; - -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update = true, -}; - -static const struct dce_dmcu_registers dmcu_regs = { - DMCU_DCE80_REG_LIST() -}; - -static const struct dce_dmcu_shift dmcu_shift = { - DMCU_MASK_SH_LIST_DCE80(__SHIFT) -}; - -static const struct dce_dmcu_mask dmcu_mask = { - DMCU_MASK_SH_LIST_DCE80(_MASK) -}; -static const struct dce_abm_registers abm_regs = { - ABM_DCE110_COMMON_REG_LIST() -}; - -static const struct dce_abm_shift 
abm_shift = { - ABM_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCE110(_MASK) -}; - -#define CTX ctx -#define REG(reg) mm ## reg - -#ifndef mmCC_DC_HDMI_STRAPS -#define mmCC_DC_HDMI_STRAPS 0x1918 -#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 -#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 -#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 -#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 -#endif - -static int map_transmitter_id_to_phy_instance( - enum transmitter transmitter) -{ - switch (transmitter) { - case TRANSMITTER_UNIPHY_A: - return 0; - case TRANSMITTER_UNIPHY_B: - return 1; - case TRANSMITTER_UNIPHY_C: - return 2; - case TRANSMITTER_UNIPHY_D: - return 3; - case TRANSMITTER_UNIPHY_E: - return 4; - case TRANSMITTER_UNIPHY_F: - return 5; - case TRANSMITTER_UNIPHY_G: - return 6; - default: - ASSERT(0); - return 0; - } -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - REG_GET_2(CC_DC_HDMI_STRAPS, - HDMI_DISABLE, &straps->hdmi_disable, - AUDIO_STREAM_NUMBER, &straps->audio_stream_number); - - REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); -} - -static struct audio *create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct timing_generator *dce80_timing_generator_create( - struct dc_context *ctx, - uint32_t instance, - const struct dce110_timing_generator_offsets *offsets) -{ - struct dce110_timing_generator *tg110 = - kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); - - if (!tg110) - return NULL; - - dce80_timing_generator_construct(tg110, ctx, instance, offsets); - return &tg110->base; -} - -static struct output_pixel_processor *dce80_opp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce110_opp *opp = - kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); - - if (!opp) - return NULL; - - dce110_opp_construct(opp, - ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -static struct dce_aux *dce80_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5), - i2c_inst_regs(6), -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) -}; - -static struct dce_i2c_hw *dce80_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dce_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} - -static struct dce_i2c_sw *dce80_i2c_sw_create( - struct dc_context *ctx) -{ - struct dce_i2c_sw *dce_i2c_sw = - kzalloc(sizeof(struct dce_i2c_sw), GFP_KERNEL); - 
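/*
 * Reviewer note (editorial, not in the original file): dce80_i2c_sw_create
 * follows the same factory shape as every other create helper in these
 * files -- kzalloc the wrapper, return NULL on allocation failure, run the
 * matching *_construct() initializer (for HW engines, against static
 * register/shift/mask tables), then hand back a pointer to the embedded
 * base object so callers never see the DCE80-specific type.
 */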
- if (!dce_i2c_sw) - return NULL; - - dce_i2c_sw_construct(dce_i2c_sw, ctx); - - return dce_i2c_sw; -} -static struct stream_encoder *dce80_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dce110_stream_encoder *enc110 = - kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); - - if (!enc110) - return NULL; - - dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - return &enc110->base; -} - -#define SRII(reg_name, block, id)\ - .reg_name[id] = mm ## block ## id ## _ ## reg_name - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCE8_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCE8_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCE8_MASK_SH_LIST(_MASK) -}; - -static struct dce_hwseq *dce80_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} - -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = create_audio, - .create_stream_encoder = dce80_stream_encoder_create, - .create_hwseq = dce80_hwseq_create, -}; - -#define mi_inst_regs(id) { \ - MI_DCE8_REG_LIST(id), \ - .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ -} -static const struct dce_mem_input_registers mi_regs[] = { - mi_inst_regs(0), - mi_inst_regs(1), - mi_inst_regs(2), - mi_inst_regs(3), - mi_inst_regs(4), - mi_inst_regs(5), -}; - -static const struct dce_mem_input_shift mi_shifts = { - MI_DCE8_MASK_SH_LIST(__SHIFT), - .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT -}; - -static const struct dce_mem_input_mask mi_masks = { - MI_DCE8_MASK_SH_LIST(_MASK), - .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK -}; - -static struct mem_input *dce80_mem_input_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), - GFP_KERNEL); - - if (!dce_mi) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); - dce_mi->wa.single_head_rdreq_dmif_limit = 2; - return &dce_mi->base; -} - -static void dce80_transform_destroy(struct transform **xfm) -{ - kfree(TO_DCE_TRANSFORM(*xfm)); - *xfm = NULL; -} - -static struct transform *dce80_transform_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_transform *transform = - kzalloc(sizeof(struct dce_transform), GFP_KERNEL); - - if (!transform) - return NULL; - - dce_transform_construct(transform, ctx, inst, - &xfm_regs[inst], &xfm_shift, &xfm_mask); - transform->prescaler_on = false; - return &transform->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 297000, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true -}; - -static struct link_encoder *dce80_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dce110_link_encoder *enc110 = - kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); - int link_regs_id; - - if (!enc110) - return NULL; - - link_regs_id = - map_transmitter_id_to_phy_instance(enc_init_data->transmitter); - - dce110_link_encoder_construct(enc110, - enc_init_data, - &link_enc_feature, - 
&link_enc_regs[link_regs_id], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source]); - return &enc110->base; -} - -static struct panel_cntl *dce80_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dce_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dce_panel_cntl_construct(panel_cntl, - init_data, - &panel_cntl_regs[init_data->inst], - &panel_cntl_shift, - &panel_cntl_mask); - - return &panel_cntl->base; -} - -static struct clock_source *dce80_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dce110_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static void dce80_clock_source_destroy(struct clock_source **clk_src) -{ - kfree(TO_DCE110_CLK_SRC(*clk_src)); - *clk_src = NULL; -} - -static struct input_pixel_processor *dce80_ipp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); - - if (!ipp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dce_ipp_construct(ipp, ctx, inst, - &ipp_regs[inst], &ipp_shift, &ipp_mask); - return &ipp->base; -} - -static void dce80_resource_destruct(struct dce110_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.opps[i] != NULL) - dce110_opp_destroy(&pool->base.opps[i]); - - if (pool->base.transforms[i] != NULL) - dce80_transform_destroy(&pool->base.transforms[i]); - - if (pool->base.ipps[i] != NULL) - dce_ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.mis[i] != NULL) { - kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); - pool->base.mis[i] = NULL; - } - - if (pool->base.timing_generators[i] != NULL) { - kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) - kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dce80_clock_source_destroy(&pool->base.clock_sources[i]); - } - } - - if (pool->base.abm != NULL) - dce_abm_destroy(&pool->base.abm); - - if (pool->base.dmcu != NULL) - dce_dmcu_destroy(&pool->base.dmcu); - - if (pool->base.dp_clock_source != NULL) - dce80_clock_source_destroy(&pool->base.dp_clock_source); - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i] != NULL) { - dce_aud_destroy(&pool->base.audios[i]); - } - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } -} - -static bool dce80_validate_bandwidth( - struct dc *dc, - struct dc_state *context, - bool fast_validate) -{ - int i; - bool at_least_one_pipe = false; - - for (i 
= 0; i < dc->res_pool->pipe_count; i++) { - if (context->res_ctx.pipe_ctx[i].stream) - at_least_one_pipe = true; - } - - if (at_least_one_pipe) { - /* TODO implement when needed but for now hardcode max value*/ - context->bw_ctx.bw.dce.dispclk_khz = 681000; - context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; - } else { - context->bw_ctx.bw.dce.dispclk_khz = 0; - context->bw_ctx.bw.dce.yclk_khz = 0; - } - - return true; -} - -static bool dce80_validate_surface_sets( - struct dc_state *context) -{ - int i; - - for (i = 0; i < context->stream_count; i++) { - if (context->stream_status[i].plane_count == 0) - continue; - - if (context->stream_status[i].plane_count > 1) - return false; - - if (context->stream_status[i].plane_states[0]->format - >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) - return false; - } - - return true; -} - -static enum dc_status dce80_validate_global( - struct dc *dc, - struct dc_state *context) -{ - if (!dce80_validate_surface_sets(context)) - return DC_FAIL_SURFACE_VALIDATE; - - return DC_OK; -} - -static void dce80_destroy_resource_pool(struct resource_pool **pool) -{ - struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); - - dce80_resource_destruct(dce110_pool); - kfree(dce110_pool); - *pool = NULL; -} - -static const struct resource_funcs dce80_res_pool_funcs = { - .destroy = dce80_destroy_resource_pool, - .link_enc_create = dce80_link_encoder_create, - .panel_cntl_create = dce80_panel_cntl_create, - .validate_bandwidth = dce80_validate_bandwidth, - .validate_plane = dce100_validate_plane, - .add_stream_to_ctx = dce100_add_stream_to_ctx, - .validate_global = dce80_validate_global, - .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link -}; - -static bool dce80_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dce110_resource_pool *pool) -{ - unsigned int i; - struct dc_context *ctx = dc->ctx; - struct dc_bios *bp; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap; - pool->base.funcs = &dce80_res_pool_funcs; - - - /************************************************* - * Resource + asic cap harcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = res_cap.num_timing_generator; - pool->base.timing_generator_count = res_cap.num_timing_generator; - dc->caps.max_downscale_ratio = 200; - dc->caps.i2c_speed_in_khz = 40; - dc->caps.i2c_speed_in_khz_hdcp = 40; - dc->caps.max_cursor_size = 128; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dual_link_dvi = true; - dc->caps.extended_aux_timeout_support = false; - dc->debug = debug_defaults; - - /************************************************* - * Create resources * - *************************************************/ - - bp = ctx->dc_bios; - - if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { - pool->base.dp_clock_source = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); - - pool->base.clock_sources[0] = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); - pool->base.clock_sources[1] = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); - pool->base.clock_sources[2] = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); - pool->base.clk_src_count = 3; - - } else { - pool->base.dp_clock_source = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], 
true); - - pool->base.clock_sources[0] = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); - pool->base.clock_sources[1] = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); - pool->base.clk_src_count = 2; - } - - if (pool->base.dp_clock_source == NULL) { - dm_error("DC: failed to create dp clock source!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - } - - pool->base.dmcu = dce_dmcu_create(ctx, - &dmcu_regs, - &dmcu_shift, - &dmcu_mask); - if (pool->base.dmcu == NULL) { - dm_error("DC: failed to create dmcu!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - pool->base.abm = dce_abm_create(ctx, - &abm_regs, - &abm_shift, - &abm_mask); - if (pool->base.abm == NULL) { - dm_error("DC: failed to create abm!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - { - struct irq_service_init_data init_data; - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dce80_create(&init_data); - if (!pool->base.irqs) - goto res_create_fail; - } - - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.timing_generators[i] = dce80_timing_generator_create( - ctx, i, &dce80_tg_offsets[i]); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto res_create_fail; - } - - pool->base.mis[i] = dce80_mem_input_create(ctx, i); - if (pool->base.mis[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create memory input!\n"); - goto res_create_fail; - } - - pool->base.ipps[i] = dce80_ipp_create(ctx, i); - if (pool->base.ipps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create input pixel processor!\n"); - goto res_create_fail; - } - - pool->base.transforms[i] = dce80_transform_create(ctx, i); - if (pool->base.transforms[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create transform!\n"); - goto res_create_fail; - } - - pool->base.opps[i] = dce80_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create output pixel processor!\n"); - goto res_create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dce80_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto res_create_fail; - } - pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create i2c engine!!\n"); - goto res_create_fail; - } - pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); - if (pool->base.sw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create sw i2c!!\n"); - goto res_create_fail; - } - } - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->caps.disable_dp_clk_share = true; - - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto res_create_fail; - - /* Create hardware sequencer */ - dce80_hw_sequencer_construct(dc); - - return true; - -res_create_fail: - dce80_resource_destruct(pool); - return false; -} - -struct resource_pool *dce80_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc) -{ - struct 
dce110_resource_pool *pool = - kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dce80_construct(num_virtual_links, dc, pool)) - return &pool->base; - - kfree(pool); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static bool dce81_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dce110_resource_pool *pool) -{ - unsigned int i; - struct dc_context *ctx = dc->ctx; - struct dc_bios *bp; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap_81; - pool->base.funcs = &dce80_res_pool_funcs; - - - /************************************************* - * Resource + asic cap harcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = res_cap_81.num_timing_generator; - pool->base.timing_generator_count = res_cap_81.num_timing_generator; - dc->caps.max_downscale_ratio = 200; - dc->caps.i2c_speed_in_khz = 40; - dc->caps.i2c_speed_in_khz_hdcp = 40; - dc->caps.max_cursor_size = 128; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.is_apu = true; - - /************************************************* - * Create resources * - *************************************************/ - - bp = ctx->dc_bios; - - if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { - pool->base.dp_clock_source = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); - - pool->base.clock_sources[0] = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); - pool->base.clock_sources[1] = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); - pool->base.clock_sources[2] = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); - pool->base.clk_src_count = 3; - - } else { - pool->base.dp_clock_source = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); - - pool->base.clock_sources[0] = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); - pool->base.clock_sources[1] = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); - pool->base.clk_src_count = 2; - } - - if (pool->base.dp_clock_source == NULL) { - dm_error("DC: failed to create dp clock source!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - } - - pool->base.dmcu = dce_dmcu_create(ctx, - &dmcu_regs, - &dmcu_shift, - &dmcu_mask); - if (pool->base.dmcu == NULL) { - dm_error("DC: failed to create dmcu!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - pool->base.abm = dce_abm_create(ctx, - &abm_regs, - &abm_shift, - &abm_mask); - if (pool->base.abm == NULL) { - dm_error("DC: failed to create abm!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - { - struct irq_service_init_data init_data; - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dce80_create(&init_data); - if (!pool->base.irqs) - goto res_create_fail; - } - - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.timing_generators[i] = dce80_timing_generator_create( - ctx, i, &dce80_tg_offsets[i]); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto res_create_fail; - } - - pool->base.mis[i] = 
dce80_mem_input_create(ctx, i); - if (pool->base.mis[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create memory input!\n"); - goto res_create_fail; - } - - pool->base.ipps[i] = dce80_ipp_create(ctx, i); - if (pool->base.ipps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create input pixel processor!\n"); - goto res_create_fail; - } - - pool->base.transforms[i] = dce80_transform_create(ctx, i); - if (pool->base.transforms[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create transform!\n"); - goto res_create_fail; - } - - pool->base.opps[i] = dce80_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create output pixel processor!\n"); - goto res_create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dce80_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto res_create_fail; - } - pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create i2c engine!!\n"); - goto res_create_fail; - } - pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); - if (pool->base.sw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create sw i2c!!\n"); - goto res_create_fail; - } - } - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->caps.disable_dp_clk_share = true; - - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto res_create_fail; - - /* Create hardware sequencer */ - dce80_hw_sequencer_construct(dc); - - return true; - -res_create_fail: - dce80_resource_destruct(pool); - return false; -} - -struct resource_pool *dce81_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc) -{ - struct dce110_resource_pool *pool = - kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dce81_construct(num_virtual_links, dc, pool)) - return &pool->base; - - kfree(pool); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static bool dce83_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dce110_resource_pool *pool) -{ - unsigned int i; - struct dc_context *ctx = dc->ctx; - struct dc_bios *bp; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap_83; - pool->base.funcs = &dce80_res_pool_funcs; - - - /************************************************* - * Resource + asic cap harcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = res_cap_83.num_timing_generator; - pool->base.timing_generator_count = res_cap_83.num_timing_generator; - dc->caps.max_downscale_ratio = 200; - dc->caps.i2c_speed_in_khz = 40; - dc->caps.i2c_speed_in_khz_hdcp = 40; - dc->caps.max_cursor_size = 128; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.is_apu = true; - dc->debug = debug_defaults; - - /************************************************* - * Create resources * - *************************************************/ - - bp = ctx->dc_bios; - - if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { - pool->base.dp_clock_source = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); - - pool->base.clock_sources[0] = - dce80_clock_source_create(ctx, bp, 
CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false); - pool->base.clock_sources[1] = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); - pool->base.clk_src_count = 2; - - } else { - pool->base.dp_clock_source = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true); - - pool->base.clock_sources[0] = - dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); - pool->base.clk_src_count = 1; - } - - if (pool->base.dp_clock_source == NULL) { - dm_error("DC: failed to create dp clock source!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - } - - pool->base.dmcu = dce_dmcu_create(ctx, - &dmcu_regs, - &dmcu_shift, - &dmcu_mask); - if (pool->base.dmcu == NULL) { - dm_error("DC: failed to create dmcu!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - pool->base.abm = dce_abm_create(ctx, - &abm_regs, - &abm_shift, - &abm_mask); - if (pool->base.abm == NULL) { - dm_error("DC: failed to create abm!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - { - struct irq_service_init_data init_data; - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dce80_create(&init_data); - if (!pool->base.irqs) - goto res_create_fail; - } - - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.timing_generators[i] = dce80_timing_generator_create( - ctx, i, &dce80_tg_offsets[i]); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto res_create_fail; - } - - pool->base.mis[i] = dce80_mem_input_create(ctx, i); - if (pool->base.mis[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create memory input!\n"); - goto res_create_fail; - } - - pool->base.ipps[i] = dce80_ipp_create(ctx, i); - if (pool->base.ipps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create input pixel processor!\n"); - goto res_create_fail; - } - - pool->base.transforms[i] = dce80_transform_create(ctx, i); - if (pool->base.transforms[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create transform!\n"); - goto res_create_fail; - } - - pool->base.opps[i] = dce80_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create output pixel processor!\n"); - goto res_create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dce80_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto res_create_fail; - } - pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create i2c engine!!\n"); - goto res_create_fail; - } - pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); - if (pool->base.sw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create sw i2c!!\n"); - goto res_create_fail; - } - } - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->caps.disable_dp_clk_share = true; - - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto res_create_fail; - - /* Create hardware sequencer */ - dce80_hw_sequencer_construct(dc); - - 
return true; - -res_create_fail: - dce80_resource_destruct(pool); - return false; -} - -struct resource_pool *dce83_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc) -{ - struct dce110_resource_pool *pool = - kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dce83_construct(num_virtual_links, dc, pool)) - return &pool->base; - - BREAK_TO_DEBUGGER(); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h deleted file mode 100644 index eff31ab83a..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h +++ /dev/null @@ -1,47 +0,0 @@ -/* -* Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_RESOURCE_DCE80_H__ -#define __DC_RESOURCE_DCE80_H__ - -#include "core_types.h" - -struct dc; -struct resource_pool; - -struct resource_pool *dce80_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc); - -struct resource_pool *dce81_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc); - -struct resource_pool *dce83_create_resource_pool( - uint8_t num_virtual_links, - struct dc *dc); - -#endif /* __DC_RESOURCE_DCE80_H__ */ - diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 2d2007c3e2..ae6a131be7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -22,9 +22,9 @@ # # Makefile for DCN. 
-DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o \ +DCN10 = dcn10_ipp.o \ dcn10_hw_sequencer_debug.o \ - dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ + dcn10_dpp.o dcn10_opp.o \ dcn10_hubp.o dcn10_mpc.o \ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 92fdab731f..9033b39e0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -32,7 +32,7 @@ #include "dce/dce_hwseq.h" #include "abm.h" #include "dmcu.h" -#include "dcn10_optc.h" +#include "dcn10/dcn10_optc.h" #include "dcn10/dcn10_dpp.h" #include "dcn10/dcn10_mpc.h" #include "timing_generator.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c deleted file mode 100644 index a5bdac79a7..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2016-2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "hw_sequencer_private.h" -#include "dce110/dce110_hwseq.h" -#include "dcn10/dcn10_hwseq.h" -#include "dcn20/dcn20_hwseq.h" - -static const struct hw_sequencer_funcs dcn10_funcs = { - .program_gamut_remap = dcn10_program_gamut_remap, - .init_hw = dcn10_init_hw, - .power_down_on_boot = dcn10_power_down_on_boot, - .apply_ctx_to_hw = dce110_apply_ctx_to_hw, - .apply_ctx_for_surface = NULL, - .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, - .post_unlock_program_front_end = dcn10_post_unlock_program_front_end, - .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, - .update_plane_addr = dcn10_update_plane_addr, - .update_dchub = dcn10_update_dchub, - .update_pending_status = dcn10_update_pending_status, - .program_output_csc = dcn10_program_output_csc, - .enable_accelerated_mode = dce110_enable_accelerated_mode, - .enable_timing_synchronization = dcn10_enable_timing_synchronization, - .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, - .update_info_frame = dce110_update_info_frame, - .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, - .enable_stream = dce110_enable_stream, - .disable_stream = dce110_disable_stream, - .unblank_stream = dcn10_unblank_stream, - .blank_stream = dce110_blank_stream, - .enable_audio_stream = dce110_enable_audio_stream, - .disable_audio_stream = dce110_disable_audio_stream, - .disable_plane = dcn10_disable_plane, - .pipe_control_lock = dcn10_pipe_control_lock, - .cursor_lock = dcn10_cursor_lock, - .interdependent_update_lock = dcn10_lock_all_pipes, - .prepare_bandwidth = dcn10_prepare_bandwidth, - .optimize_bandwidth = dcn10_optimize_bandwidth, - .set_drr = dcn10_set_drr, - .get_position = dcn10_get_position, - .set_static_screen_control = dcn10_set_static_screen_control, - .setup_stereo = dcn10_setup_stereo, - .set_avmute = dce110_set_avmute, - .log_hw_state = dcn10_log_hw_state, - .get_hw_state = dcn10_get_hw_state, - .clear_status_bits = dcn10_clear_status_bits, - .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, - .edp_power_control = dce110_edp_power_control, - .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, - .set_cursor_position = dcn10_set_cursor_position, - .set_cursor_attribute = dcn10_set_cursor_attribute, - .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .set_clock = dcn10_set_clock, - .get_clock = dcn10_get_clock, - .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, - .calc_vupdate_position = dcn10_calc_vupdate_position, - .power_down = dce110_power_down, - .set_backlight_level = dce110_set_backlight_level, - .set_abm_immediate_disable = dce110_set_abm_immediate_disable, - .set_pipe = dce110_set_pipe, - .enable_lvds_link_output = dce110_enable_lvds_link_output, - .enable_tmds_link_output = dce110_enable_tmds_link_output, - .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dce110_disable_link_output, - .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn10_update_visual_confirm_color, -}; - -static const struct hwseq_private_funcs dcn10_private_funcs = { - .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn10_update_plane_addr, - .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, - .program_pipe = dcn10_program_pipe, - .update_mpcc = dcn10_update_mpcc, - .set_input_transfer_func = 
dcn10_set_input_transfer_func, - .set_output_transfer_func = dcn10_set_output_transfer_func, - .power_down = dce110_power_down, - .enable_display_power_gating = dcn10_dummy_display_power_gating, - .blank_pixel_data = dcn10_blank_pixel_data, - .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, - .enable_stream_timing = dcn10_enable_stream_timing, - .edp_backlight_control = dce110_edp_backlight_control, - .disable_stream_gating = NULL, - .enable_stream_gating = NULL, - .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, - .did_underflow_occur = dcn10_did_underflow_occur, - .init_blank = NULL, - .disable_vga = dcn10_disable_vga, - .bios_golden_init = dcn10_bios_golden_init, - .plane_atomic_disable = dcn10_plane_atomic_disable, - .plane_atomic_power_down = dcn10_plane_atomic_power_down, - .enable_power_gating_plane = dcn10_enable_power_gating_plane, - .dpp_pg_control = dcn10_dpp_pg_control, - .hubp_pg_control = dcn10_hubp_pg_control, - .dsc_pg_control = NULL, - .set_hdr_multiplier = dcn10_set_hdr_multiplier, - .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, -}; - -void dcn10_hw_sequencer_construct(struct dc *dc) -{ - dc->hwss = dcn10_funcs; - dc->hwseq->funcs = dcn10_private_funcs; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h deleted file mode 100644 index 8c6fd7b844..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DC_DCN10_INIT_H__ -#define __DC_DCN10_INIT_H__ - -struct dc; - -void dcn10_hw_sequencer_construct(struct dc *dc); - -#endif /* __DC_DCN10_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 0dec576792..86bfed5dea 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -377,6 +377,7 @@ static const struct opp_funcs dcn10_opp_funcs = { .opp_set_disp_pattern_generator = NULL, .opp_program_dpg_dimensions = NULL, .dpg_is_blanked = NULL, + .dpg_is_pending = NULL, .opp_destroy = opp1_destroy }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c deleted file mode 100644 index 0e8f4f36c8..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ /dev/null @@ -1,1621 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - - -#include "reg_helper.h" -#include "dcn10_optc.h" -#include "dc.h" -#include "dc_trace.h" - -#define REG(reg)\ - optc1->tg_regs->reg - -#define CTX \ - optc1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - optc1->tg_shift->field_name, optc1->tg_mask->field_name - -#define STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN 0x100 - -/** - * apply_front_porch_workaround() - This is a workaround for a bug that has - * existed since R5xx and has not been fixed: keep the front porch at a minimum of 2 for interlaced - * mode or 1 for progressive. - * - * @timing: Timing parameters used to configure DCN blocks.
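 *
 * A minimal illustration (editorial sketch, not part of the original patch),
 * assuming a hypothetical progressive timing that arrives with a zero front
 * porch:
 *
 *	struct dc_crtc_timing t = { .v_front_porch = 0 };	// progressive (flags zeroed)
 *	apply_front_porch_workaround(&t);	// t.v_front_porch is now 1
 *
 * An interlaced timing (t.flags.INTERLACE == 1) would instead be raised to a
 * v_front_porch of at least 2.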
- */ -static void apply_front_porch_workaround(struct dc_crtc_timing *timing) -{ - if (timing->flags.INTERLACE == 1) { - if (timing->v_front_porch < 2) - timing->v_front_porch = 2; - } else { - if (timing->v_front_porch < 1) - timing->v_front_porch = 1; - } -} - -void optc1_program_global_sync( - struct timing_generator *optc, - int vready_offset, - int vstartup_start, - int vupdate_offset, - int vupdate_width) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - optc1->vready_offset = vready_offset; - optc1->vstartup_start = vstartup_start; - optc1->vupdate_offset = vupdate_offset; - optc1->vupdate_width = vupdate_width; - - if (optc1->vstartup_start == 0) { - BREAK_TO_DEBUGGER(); - return; - } - - REG_SET(OTG_VSTARTUP_PARAM, 0, - VSTARTUP_START, optc1->vstartup_start); - - REG_SET_2(OTG_VUPDATE_PARAM, 0, - VUPDATE_OFFSET, optc1->vupdate_offset, - VUPDATE_WIDTH, optc1->vupdate_width); - - REG_SET(OTG_VREADY_PARAM, 0, - VREADY_OFFSET, optc1->vready_offset); -} - -static void optc1_disable_stereo(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_STEREO_CONTROL, 0, - OTG_STEREO_EN, 0); - - REG_SET_2(OTG_3D_STRUCTURE_CONTROL, 0, - OTG_3D_STRUCTURE_EN, 0, - OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0); -} - -void optc1_setup_vertical_interrupt0( - struct timing_generator *optc, - uint32_t start_line, - uint32_t end_line) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0, - OTG_VERTICAL_INTERRUPT0_LINE_START, start_line, - OTG_VERTICAL_INTERRUPT0_LINE_END, end_line); -} - -void optc1_setup_vertical_interrupt1( - struct timing_generator *optc, - uint32_t start_line) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0, - OTG_VERTICAL_INTERRUPT1_LINE_START, start_line); -} - -void optc1_setup_vertical_interrupt2( - struct timing_generator *optc, - uint32_t start_line) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0, - OTG_VERTICAL_INTERRUPT2_LINE_START, start_line); -} - -/** - * optc1_program_timing() - used by mode timing set Program - * CRTC Timing Registers - OTG_H_*, - * OTG_V_*, Pixel repetition. - * Including SYNC. Call BIOS command table to program Timings. - * - * @optc: timing_generator instance. - * @dc_crtc_timing: Timing parameters used to configure DCN blocks. - * @vready_offset: Vready's starting position. - * @vstartup_start: Vstartup period. - * @vupdate_offset: Vupdate starting position. - * @vupdate_width: Vupdate duration. - * @signal: DC signal types. - * @use_vbios: to program timings from BIOS command table. 
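 *
 * Worked example (editorial sketch, not part of the original patch), using a
 * standard CEA 1080p60 timing with no borders:
 *
 *	h_total = 2200, h_front_porch = 88, h_addressable = 1920
 *	OTG_H_TOTAL      = 2200 - 1    = 2199
 *	asic_blank_start = 2200 - 88   = 2112	// line end - front porch
 *	asic_blank_end   = 2112 - 1920 = 192	// blank_start - active
 *
 *	v_total = 1125, v_front_porch = 4, v_addressable = 1080
 *	OTG_V_TOTAL      = 1125 - 1    = 1124
 *	asic_blank_start = 1125 - 4    = 1121
 *	asic_blank_end   = 1121 - 1080 = 41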
- * - */ -void optc1_program_timing( - struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing, - int vready_offset, - int vstartup_start, - int vupdate_offset, - int vupdate_width, - const enum signal_type signal, - bool use_vbios) -{ - struct dc_crtc_timing patched_crtc_timing; - uint32_t asic_blank_end; - uint32_t asic_blank_start; - uint32_t v_total; - uint32_t v_sync_end; - uint32_t h_sync_polarity, v_sync_polarity; - uint32_t start_point = 0; - uint32_t field_num = 0; - enum h_timing_div_mode h_div = H_TIMING_NO_DIV; - - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - optc1->signal = signal; - optc1->vready_offset = vready_offset; - optc1->vstartup_start = vstartup_start; - optc1->vupdate_offset = vupdate_offset; - optc1->vupdate_width = vupdate_width; - patched_crtc_timing = *dc_crtc_timing; - apply_front_porch_workaround(&patched_crtc_timing); - optc1->orginal_patched_timing = patched_crtc_timing; - - /* Load horizontal timing */ - - /* CRTC_H_TOTAL = vesa.h_total - 1 */ - REG_SET(OTG_H_TOTAL, 0, - OTG_H_TOTAL, patched_crtc_timing.h_total - 1); - - /* h_sync_start = 0, h_sync_end = vesa.h_sync_width */ - REG_UPDATE_2(OTG_H_SYNC_A, - OTG_H_SYNC_A_START, 0, - OTG_H_SYNC_A_END, patched_crtc_timing.h_sync_width); - - /* blank_start = line end - front porch */ - asic_blank_start = patched_crtc_timing.h_total - - patched_crtc_timing.h_front_porch; - - /* blank_end = blank_start - active */ - asic_blank_end = asic_blank_start - - patched_crtc_timing.h_border_right - - patched_crtc_timing.h_addressable - - patched_crtc_timing.h_border_left; - - REG_UPDATE_2(OTG_H_BLANK_START_END, - OTG_H_BLANK_START, asic_blank_start, - OTG_H_BLANK_END, asic_blank_end); - - /* h_sync polarity */ - h_sync_polarity = patched_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? - 0 : 1; - - REG_UPDATE(OTG_H_SYNC_A_CNTL, - OTG_H_SYNC_A_POL, h_sync_polarity); - - v_total = patched_crtc_timing.v_total - 1; - - REG_SET(OTG_V_TOTAL, 0, - OTG_V_TOTAL, v_total); - - /* In case of V_TOTAL_CONTROL is on, make sure OTG_V_TOTAL_MAX and - * OTG_V_TOTAL_MIN are equal to V_TOTAL. - */ - optc->funcs->set_vtotal_min_max(optc, v_total, v_total); - - /* v_sync_start = 0, v_sync_end = v_sync_width */ - v_sync_end = patched_crtc_timing.v_sync_width; - - REG_UPDATE_2(OTG_V_SYNC_A, - OTG_V_SYNC_A_START, 0, - OTG_V_SYNC_A_END, v_sync_end); - - /* blank_start = frame end - front porch */ - asic_blank_start = patched_crtc_timing.v_total - - patched_crtc_timing.v_front_porch; - - /* blank_end = blank_start - active */ - asic_blank_end = asic_blank_start - - patched_crtc_timing.v_border_bottom - - patched_crtc_timing.v_addressable - - patched_crtc_timing.v_border_top; - - REG_UPDATE_2(OTG_V_BLANK_START_END, - OTG_V_BLANK_START, asic_blank_start, - OTG_V_BLANK_END, asic_blank_end); - - /* v_sync polarity */ - v_sync_polarity = patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 
- 0 : 1; - - REG_UPDATE(OTG_V_SYNC_A_CNTL, - OTG_V_SYNC_A_POL, v_sync_polarity); - - if (optc1->signal == SIGNAL_TYPE_DISPLAY_PORT || - optc1->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || - optc1->signal == SIGNAL_TYPE_EDP) { - start_point = 1; - if (patched_crtc_timing.flags.INTERLACE == 1) - field_num = 1; - } - - /* Interlace */ - if (REG(OTG_INTERLACE_CONTROL)) { - if (patched_crtc_timing.flags.INTERLACE == 1) - REG_UPDATE(OTG_INTERLACE_CONTROL, - OTG_INTERLACE_ENABLE, 1); - else - REG_UPDATE(OTG_INTERLACE_CONTROL, - OTG_INTERLACE_ENABLE, 0); - } - - /* VTG enable set to 0 first VInit */ - REG_UPDATE(CONTROL, - VTG0_ENABLE, 0); - - /* original code is using VTG offset to address OTG reg, seems wrong */ - REG_UPDATE_2(OTG_CONTROL, - OTG_START_POINT_CNTL, start_point, - OTG_FIELD_NUMBER_CNTL, field_num); - - optc->funcs->program_global_sync(optc, - vready_offset, - vstartup_start, - vupdate_offset, - vupdate_width); - - optc->funcs->set_vtg_params(optc, dc_crtc_timing, true); - - /* TODO - * patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1 - * program_horz_count_by_2 - * for DVI 30bpp mode, 0 otherwise - * program_horz_count_by_2(optc, &patched_crtc_timing); - */ - - /* Enable stereo - only when we need to pack 3D frame. Other types - * of stereo handled in explicit call - */ - - if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || optc1->opp_count == 2) - h_div = H_TIMING_DIV_BY2; - - if (REG(OPTC_DATA_FORMAT_CONTROL) && optc1->tg_mask->OPTC_DATA_FORMAT != 0) { - uint32_t data_fmt = 0; - - if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) - data_fmt = 1; - else if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) - data_fmt = 2; - - REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt); - } - - if (optc1->tg_mask->OTG_H_TIMING_DIV_MODE != 0) { - if (optc1->opp_count == 4) - h_div = H_TIMING_DIV_BY4; - - REG_UPDATE(OTG_H_TIMING_CNTL, - OTG_H_TIMING_DIV_MODE, h_div); - } else { - REG_UPDATE(OTG_H_TIMING_CNTL, - OTG_H_TIMING_DIV_BY2, h_div); - } -} - -/** - * optc1_set_vtg_params - Set Vertical Timing Generator (VTG) parameters - * - * @optc: timing_generator struct used to extract the optc parameters - * @dc_crtc_timing: Timing parameters configured - * @program_fp2: Boolean value indicating if FP2 will be programmed or not - * - * OTG is responsible for generating the global sync signals, including - * vertical timing information for each HUBP in the dcfclk domain. Each VTG is - * associated with one OTG that provides HUBP with vertical timing information - * (i.e., there is 1:1 correspondence between OTG and VTG). This function is - * responsible for setting the OTG parameters to the VTG during the pipe - * programming. 
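 *
 * Worked example (editorial sketch, not part of the original patch),
 * assuming a CEA 1080p60 timing (v_total = 1125, v_front_porch = 4,
 * v_addressable = 1080, no borders) and a hypothetical vstartup_start of 84:
 *
 *	v_init              = 1125 - 4    = 1121	// start of blank
 *	asic_blank_end      = 1121 - 1080 = 41	// end of blank
 *	vertical_line_start = 41 - 84 + 1 = -42
 *	v_fp2               = 42	// VSTARTUP lands before VSYNC
 *
 * so VTG0_VCOUNT_INIT = 1121 is programmed, and VTG0_FP2 = 42 when
 * program_fp2 is true.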
- */ -void optc1_set_vtg_params(struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2) -{ - struct dc_crtc_timing patched_crtc_timing; - uint32_t asic_blank_end; - uint32_t v_init; - uint32_t v_fp2 = 0; - int32_t vertical_line_start; - - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - patched_crtc_timing = *dc_crtc_timing; - apply_front_porch_workaround(&patched_crtc_timing); - - /* VCOUNT_INIT is the start of blank */ - v_init = patched_crtc_timing.v_total - patched_crtc_timing.v_front_porch; - - /* end of blank = v_init - active */ - asic_blank_end = v_init - - patched_crtc_timing.v_border_bottom - - patched_crtc_timing.v_addressable - - patched_crtc_timing.v_border_top; - - /* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */ - vertical_line_start = asic_blank_end - optc1->vstartup_start + 1; - if (vertical_line_start < 0) - v_fp2 = -vertical_line_start; - - /* Interlace */ - if (REG(OTG_INTERLACE_CONTROL)) { - if (patched_crtc_timing.flags.INTERLACE == 1) { - v_init = v_init / 2; - if ((optc1->vstartup_start/2)*2 > asic_blank_end) - v_fp2 = v_fp2 / 2; - } - } - - if (program_fp2) - REG_UPDATE_2(CONTROL, - VTG0_FP2, v_fp2, - VTG0_VCOUNT_INIT, v_init); - else - REG_UPDATE(CONTROL, VTG0_VCOUNT_INIT, v_init); -} - -void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - uint32_t blank_data_double_buffer_enable = enable ? 1 : 0; - - REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL, - OTG_BLANK_DATA_DOUBLE_BUFFER_EN, blank_data_double_buffer_enable); -} - -/** - * optc1_set_timing_double_buffer() - DRR double buffering control - * - * Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN, - * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers. - * - * @optc: timing_generator instance. - * @enable: Enable DRR double buffering control if true, disable otherwise. - * - * Options: any time, start of frame, dp start of frame (range timing) - */ -void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t mode = enable ? 2 : 0; - - REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL, - OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mode); -} - -/** - * optc1_unblank_crtc() - Call ASIC Control Object to UnBlank CRTC. - * - * @optc: timing_generator instance. - */ -static void optc1_unblank_crtc(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE_2(OTG_BLANK_CONTROL, - OTG_BLANK_DATA_EN, 0, - OTG_BLANK_DE_MODE, 0); - - /* W/A for automated testing - * Automated testing will fail the underflow test as there are - * sporadic underflows which occur during the optc blank - * sequence. As a w/a, clear underflow on unblank. - * This prevents the failure, but will not mask actual - * underflows that affect real use cases. - */ - optc1_clear_optc_underflow(optc); -} - -/** - * optc1_blank_crtc() - Call ASIC Control Object to Blank CRTC. - * - * @optc: timing_generator instance.
- */ - -static void optc1_blank_crtc(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE_2(OTG_BLANK_CONTROL, - OTG_BLANK_DATA_EN, 1, - OTG_BLANK_DE_MODE, 0); - - optc1_set_blank_data_double_buffer(optc, false); -} - -void optc1_set_blank(struct timing_generator *optc, - bool enable_blanking) -{ - if (enable_blanking) - optc1_blank_crtc(optc); - else - optc1_unblank_crtc(optc); -} - -bool optc1_is_blanked(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t blank_en; - uint32_t blank_state; - - REG_GET_2(OTG_BLANK_CONTROL, - OTG_BLANK_DATA_EN, &blank_en, - OTG_CURRENT_BLANK_STATE, &blank_state); - - return blank_en && blank_state; -} - -void optc1_enable_optc_clock(struct timing_generator *optc, bool enable) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - if (enable) { - REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL, - OPTC_INPUT_CLK_EN, 1, - OPTC_INPUT_CLK_GATE_DIS, 1); - - REG_WAIT(OPTC_INPUT_CLOCK_CONTROL, - OPTC_INPUT_CLK_ON, 1, - 1, 1000); - - /* Enable clock */ - REG_UPDATE_2(OTG_CLOCK_CONTROL, - OTG_CLOCK_EN, 1, - OTG_CLOCK_GATE_DIS, 1); - REG_WAIT(OTG_CLOCK_CONTROL, - OTG_CLOCK_ON, 1, - 1, 1000); - } else { - - //last chance to clear underflow; otherwise it will always be set because the clock is off. - if (optc->funcs->is_optc_underflow_occurred(optc) == true) - optc->funcs->clear_optc_underflow(optc); - - REG_UPDATE_2(OTG_CLOCK_CONTROL, - OTG_CLOCK_GATE_DIS, 0, - OTG_CLOCK_EN, 0); - - REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL, - OPTC_INPUT_CLK_GATE_DIS, 0, - OPTC_INPUT_CLK_EN, 0); - } -} - -/** - * optc1_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator. - * - * @optc: timing_generator instance. - */ -static bool optc1_enable_crtc(struct timing_generator *optc) -{ - /* TODO FPGA wait for answer - * OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE - * OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK - */ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - /* opp instance for OTG. For DCN1.0, ODM is removed; - * OPP and OPTC should have a 1:1 mapping. - */ - REG_UPDATE(OPTC_DATA_SOURCE_SELECT, - OPTC_SRC_SEL, optc->inst); - - /* VTG enable first is for HW workaround */ - REG_UPDATE(CONTROL, - VTG0_ENABLE, 1); - - REG_SEQ_START(); - - /* Enable CRTC */ - REG_UPDATE_2(OTG_CONTROL, - OTG_DISABLE_POINT_CNTL, 3, - OTG_MASTER_EN, 1); - - REG_SEQ_SUBMIT(); - REG_SEQ_WAIT_DONE(); - - return true; -} - -/* disable_crtc - call ASIC Control Object to disable Timing generator. */ -bool optc1_disable_crtc(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - /* disable otg request until end of the first line - * in the vertical blank region - */ - REG_UPDATE_2(OTG_CONTROL, - OTG_DISABLE_POINT_CNTL, 3, - OTG_MASTER_EN, 0); - - REG_UPDATE(CONTROL, - VTG0_ENABLE, 0); - - /* CRTC disabled, so disable clock.
*/ - REG_WAIT(OTG_CLOCK_CONTROL, - OTG_BUSY, 0, - 1, 100000); - - return true; -} - - -void optc1_program_blank_color( - struct timing_generator *optc, - const struct tg_color *black_color) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET_3(OTG_BLACK_COLOR, 0, - OTG_BLACK_COLOR_B_CB, black_color->color_b_cb, - OTG_BLACK_COLOR_G_Y, black_color->color_g_y, - OTG_BLACK_COLOR_R_CR, black_color->color_r_cr); -} - -bool optc1_validate_timing( - struct timing_generator *optc, - const struct dc_crtc_timing *timing) -{ - uint32_t v_blank; - uint32_t h_blank; - uint32_t min_v_blank; - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - ASSERT(timing != NULL); - - v_blank = (timing->v_total - timing->v_addressable - - timing->v_border_top - timing->v_border_bottom); - - h_blank = (timing->h_total - timing->h_addressable - - timing->h_border_right - - timing->h_border_left); - - if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE && - timing->timing_3d_format != TIMING_3D_FORMAT_HW_FRAME_PACKING && - timing->timing_3d_format != TIMING_3D_FORMAT_TOP_AND_BOTTOM && - timing->timing_3d_format != TIMING_3D_FORMAT_SIDE_BY_SIDE && - timing->timing_3d_format != TIMING_3D_FORMAT_FRAME_ALTERNATE && - timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA) - return false; - - /* Temporarily blocking interlacing mode until it's supported */ - if (timing->flags.INTERLACE == 1) - return false; - - /* Check maximum number of pixels supported by Timing Generator - * (Currently this will never fail; to fail it would need a display - * that requires more than 8192 horizontal and - * more than 8192 vertical total pixels) - */ - if (timing->h_total > optc1->max_h_total || - timing->v_total > optc1->max_v_total) - return false; - - - if (h_blank < optc1->min_h_blank) - return false; - - if (timing->h_sync_width < optc1->min_h_sync_width || - timing->v_sync_width < optc1->min_v_sync_width) - return false; - - min_v_blank = timing->flags.INTERLACE ? optc1->min_v_blank_interlace : optc1->min_v_blank; - - if (v_blank < min_v_blank) - return false; - - return true; - -} - -/* - * get_vblank_counter - * - * @brief - * Get the counter for vertical blanks. Uses register OTG_STATUS_FRAME_COUNT, which - * holds the counter of frames. - * - * @param - * struct timing_generator *optc - [in] timing generator which controls the - * desired CRTC - * - * @return - * Counter of frames, which should equal the number of vblanks.
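 *
 * A minimal usage sketch (editorial, not part of the original patch): sample
 * the counter twice to count vblanks between two points in time. Note that
 * OTG_FRAME_COUNT is a hardware field of limited width, so the difference
 * should be masked to the field width if the counter can wrap in between:
 *
 *	uint32_t start = optc1_get_vblank_counter(optc);
 *	// ... wait for some event ...
 *	uint32_t elapsed = optc1_get_vblank_counter(optc) - start;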
- */ -uint32_t optc1_get_vblank_counter(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t frame_count; - - REG_GET(OTG_STATUS_FRAME_COUNT, - OTG_FRAME_COUNT, &frame_count); - - return frame_count; -} - -void optc1_lock(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_GLOBAL_CONTROL0, 0, - OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); - REG_SET(OTG_MASTER_UPDATE_LOCK, 0, - OTG_MASTER_UPDATE_LOCK, 1); - - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); - - TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); -} - -void optc1_unlock(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_MASTER_UPDATE_LOCK, 0, - OTG_MASTER_UPDATE_LOCK, 0); - - TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, false); -} - -void optc1_get_position(struct timing_generator *optc, - struct crtc_position *position) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_GET_2(OTG_STATUS_POSITION, - OTG_HORZ_COUNT, &position->horizontal_count, - OTG_VERT_COUNT, &position->vertical_count); - - REG_GET(OTG_NOM_VERT_POSITION, - OTG_VERT_COUNT_NOM, &position->nominal_vcount); -} - -bool optc1_is_counter_moving(struct timing_generator *optc) -{ - struct crtc_position position1, position2; - - optc->funcs->get_position(optc, &position1); - optc->funcs->get_position(optc, &position2); - - if (position1.horizontal_count == position2.horizontal_count && - position1.vertical_count == position2.vertical_count) - return false; - else - return true; -} - -bool optc1_did_triggered_reset_occur( - struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t occurred_force, occurred_vsync; - - REG_GET(OTG_FORCE_COUNT_NOW_CNTL, - OTG_FORCE_COUNT_NOW_OCCURRED, &occurred_force); - - REG_GET(OTG_VERT_SYNC_CONTROL, - OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, &occurred_vsync); - - return occurred_vsync != 0 || occurred_force != 0; -} - -void optc1_disable_reset_trigger(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_WRITE(OTG_TRIGA_CNTL, 0); - - REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0, - OTG_FORCE_COUNT_NOW_CLEAR, 1); - - REG_SET(OTG_VERT_SYNC_CONTROL, 0, - OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, 1); -} - -void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t falling_edge; - - REG_GET(OTG_V_SYNC_A_CNTL, - OTG_V_SYNC_A_POL, &falling_edge); - - if (falling_edge) - REG_SET_3(OTG_TRIGA_CNTL, 0, - /* vsync signal from selected OTG pipe based - * on OTG_TRIG_SOURCE_PIPE_SELECT setting - */ - OTG_TRIGA_SOURCE_SELECT, 20, - OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst, - /* always detect falling edge */ - OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 1); - else - REG_SET_3(OTG_TRIGA_CNTL, 0, - /* vsync signal from selected OTG pipe based - * on OTG_TRIG_SOURCE_PIPE_SELECT setting - */ - OTG_TRIGA_SOURCE_SELECT, 20, - OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst, - /* always detect rising edge */ - OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1); - - REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0, - /* force H count to H_TOTAL and V count to V_TOTAL in - * progressive mode and V_TOTAL-1 in interlaced mode - */ - OTG_FORCE_COUNT_NOW_MODE, 2); -} - -void optc1_enable_crtc_reset( - struct timing_generator *optc, - int source_tg_inst, - struct crtc_trigger_info *crtc_tp) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t falling_edge = 0; - uint32_t rising_edge = 0; - - switch 
(crtc_tp->event) { - - case CRTC_EVENT_VSYNC_RISING: - rising_edge = 1; - break; - - case CRTC_EVENT_VSYNC_FALLING: - falling_edge = 1; - break; - } - - REG_SET_4(OTG_TRIGA_CNTL, 0, - /* vsync signal from selected OTG pipe based - * on OTG_TRIG_SOURCE_PIPE_SELECT setting - */ - OTG_TRIGA_SOURCE_SELECT, 20, - OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst, - /* detect the edge(s) requested by crtc_tp->event */ - OTG_TRIGA_RISING_EDGE_DETECT_CNTL, rising_edge, - OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, falling_edge); - - switch (crtc_tp->delay) { - case TRIGGER_DELAY_NEXT_LINE: - REG_SET(OTG_VERT_SYNC_CONTROL, 0, - OTG_AUTO_FORCE_VSYNC_MODE, 1); - break; - case TRIGGER_DELAY_NEXT_PIXEL: - REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0, - /* force H count to H_TOTAL and V count to V_TOTAL in - * progressive mode and V_TOTAL-1 in interlaced mode - */ - OTG_FORCE_COUNT_NOW_MODE, 2); - break; - } -} - -void optc1_wait_for_state(struct timing_generator *optc, - enum crtc_state state) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - switch (state) { - case CRTC_STATE_VBLANK: - REG_WAIT(OTG_STATUS, - OTG_V_BLANK, 1, - 1, 100000); /* 1 vupdate at 10hz */ - break; - - case CRTC_STATE_VACTIVE: - REG_WAIT(OTG_STATUS, - OTG_V_ACTIVE_DISP, 1, - 1, 100000); /* 1 vupdate at 10hz */ - break; - - default: - break; - } -} - -void optc1_set_early_control( - struct timing_generator *optc, - uint32_t early_cntl) -{ - /* ASIC design change: this control is no longer needed; - * kept empty for shared caller logic - */ -} - - -void optc1_set_static_screen_control( - struct timing_generator *optc, - uint32_t event_triggers, - uint32_t num_frames) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - // By register spec, it only takes an 8-bit value - if (num_frames > 0xFF) - num_frames = 0xFF; - - /* Bit 8 is no longer applicable in RV for the PSR case; - * set bit 8 to 0 if given - */ - if ((event_triggers & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN) - != 0) - event_triggers = event_triggers & - ~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN; - - REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0, - OTG_STATIC_SCREEN_EVENT_MASK, event_triggers, - OTG_STATIC_SCREEN_FRAME_COUNT, num_frames); -} - -static void optc1_setup_manual_trigger(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_GLOBAL_CONTROL2, 0, - MANUAL_FLOW_CONTROL_SEL, optc->inst); - - REG_SET_8(OTG_TRIGA_CNTL, 0, - OTG_TRIGA_SOURCE_SELECT, 22, - OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, - OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1, - OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0, - OTG_TRIGA_POLARITY_SELECT, 0, - OTG_TRIGA_FREQUENCY_SELECT, 0, - OTG_TRIGA_DELAY, 0, - OTG_TRIGA_CLEAR, 1); -} - -static void optc1_program_manual_trigger(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_MANUAL_FLOW_CONTROL, 0, - MANUAL_FLOW_CONTROL, 1); - - REG_SET(OTG_MANUAL_FLOW_CONTROL, 0, - MANUAL_FLOW_CONTROL, 0); -} - -/** - * optc1_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*. - * - * @optc: timing_generator instance. - * @params: parameters used for Dynamic Refresh Rate.
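 *
 * Worked example (editorial sketch, not part of the original patch): for a
 * 1080p mode with v_total = 1125 at 60 Hz, a 40-60 Hz DRR window could be
 * requested with
 *
 *	struct drr_params p = {
 *		.vertical_total_min = 1125,	// fastest rate: 60 Hz
 *		.vertical_total_max = 1688,	// slowest rate: ~40 Hz (1125 * 60 / 40)
 *	};
 *	optc1_set_drr(optc, &p);
 *
 * which programs OTG_V_TOTAL_MIN = 1124 and OTG_V_TOTAL_MAX = 1687 (the
 * registers take v_total - 1) and enables the MIN/MAX select bits.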
- */ -void optc1_set_drr( - struct timing_generator *optc, - const struct drr_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - if (params != NULL && - params->vertical_total_max > 0 && - params->vertical_total_min > 0) { - - if (params->vertical_total_mid != 0) { - - REG_SET(OTG_V_TOTAL_MID, 0, - OTG_V_TOTAL_MID, params->vertical_total_mid - 1); - - REG_UPDATE_2(OTG_V_TOTAL_CONTROL, - OTG_VTOTAL_MID_REPLACING_MAX_EN, 1, - OTG_VTOTAL_MID_FRAME_NUM, - (uint8_t)params->vertical_total_mid_frame_num); - - } - - optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1); - - REG_UPDATE_5(OTG_V_TOTAL_CONTROL, - OTG_V_TOTAL_MIN_SEL, 1, - OTG_V_TOTAL_MAX_SEL, 1, - OTG_FORCE_LOCK_ON_EVENT, 0, - OTG_SET_V_TOTAL_MIN_MASK_EN, 0, - OTG_SET_V_TOTAL_MIN_MASK, 0); - } - - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); -} - -void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_V_TOTAL_MAX, 0, - OTG_V_TOTAL_MAX, vtotal_max); - - REG_SET(OTG_V_TOTAL_MIN, 0, - OTG_V_TOTAL_MIN, vtotal_min); -} - -static void optc1_set_test_pattern( - struct timing_generator *optc, - /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode' - * because this is not DP-specific (which is probably somewhere in DP - * encoder) */ - enum controller_dp_test_pattern test_pattern, - enum dc_color_depth color_depth) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - enum test_pattern_color_format bit_depth; - enum test_pattern_dyn_range dyn_range; - enum test_pattern_mode mode; - uint32_t pattern_mask; - uint32_t pattern_data; - /* color ramp generator mixes 16-bits color */ - uint32_t src_bpc = 16; - /* requested bpc */ - uint32_t dst_bpc; - uint32_t index; - /* RGB values of the color bars. - * Produce two RGB colors: RGB0 - white (all Fs) - * and RGB1 - black (all 0s) - * (three RGB components for two colors) - */ - uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000, - 0x0000, 0x0000}; - /* dest color (converted to the specified color format) */ - uint16_t dst_color[6]; - uint32_t inc_base; - - /* translate to bit depth */ - switch (color_depth) { - case COLOR_DEPTH_666: - bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6; - break; - case COLOR_DEPTH_888: - bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8; - break; - case COLOR_DEPTH_101010: - bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10; - break; - case COLOR_DEPTH_121212: - bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12; - break; - default: - bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8; - break; - } - - switch (test_pattern) { - case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES: - case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA: - { - dyn_range = (test_pattern == - CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ? - TEST_PATTERN_DYN_RANGE_CEA : - TEST_PATTERN_DYN_RANGE_VESA); - mode = TEST_PATTERN_MODE_COLORSQUARES_RGB; - - REG_UPDATE_2(OTG_TEST_PATTERN_PARAMETERS, - OTG_TEST_PATTERN_VRES, 6, - OTG_TEST_PATTERN_HRES, 6); - - REG_UPDATE_4(OTG_TEST_PATTERN_CONTROL, - OTG_TEST_PATTERN_EN, 1, - OTG_TEST_PATTERN_MODE, mode, - OTG_TEST_PATTERN_DYNAMIC_RANGE, dyn_range, - OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth); - } - break; - - case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS: - case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS: - { - mode = (test_pattern == - CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ? 
- TEST_PATTERN_MODE_VERTICALBARS : - TEST_PATTERN_MODE_HORIZONTALBARS); - - switch (bit_depth) { - case TEST_PATTERN_COLOR_FORMAT_BPC_6: - dst_bpc = 6; - break; - case TEST_PATTERN_COLOR_FORMAT_BPC_8: - dst_bpc = 8; - break; - case TEST_PATTERN_COLOR_FORMAT_BPC_10: - dst_bpc = 10; - break; - default: - dst_bpc = 8; - break; - } - - /* adjust color to the required colorFormat */ - for (index = 0; index < 6; index++) { - /* dst = 2^dstBpc * src / 2^srcBpc = src >> - * (srcBpc - dstBpc); - */ - dst_color[index] = - src_color[index] >> (src_bpc - dst_bpc); - /* CRTC_TEST_PATTERN_DATA has 16 bits, - * lowest 6 are hardwired to ZERO - * color bits should be left aligned to MSB - * XXXXXXXXXX000000 for 10 bit, - * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6 - */ - dst_color[index] <<= (16 - dst_bpc); - } - - REG_WRITE(OTG_TEST_PATTERN_PARAMETERS, 0); - - /* We have to write the mask before data, similar to pipeline. - * For example, for 8 bpc, if we want RGB0 to be magenta, - * and RGB1 to be cyan, - * we need to make 7 writes: - * MASK DATA - * 000001 00000000 00000000 set mask to R0 - * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0 - * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0 - * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1 - * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1 - * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1 - * 100000 11111111 00000000 B1 255, 0xFF00 - * - * we will make a loop of 6 in which we prepare the mask, - * then write, then prepare the color for the next write. - * the first iteration will write the mask only, - * but in each subsequent iteration the color prepared in the - * previous iteration will be written within the new mask, - * the last component will be written separately, - * the mask is not changing between the 6th and 7th write - * and its color will be prepared by the last iteration - */ - - /* write colors; the color value mask order in CRTC_TEST_PATTERN_MASK - * is B1, G1, R1, B0, G0, R0 - */ - pattern_data = 0; - for (index = 0; index < 6; index++) { - /* prepare color mask, first write of PATTERN_DATA - * will have all zeros - */ - pattern_mask = (1 << index); - - /* write color component */ - REG_SET_2(OTG_TEST_PATTERN_COLOR, 0, - OTG_TEST_PATTERN_MASK, pattern_mask, - OTG_TEST_PATTERN_DATA, pattern_data); - - /* prepare next color component, - * will be written in the next iteration - */ - pattern_data = dst_color[index]; - } - /* write the last color component, - * it has already been prepared in the loop - */ - REG_SET_2(OTG_TEST_PATTERN_COLOR, 0, - OTG_TEST_PATTERN_MASK, pattern_mask, - OTG_TEST_PATTERN_DATA, pattern_data); - - /* enable test pattern */ - REG_UPDATE_4(OTG_TEST_PATTERN_CONTROL, - OTG_TEST_PATTERN_EN, 1, - OTG_TEST_PATTERN_MODE, mode, - OTG_TEST_PATTERN_DYNAMIC_RANGE, 0, - OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth); - } - break; - - case CONTROLLER_DP_TEST_PATTERN_COLORRAMP: - { - mode = (bit_depth == - TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
- TEST_PATTERN_MODE_DUALRAMP_RGB : - TEST_PATTERN_MODE_SINGLERAMP_RGB); - - switch (bit_depth) { - case TEST_PATTERN_COLOR_FORMAT_BPC_6: - dst_bpc = 6; - break; - case TEST_PATTERN_COLOR_FORMAT_BPC_8: - dst_bpc = 8; - break; - case TEST_PATTERN_COLOR_FORMAT_BPC_10: - dst_bpc = 10; - break; - default: - dst_bpc = 8; - break; - } - - /* increment for the first ramp for one color gradation - * 1 gradation for 6-bit color is 2^10 - * gradations in 16-bit color - */ - inc_base = (src_bpc - dst_bpc); - - switch (bit_depth) { - case TEST_PATTERN_COLOR_FORMAT_BPC_6: - { - REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS, - OTG_TEST_PATTERN_INC0, inc_base, - OTG_TEST_PATTERN_INC1, 0, - OTG_TEST_PATTERN_HRES, 6, - OTG_TEST_PATTERN_VRES, 6, - OTG_TEST_PATTERN_RAMP0_OFFSET, 0); - } - break; - case TEST_PATTERN_COLOR_FORMAT_BPC_8: - { - REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS, - OTG_TEST_PATTERN_INC0, inc_base, - OTG_TEST_PATTERN_INC1, 0, - OTG_TEST_PATTERN_HRES, 8, - OTG_TEST_PATTERN_VRES, 6, - OTG_TEST_PATTERN_RAMP0_OFFSET, 0); - } - break; - case TEST_PATTERN_COLOR_FORMAT_BPC_10: - { - REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS, - OTG_TEST_PATTERN_INC0, inc_base, - OTG_TEST_PATTERN_INC1, inc_base + 2, - OTG_TEST_PATTERN_HRES, 8, - OTG_TEST_PATTERN_VRES, 5, - OTG_TEST_PATTERN_RAMP0_OFFSET, 384 << 6); - } - break; - default: - break; - } - - REG_WRITE(OTG_TEST_PATTERN_COLOR, 0); - - /* enable test pattern */ - REG_WRITE(OTG_TEST_PATTERN_CONTROL, 0); - - REG_SET_4(OTG_TEST_PATTERN_CONTROL, 0, - OTG_TEST_PATTERN_EN, 1, - OTG_TEST_PATTERN_MODE, mode, - OTG_TEST_PATTERN_DYNAMIC_RANGE, 0, - OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth); - } - break; - case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE: - { - REG_WRITE(OTG_TEST_PATTERN_CONTROL, 0); - REG_WRITE(OTG_TEST_PATTERN_COLOR, 0); - REG_WRITE(OTG_TEST_PATTERN_PARAMETERS, 0); - } - break; - default: - break; - - } -} - -void optc1_get_crtc_scanoutpos( - struct timing_generator *optc, - uint32_t *v_blank_start, - uint32_t *v_blank_end, - uint32_t *h_position, - uint32_t *v_position) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - struct crtc_position position; - - REG_GET_2(OTG_V_BLANK_START_END, - OTG_V_BLANK_START, v_blank_start, - OTG_V_BLANK_END, v_blank_end); - - optc1_get_position(optc, &position); - - *h_position = position.horizontal_count; - *v_position = position.vertical_count; -} - -static void optc1_enable_stereo(struct timing_generator *optc, - const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - if (flags) { - uint32_t stereo_en; - stereo_en = flags->FRAME_PACKED == 0 ? 1 : 0; - - if (flags->PROGRAM_STEREO) - REG_UPDATE_3(OTG_STEREO_CONTROL, - OTG_STEREO_EN, stereo_en, - OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0, - OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1); - - if (flags->PROGRAM_POLARITY) - REG_UPDATE(OTG_STEREO_CONTROL, - OTG_STEREO_EYE_FLAG_POLARITY, - flags->RIGHT_EYE_POLARITY == 0 ? 
0 : 1); - - if (flags->DISABLE_STEREO_DP_SYNC) - REG_UPDATE(OTG_STEREO_CONTROL, - OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1); - - if (flags->PROGRAM_STEREO) - REG_UPDATE_2(OTG_3D_STRUCTURE_CONTROL, - OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED, - OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED); - - } -} - -void optc1_program_stereo(struct timing_generator *optc, - const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags) -{ - if (flags->PROGRAM_STEREO) - optc1_enable_stereo(optc, timing, flags); - else - optc1_disable_stereo(optc); -} - - -bool optc1_is_stereo_left_eye(struct timing_generator *optc) -{ - bool ret = false; - uint32_t left_eye = 0; - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_GET(OTG_STEREO_STATUS, - OTG_STEREO_CURRENT_EYE, &left_eye); - if (left_eye == 1) - ret = true; - else - ret = false; - - return ret; -} - -bool optc1_get_hw_timing(struct timing_generator *tg, - struct dc_crtc_timing *hw_crtc_timing) -{ - struct dcn_otg_state s = {0}; - - if (tg == NULL || hw_crtc_timing == NULL) - return false; - - optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); - - hw_crtc_timing->h_total = s.h_total + 1; - hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end); - hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start; - hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start; - - hw_crtc_timing->v_total = s.v_total + 1; - hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end); - hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start; - hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start; - - return true; -} - - -void optc1_read_otg_state(struct optc *optc1, - struct dcn_otg_state *s) -{ - REG_GET(OTG_CONTROL, - OTG_MASTER_EN, &s->otg_enabled); - - REG_GET_2(OTG_V_BLANK_START_END, - OTG_V_BLANK_START, &s->v_blank_start, - OTG_V_BLANK_END, &s->v_blank_end); - - REG_GET(OTG_V_SYNC_A_CNTL, - OTG_V_SYNC_A_POL, &s->v_sync_a_pol); - - REG_GET(OTG_V_TOTAL, - OTG_V_TOTAL, &s->v_total); - - REG_GET(OTG_V_TOTAL_MAX, - OTG_V_TOTAL_MAX, &s->v_total_max); - - REG_GET(OTG_V_TOTAL_MIN, - OTG_V_TOTAL_MIN, &s->v_total_min); - - REG_GET(OTG_V_TOTAL_CONTROL, - OTG_V_TOTAL_MAX_SEL, &s->v_total_max_sel); - - REG_GET(OTG_V_TOTAL_CONTROL, - OTG_V_TOTAL_MIN_SEL, &s->v_total_min_sel); - - REG_GET_2(OTG_V_SYNC_A, - OTG_V_SYNC_A_START, &s->v_sync_a_start, - OTG_V_SYNC_A_END, &s->v_sync_a_end); - - REG_GET_2(OTG_H_BLANK_START_END, - OTG_H_BLANK_START, &s->h_blank_start, - OTG_H_BLANK_END, &s->h_blank_end); - - REG_GET_2(OTG_H_SYNC_A, - OTG_H_SYNC_A_START, &s->h_sync_a_start, - OTG_H_SYNC_A_END, &s->h_sync_a_end); - - REG_GET(OTG_H_SYNC_A_CNTL, - OTG_H_SYNC_A_POL, &s->h_sync_a_pol); - - REG_GET(OTG_H_TOTAL, - OTG_H_TOTAL, &s->h_total); - - REG_GET(OPTC_INPUT_GLOBAL_CONTROL, - OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status); - - REG_GET(OTG_VERTICAL_INTERRUPT1_CONTROL, - OTG_VERTICAL_INTERRUPT1_INT_ENABLE, &s->vertical_interrupt1_en); - - REG_GET(OTG_VERTICAL_INTERRUPT1_POSITION, - OTG_VERTICAL_INTERRUPT1_LINE_START, &s->vertical_interrupt1_line); - - REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL, - OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &s->vertical_interrupt2_en); - - REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION, - OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line); -} - -bool optc1_get_otg_active_size(struct timing_generator *optc, - uint32_t *otg_active_width, - uint32_t *otg_active_height) -{ - uint32_t otg_enabled; - uint32_t v_blank_start; - uint32_t 
v_blank_end; - uint32_t h_blank_start; - uint32_t h_blank_end; - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - - REG_GET(OTG_CONTROL, - OTG_MASTER_EN, &otg_enabled); - - if (otg_enabled == 0) - return false; - - REG_GET_2(OTG_V_BLANK_START_END, - OTG_V_BLANK_START, &v_blank_start, - OTG_V_BLANK_END, &v_blank_end); - - REG_GET_2(OTG_H_BLANK_START_END, - OTG_H_BLANK_START, &h_blank_start, - OTG_H_BLANK_END, &h_blank_end); - - *otg_active_width = h_blank_start - h_blank_end; - *otg_active_height = v_blank_start - v_blank_end; - return true; -} - -void optc1_clear_optc_underflow(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE(OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, 1); -} - -void optc1_tg_init(struct timing_generator *optc) -{ - optc1_set_blank_data_double_buffer(optc, true); - optc1_set_timing_double_buffer(optc, true); - optc1_clear_optc_underflow(optc); -} - -bool optc1_is_tg_enabled(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t otg_enabled = 0; - - REG_GET(OTG_CONTROL, OTG_MASTER_EN, &otg_enabled); - - return (otg_enabled != 0); - -} - -bool optc1_is_optc_underflow_occurred(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t underflow_occurred = 0; - - REG_GET(OPTC_INPUT_GLOBAL_CONTROL, - OPTC_UNDERFLOW_OCCURRED_STATUS, - &underflow_occurred); - - return (underflow_occurred == 1); -} - -bool optc1_configure_crc(struct timing_generator *optc, - const struct crc_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - /* Cannot configure crc on a CRTC that is disabled */ - if (!optc1_is_tg_enabled(optc)) - return false; - - REG_WRITE(OTG_CRC_CNTL, 0); - - if (!params->enable) - return true; - - /* Program frame boundaries */ - /* Window A x axis start and end. */ - REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL, - OTG_CRC0_WINDOWA_X_START, params->windowa_x_start, - OTG_CRC0_WINDOWA_X_END, params->windowa_x_end); - - /* Window A y axis start and end. */ - REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL, - OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start, - OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end); - - /* Window B x axis start and end. */ - REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL, - OTG_CRC0_WINDOWB_X_START, params->windowb_x_start, - OTG_CRC0_WINDOWB_X_END, params->windowb_x_end); - - /* Window B y axis start and end. */ - REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL, - OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start, - OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end); - - /* Set crc mode and selection, and enable. Only using CRC0 */ - REG_UPDATE_3(OTG_CRC_CNTL, - OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0, - OTG_CRC0_SELECT, params->selection, - OTG_CRC_EN, 1); - - return true; -} - -/** - * optc1_get_crc - Capture CRC result per component - * - * @optc: timing_generator instance. - * @r_cr: 16-bit primary CRC signature for red data. - * @g_y: 16-bit primary CRC signature for green data. - * @b_cb: 16-bit primary CRC signature for blue data. - * - * This function reads the CRC signature from the OPTC registers. Notice that - * we have three registers to keep the CRC result per color component (RGB). - * - * Returns: - * If CRC is disabled, return false; otherwise, return true, with the CRC - * results stored in the parameters.
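- *
- * A minimal caller sketch (hypothetical usage; it assumes CRC capture was
- * previously enabled through optc1_configure_crc() with continuous_mode set):
- *
- *	uint32_t r_cr, g_y, b_cb;
- *
- *	if (optc->funcs->get_crc(optc, &r_cr, &g_y, &b_cb))
- *		DC_LOG_DEBUG("CRC0 R/G/B: %08x %08x %08x", r_cr, g_y, b_cb);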
- */ -bool optc1_get_crc(struct timing_generator *optc, - uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) -{ - uint32_t field = 0; - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_GET(OTG_CRC_CNTL, OTG_CRC_EN, &field); - - /* Early return if CRC is not enabled for this CRTC */ - if (!field) - return false; - - /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */ - REG_GET_2(OTG_CRC0_DATA_RG, - CRC0_R_CR, r_cr, - CRC0_G_Y, g_y); - - /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */ - REG_GET(OTG_CRC0_DATA_B, - CRC0_B_CB, b_cb); - - return true; -} - -static const struct timing_generator_funcs dcn10_tg_funcs = { - .validate_timing = optc1_validate_timing, - .program_timing = optc1_program_timing, - .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, - .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, - .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, - .program_global_sync = optc1_program_global_sync, - .enable_crtc = optc1_enable_crtc, - .disable_crtc = optc1_disable_crtc, - /* used by enable_timing_synchronization. Not needed for FPGA */ - .is_counter_moving = optc1_is_counter_moving, - .get_position = optc1_get_position, - .get_frame_count = optc1_get_vblank_counter, - .get_scanoutpos = optc1_get_crtc_scanoutpos, - .get_otg_active_size = optc1_get_otg_active_size, - .set_early_control = optc1_set_early_control, - /* used by enable_timing_synchronization. Not needed for FPGA */ - .wait_for_state = optc1_wait_for_state, - .set_blank = optc1_set_blank, - .is_blanked = optc1_is_blanked, - .set_blank_color = optc1_program_blank_color, - .did_triggered_reset_occur = optc1_did_triggered_reset_occur, - .enable_reset_trigger = optc1_enable_reset_trigger, - .enable_crtc_reset = optc1_enable_crtc_reset, - .disable_reset_trigger = optc1_disable_reset_trigger, - .lock = optc1_lock, - .unlock = optc1_unlock, - .enable_optc_clock = optc1_enable_optc_clock, - .set_drr = optc1_set_drr, - .get_last_used_drr_vtotal = NULL, - .set_vtotal_min_max = optc1_set_vtotal_min_max, - .set_static_screen_control = optc1_set_static_screen_control, - .set_test_pattern = optc1_set_test_pattern, - .program_stereo = optc1_program_stereo, - .is_stereo_left_eye = optc1_is_stereo_left_eye, - .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer, - .tg_init = optc1_tg_init, - .is_tg_enabled = optc1_is_tg_enabled, - .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, - .clear_optc_underflow = optc1_clear_optc_underflow, - .get_crc = optc1_get_crc, - .configure_crc = optc1_configure_crc, - .set_vtg_params = optc1_set_vtg_params, - .program_manual_trigger = optc1_program_manual_trigger, - .setup_manual_trigger = optc1_setup_manual_trigger, - .get_hw_timing = optc1_get_hw_timing, -}; - -void dcn10_timing_generator_init(struct optc *optc1) -{ - optc1->base.funcs = &dcn10_tg_funcs; - - optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; - optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; - - optc1->min_h_blank = 32; - optc1->min_v_blank = 3; - optc1->min_v_blank_interlace = 5; - optc1->min_h_sync_width = 4; - optc1->min_v_sync_width = 1; -} - -/* "Container" vs. "pixel" is a concept within HW blocks, mostly those closer to the back-end. It works like this: - * - * - In most of the formats (RGB or YCbCr 4:4:4, 4:2:2 uncompressed and DSC 4:2:2 Simple) pixel rate is the same as - * container rate.
- * - * - In 4:2:0 (DSC or uncompressed) there are two pixels per container, hence the target container rate has to be - * halved to maintain the correct pixel rate. - * - * - Unlike 4:2:2 uncompressed, DSC 4:2:2 Native also has two pixels per container (this happens when DSC is applied - * to it) and has to be treated the same as 4:2:0, i.e. the target container rate has to be halved in this case as well. - * - */ -bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) -{ - bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; - - two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 - && !timing->dsc_cfg.ycbcr422_simple); - return two_pix; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h deleted file mode 100644 index ab81594a7f..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ /dev/null @@ -1,599 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE.
- * - * Authors: AMD - * - */ - -#ifndef __DC_TIMING_GENERATOR_DCN10_H__ -#define __DC_TIMING_GENERATOR_DCN10_H__ - -#include "optc.h" - -#define DCN10TG_FROM_TG(tg)\ - container_of(tg, struct optc, base) - -#define TG_COMMON_REG_LIST_DCN(inst) \ - SRI(OTG_VSTARTUP_PARAM, OTG, inst),\ - SRI(OTG_VUPDATE_PARAM, OTG, inst),\ - SRI(OTG_VREADY_PARAM, OTG, inst),\ - SRI(OTG_BLANK_CONTROL, OTG, inst),\ - SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\ - SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\ - SRI(OTG_H_TOTAL, OTG, inst),\ - SRI(OTG_H_BLANK_START_END, OTG, inst),\ - SRI(OTG_H_SYNC_A, OTG, inst),\ - SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\ - SRI(OTG_H_TIMING_CNTL, OTG, inst),\ - SRI(OTG_V_TOTAL, OTG, inst),\ - SRI(OTG_V_BLANK_START_END, OTG, inst),\ - SRI(OTG_V_SYNC_A, OTG, inst),\ - SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\ - SRI(OTG_INTERLACE_CONTROL, OTG, inst),\ - SRI(OTG_CONTROL, OTG, inst),\ - SRI(OTG_STEREO_CONTROL, OTG, inst),\ - SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\ - SRI(OTG_STEREO_STATUS, OTG, inst),\ - SRI(OTG_V_TOTAL_MAX, OTG, inst),\ - SRI(OTG_V_TOTAL_MID, OTG, inst),\ - SRI(OTG_V_TOTAL_MIN, OTG, inst),\ - SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\ - SRI(OTG_TRIGA_CNTL, OTG, inst),\ - SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ - SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ - SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\ - SRI(OTG_STATUS, OTG, inst),\ - SRI(OTG_STATUS_POSITION, OTG, inst),\ - SRI(OTG_NOM_VERT_POSITION, OTG, inst),\ - SRI(OTG_BLACK_COLOR, OTG, inst),\ - SRI(OTG_CLOCK_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ - SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ - SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\ - SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\ - SRI(CONTROL, VTG, inst),\ - SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\ - SRI(OTG_MASTER_UPDATE_MODE, OTG, inst),\ - SRI(OTG_GSL_CONTROL, OTG, inst),\ - SRI(OTG_CRC_CNTL, OTG, inst),\ - SRI(OTG_CRC0_DATA_RG, OTG, inst),\ - SRI(OTG_CRC0_DATA_B, OTG, inst),\ - SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\ - SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\ - SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\ - SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\ - SR(GSL_SOURCE_SELECT),\ - SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ - SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst) - -#define TG_COMMON_REG_LIST_DCN1_0(inst) \ - TG_COMMON_REG_LIST_DCN(inst),\ - SRI(OTG_TEST_PATTERN_PARAMETERS, OTG, inst),\ - SRI(OTG_TEST_PATTERN_CONTROL, OTG, inst),\ - SRI(OTG_TEST_PATTERN_COLOR, OTG, inst),\ - SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst) - - -struct dcn_optc_registers { - uint32_t OTG_GLOBAL_CONTROL1; - uint32_t OTG_GLOBAL_CONTROL2; - uint32_t OTG_VERT_SYNC_CONTROL; - uint32_t OTG_MASTER_UPDATE_MODE; - uint32_t OTG_GSL_CONTROL; - uint32_t OTG_VSTARTUP_PARAM; - uint32_t OTG_VUPDATE_PARAM; - uint32_t OTG_VREADY_PARAM; - uint32_t OTG_BLANK_CONTROL; - uint32_t OTG_MASTER_UPDATE_LOCK; - uint32_t OTG_GLOBAL_CONTROL0; - uint32_t OTG_DOUBLE_BUFFER_CONTROL; - uint32_t OTG_H_TOTAL; - uint32_t OTG_H_BLANK_START_END; - uint32_t OTG_H_SYNC_A; - uint32_t OTG_H_SYNC_A_CNTL; - uint32_t OTG_H_TIMING_CNTL; - uint32_t OTG_V_TOTAL; - uint32_t OTG_V_BLANK_START_END; - uint32_t OTG_V_SYNC_A; - uint32_t OTG_V_SYNC_A_CNTL; - uint32_t OTG_INTERLACE_CONTROL; - uint32_t OTG_CONTROL; - uint32_t 
OTG_STEREO_CONTROL; - uint32_t OTG_3D_STRUCTURE_CONTROL; - uint32_t OTG_STEREO_STATUS; - uint32_t OTG_V_TOTAL_MAX; - uint32_t OTG_V_TOTAL_MID; - uint32_t OTG_V_TOTAL_MIN; - uint32_t OTG_V_TOTAL_CONTROL; - uint32_t OTG_TRIGA_CNTL; - uint32_t OTG_TRIGA_MANUAL_TRIG; - uint32_t OTG_MANUAL_FLOW_CONTROL; - uint32_t OTG_FORCE_COUNT_NOW_CNTL; - uint32_t OTG_STATIC_SCREEN_CONTROL; - uint32_t OTG_STATUS_FRAME_COUNT; - uint32_t OTG_STATUS; - uint32_t OTG_STATUS_POSITION; - uint32_t OTG_NOM_VERT_POSITION; - uint32_t OTG_BLACK_COLOR; - uint32_t OTG_TEST_PATTERN_PARAMETERS; - uint32_t OTG_TEST_PATTERN_CONTROL; - uint32_t OTG_TEST_PATTERN_COLOR; - uint32_t OTG_CLOCK_CONTROL; - uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL; - uint32_t OTG_VERTICAL_INTERRUPT0_POSITION; - uint32_t OTG_VERTICAL_INTERRUPT1_CONTROL; - uint32_t OTG_VERTICAL_INTERRUPT1_POSITION; - uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL; - uint32_t OTG_VERTICAL_INTERRUPT2_POSITION; - uint32_t OPTC_INPUT_CLOCK_CONTROL; - uint32_t OPTC_DATA_SOURCE_SELECT; - uint32_t OPTC_MEMORY_CONFIG; - uint32_t OPTC_INPUT_GLOBAL_CONTROL; - uint32_t CONTROL; - uint32_t OTG_GSL_WINDOW_X; - uint32_t OTG_GSL_WINDOW_Y; - uint32_t OTG_VUPDATE_KEEPOUT; - uint32_t OTG_CRC_CNTL; - uint32_t OTG_CRC_CNTL2; - uint32_t OTG_CRC0_DATA_RG; - uint32_t OTG_CRC0_DATA_B; - uint32_t OTG_CRC1_DATA_B; - uint32_t OTG_CRC2_DATA_B; - uint32_t OTG_CRC3_DATA_B; - uint32_t OTG_CRC1_DATA_RG; - uint32_t OTG_CRC2_DATA_RG; - uint32_t OTG_CRC3_DATA_RG; - uint32_t OTG_CRC0_WINDOWA_X_CONTROL; - uint32_t OTG_CRC0_WINDOWA_Y_CONTROL; - uint32_t OTG_CRC0_WINDOWB_X_CONTROL; - uint32_t OTG_CRC0_WINDOWB_Y_CONTROL; - uint32_t OTG_CRC1_WINDOWA_X_CONTROL; - uint32_t OTG_CRC1_WINDOWA_Y_CONTROL; - uint32_t OTG_CRC1_WINDOWB_X_CONTROL; - uint32_t OTG_CRC1_WINDOWB_Y_CONTROL; - uint32_t GSL_SOURCE_SELECT; - uint32_t DWB_SOURCE_SELECT; - uint32_t OTG_DSC_START_POSITION; - uint32_t OPTC_DATA_FORMAT_CONTROL; - uint32_t OPTC_BYTES_PER_PIXEL; - uint32_t OPTC_WIDTH_CONTROL; - uint32_t OTG_DRR_CONTROL; - uint32_t OTG_BLANK_DATA_COLOR; - uint32_t OTG_BLANK_DATA_COLOR_EXT; - uint32_t OTG_DRR_TRIGGER_WINDOW; - uint32_t OTG_M_CONST_DTO0; - uint32_t OTG_M_CONST_DTO1; - uint32_t OTG_DRR_V_TOTAL_CHANGE; - uint32_t OTG_GLOBAL_CONTROL4; - uint32_t OTG_CRC0_WINDOWA_X_CONTROL_READBACK; - uint32_t OTG_CRC0_WINDOWA_Y_CONTROL_READBACK; - uint32_t OTG_CRC0_WINDOWB_X_CONTROL_READBACK; - uint32_t OTG_CRC0_WINDOWB_Y_CONTROL_READBACK; - uint32_t OTG_CRC1_WINDOWA_X_CONTROL_READBACK; - uint32_t OTG_CRC1_WINDOWA_Y_CONTROL_READBACK; - uint32_t OTG_CRC1_WINDOWB_X_CONTROL_READBACK; - uint32_t OTG_CRC1_WINDOWB_Y_CONTROL_READBACK; - uint32_t OPTC_CLOCK_CONTROL; -}; - -#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\ - SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ - SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\ - SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\ - SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\ - SF(OTG0_OTG_BLANK_CONTROL, OTG_BLANK_DATA_EN, mask_sh),\ - SF(OTG0_OTG_BLANK_CONTROL, OTG_BLANK_DE_MODE, mask_sh),\ - SF(OTG0_OTG_BLANK_CONTROL, OTG_CURRENT_BLANK_STATE, mask_sh),\ - SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\ - SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - 
SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ - SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ - SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ - SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\ - SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\ - SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\ - SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\ - SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\ - SF(OTG0_OTG_INTERLACE_CONTROL, OTG_INTERLACE_ENABLE, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ - SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_MID, OTG_V_TOTAL_MID, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK_EN, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_FRAME_NUM, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\ - SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\ - SF(OTG0_OTG_STATIC_SCREEN_CONTROL, 
OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\ - SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\ - SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\ - SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\ - SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\ - SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_B_CB, mask_sh),\ - SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_G_Y, mask_sh),\ - SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_R_CR, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ - SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ - SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ - SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ - SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ - SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\ - SF(OTG0_OTG_MASTER_UPDATE_MODE, MASTER_UPDATE_INTERLACED_MODE, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, 
mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh) - - - -#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\ - TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\ - SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC0, mask_sh),\ - SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC1, mask_sh),\ - SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_VRES, mask_sh),\ - SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_HRES, mask_sh),\ - SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_RAMP0_OFFSET, mask_sh),\ - SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_EN, mask_sh),\ - SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_MODE, mask_sh),\ - SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_DYNAMIC_RANGE, mask_sh),\ - SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_COLOR_FORMAT, mask_sh),\ - SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_MASK, mask_sh),\ - SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_DATA, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SRC_SEL, mask_sh),\ - SF(OTG0_OTG_MANUAL_FLOW_CONTROL, MANUAL_FLOW_CONTROL, mask_sh),\ - -#define TG_REG_FIELD_LIST_DCN1_0(type) \ - type VSTARTUP_START;\ - type VUPDATE_OFFSET;\ - type VUPDATE_WIDTH;\ - type VREADY_OFFSET;\ - type OTG_BLANK_DATA_EN;\ - type OTG_BLANK_DE_MODE;\ - type OTG_CURRENT_BLANK_STATE;\ - type OTG_MASTER_UPDATE_LOCK;\ - type UPDATE_LOCK_STATUS;\ - type OTG_UPDATE_PENDING;\ - type OTG_MASTER_UPDATE_LOCK_SEL;\ - type OTG_BLANK_DATA_DOUBLE_BUFFER_EN;\ - type OTG_H_TOTAL;\ - type OTG_H_BLANK_START;\ - type OTG_H_BLANK_END;\ - type OTG_H_SYNC_A_START;\ - type OTG_H_SYNC_A_END;\ - type OTG_H_SYNC_A_POL;\ - type OTG_H_TIMING_DIV_BY2;\ - type OTG_V_TOTAL;\ - type OTG_V_BLANK_START;\ - type OTG_V_BLANK_END;\ - type OTG_V_SYNC_A_START;\ - type OTG_V_SYNC_A_END;\ - type OTG_V_SYNC_A_POL;\ - type OTG_INTERLACE_ENABLE;\ - type OTG_MASTER_EN;\ - type OTG_START_POINT_CNTL;\ - type OTG_DISABLE_POINT_CNTL;\ - type OTG_FIELD_NUMBER_CNTL;\ - type OTG_CURRENT_MASTER_EN_STATE;\ - type OTG_STEREO_EN;\ - type OTG_STEREO_SYNC_OUTPUT_LINE_NUM;\ - type OTG_STEREO_SYNC_OUTPUT_POLARITY;\ - type OTG_STEREO_EYE_FLAG_POLARITY;\ - type OTG_STEREO_CURRENT_EYE;\ - type OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP;\ - type OTG_3D_STRUCTURE_EN;\ - type OTG_3D_STRUCTURE_V_UPDATE_MODE;\ - type OTG_3D_STRUCTURE_STEREO_SEL_OVR;\ - type OTG_V_TOTAL_MAX;\ - type OTG_V_TOTAL_MID;\ - type OTG_V_TOTAL_MIN;\ - type OTG_V_TOTAL_MIN_SEL;\ - type OTG_V_TOTAL_MAX_SEL;\ - type OTG_VTOTAL_MID_REPLACING_MAX_EN;\ - type OTG_VTOTAL_MID_FRAME_NUM;\ - type OTG_FORCE_LOCK_ON_EVENT;\ - type OTG_SET_V_TOTAL_MIN_MASK_EN;\ - type OTG_SET_V_TOTAL_MIN_MASK;\ - type OTG_FORCE_COUNT_NOW_CLEAR;\ - type OTG_FORCE_COUNT_NOW_MODE;\ - type OTG_FORCE_COUNT_NOW_OCCURRED;\ - type OTG_TRIGA_SOURCE_SELECT;\ - type OTG_TRIGA_SOURCE_PIPE_SELECT;\ - type OTG_TRIGA_RISING_EDGE_DETECT_CNTL;\ - type OTG_TRIGA_FALLING_EDGE_DETECT_CNTL;\ - type OTG_TRIGA_POLARITY_SELECT;\ - type OTG_TRIGA_FREQUENCY_SELECT;\ - type OTG_TRIGA_DELAY;\ - type OTG_TRIGA_CLEAR;\ - type OTG_TRIGA_MANUAL_TRIG;\ - type OTG_STATIC_SCREEN_EVENT_MASK;\ - type OTG_STATIC_SCREEN_FRAME_COUNT;\ - type OTG_FRAME_COUNT;\ - type OTG_V_BLANK;\ - type OTG_V_ACTIVE_DISP;\ - type OTG_HORZ_COUNT;\ - type OTG_VERT_COUNT;\ - type OTG_VERT_COUNT_NOM;\ - type OTG_BLACK_COLOR_B_CB;\ - type OTG_BLACK_COLOR_G_Y;\ - type OTG_BLACK_COLOR_R_CR;\ - type OTG_BLANK_DATA_COLOR_BLUE_CB;\ - type 
OTG_BLANK_DATA_COLOR_GREEN_Y;\ - type OTG_BLANK_DATA_COLOR_RED_CR;\ - type OTG_BLANK_DATA_COLOR_BLUE_CB_EXT;\ - type OTG_BLANK_DATA_COLOR_GREEN_Y_EXT;\ - type OTG_BLANK_DATA_COLOR_RED_CR_EXT;\ - type OTG_VTOTAL_MID_REPLACING_MIN_EN;\ - type OTG_TEST_PATTERN_INC0;\ - type OTG_TEST_PATTERN_INC1;\ - type OTG_TEST_PATTERN_VRES;\ - type OTG_TEST_PATTERN_HRES;\ - type OTG_TEST_PATTERN_RAMP0_OFFSET;\ - type OTG_TEST_PATTERN_EN;\ - type OTG_TEST_PATTERN_MODE;\ - type OTG_TEST_PATTERN_DYNAMIC_RANGE;\ - type OTG_TEST_PATTERN_COLOR_FORMAT;\ - type OTG_TEST_PATTERN_MASK;\ - type OTG_TEST_PATTERN_DATA;\ - type OTG_BUSY;\ - type OTG_CLOCK_EN;\ - type OTG_CLOCK_ON;\ - type OTG_CLOCK_GATE_DIS;\ - type OTG_VERTICAL_INTERRUPT0_INT_ENABLE;\ - type OTG_VERTICAL_INTERRUPT0_LINE_START;\ - type OTG_VERTICAL_INTERRUPT0_LINE_END;\ - type OTG_VERTICAL_INTERRUPT1_INT_ENABLE;\ - type OTG_VERTICAL_INTERRUPT1_LINE_START;\ - type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\ - type OTG_VERTICAL_INTERRUPT2_LINE_START;\ - type OPTC_INPUT_CLK_EN;\ - type OPTC_INPUT_CLK_ON;\ - type OPTC_INPUT_CLK_GATE_DIS;\ - type OPTC_UNDERFLOW_OCCURRED_STATUS;\ - type OPTC_UNDERFLOW_CLEAR;\ - type OPTC_SRC_SEL;\ - type VTG0_ENABLE;\ - type VTG0_FP2;\ - type VTG0_VCOUNT_INIT;\ - type OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED;\ - type OTG_FORCE_VSYNC_NEXT_LINE_CLEAR;\ - type OTG_AUTO_FORCE_VSYNC_MODE;\ - type MASTER_UPDATE_INTERLACED_MODE;\ - type OTG_GSL0_EN;\ - type OTG_GSL1_EN;\ - type OTG_GSL2_EN;\ - type OTG_GSL_MASTER_EN;\ - type OTG_GSL_FORCE_DELAY;\ - type OTG_GSL_CHECK_ALL_FIELDS;\ - type OTG_GSL_WINDOW_START_X;\ - type OTG_GSL_WINDOW_END_X;\ - type OTG_GSL_WINDOW_START_Y;\ - type OTG_GSL_WINDOW_END_Y;\ - type OTG_RANGE_TIMING_DBUF_UPDATE_MODE;\ - type OTG_GSL_MASTER_MODE;\ - type OTG_MASTER_UPDATE_LOCK_GSL_EN;\ - type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\ - type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\ - type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;\ - type OTG_CRC_CONT_EN;\ - type OTG_CRC0_SELECT;\ - type OTG_CRC_EN;\ - type CRC0_R_CR;\ - type CRC0_G_Y;\ - type CRC0_B_CB;\ - type CRC1_R_CR;\ - type CRC1_G_Y;\ - type CRC1_B_CB;\ - type CRC2_R_CR;\ - type CRC2_G_Y;\ - type CRC2_B_CB;\ - type CRC3_R_CR;\ - type CRC3_G_Y;\ - type CRC3_B_CB;\ - type OTG_CRC0_WINDOWA_X_START;\ - type OTG_CRC0_WINDOWA_X_END;\ - type OTG_CRC0_WINDOWA_Y_START;\ - type OTG_CRC0_WINDOWA_Y_END;\ - type OTG_CRC0_WINDOWB_X_START;\ - type OTG_CRC0_WINDOWB_X_END;\ - type OTG_CRC0_WINDOWB_Y_START;\ - type OTG_CRC0_WINDOWB_Y_END;\ - type OTG_CRC_WINDOW_DB_EN;\ - type OTG_CRC1_WINDOWA_X_START;\ - type OTG_CRC1_WINDOWA_X_END;\ - type OTG_CRC1_WINDOWA_Y_START;\ - type OTG_CRC1_WINDOWA_Y_END;\ - type OTG_CRC1_WINDOWB_X_START;\ - type OTG_CRC1_WINDOWB_X_END;\ - type OTG_CRC1_WINDOWB_Y_START;\ - type OTG_CRC1_WINDOWB_Y_END;\ - type GSL0_READY_SOURCE_SEL;\ - type GSL1_READY_SOURCE_SEL;\ - type GSL2_READY_SOURCE_SEL;\ - type MANUAL_FLOW_CONTROL;\ - type MANUAL_FLOW_CONTROL_SEL; - -#define TG_REG_FIELD_LIST(type) \ - TG_REG_FIELD_LIST_DCN1_0(type)\ - type OTG_V_SYNC_MODE;\ - type OTG_DRR_TRIGGER_WINDOW_START_X;\ - type OTG_DRR_TRIGGER_WINDOW_END_X;\ - type OTG_DRR_V_TOTAL_CHANGE_LIMIT;\ - type OTG_OUT_MUX;\ - type OTG_M_CONST_DTO_PHASE;\ - type OTG_M_CONST_DTO_MODULO;\ - type MASTER_UPDATE_LOCK_DB_X;\ - type MASTER_UPDATE_LOCK_DB_Y;\ - type MASTER_UPDATE_LOCK_DB_EN;\ - type GLOBAL_UPDATE_LOCK_EN;\ - type DIG_UPDATE_LOCATION;\ - type OTG_DSC_START_POSITION_X;\ - type OTG_DSC_START_POSITION_LINE_NUM;\ - type OPTC_NUM_OF_INPUT_SEGMENT;\ - type OPTC_SEG0_SRC_SEL;\ - type 
OPTC_SEG1_SRC_SEL;\ - type OPTC_SEG2_SRC_SEL;\ - type OPTC_SEG3_SRC_SEL;\ - type OPTC_MEM_SEL;\ - type OPTC_DATA_FORMAT;\ - type OPTC_DSC_MODE;\ - type OPTC_DSC_BYTES_PER_PIXEL;\ - type OPTC_DSC_SLICE_WIDTH;\ - type OPTC_SEGMENT_WIDTH;\ - type OPTC_DWB0_SOURCE_SELECT;\ - type OPTC_DWB1_SOURCE_SELECT;\ - type MASTER_UPDATE_LOCK_DB_START_X;\ - type MASTER_UPDATE_LOCK_DB_END_X;\ - type MASTER_UPDATE_LOCK_DB_START_Y;\ - type MASTER_UPDATE_LOCK_DB_END_Y;\ - type DIG_UPDATE_POSITION_X;\ - type DIG_UPDATE_POSITION_Y;\ - type OTG_H_TIMING_DIV_MODE;\ - type OTG_DRR_TIMING_DBUF_UPDATE_MODE;\ - type OTG_CRC_DSC_MODE;\ - type OTG_CRC_DATA_STREAM_COMBINE_MODE;\ - type OTG_CRC_DATA_STREAM_SPLIT_MODE;\ - type OTG_CRC_DATA_FORMAT;\ - type OTG_V_TOTAL_LAST_USED_BY_DRR;\ - type OTG_DRR_TIMING_DBUF_UPDATE_PENDING; - -#define TG_REG_FIELD_LIST_DCN3_2(type) \ - type OTG_H_TIMING_DIV_MODE_MANUAL; - - -#define TG_REG_FIELD_LIST_DCN3_5(type) \ - type OTG_CRC0_WINDOWA_X_START_READBACK;\ - type OTG_CRC0_WINDOWA_X_END_READBACK;\ - type OTG_CRC0_WINDOWA_Y_START_READBACK;\ - type OTG_CRC0_WINDOWA_Y_END_READBACK;\ - type OTG_CRC0_WINDOWB_X_START_READBACK;\ - type OTG_CRC0_WINDOWB_X_END_READBACK;\ - type OTG_CRC0_WINDOWB_Y_START_READBACK;\ - type OTG_CRC0_WINDOWB_Y_END_READBACK; \ - type OTG_CRC1_WINDOWA_X_START_READBACK;\ - type OTG_CRC1_WINDOWA_X_END_READBACK;\ - type OTG_CRC1_WINDOWA_Y_START_READBACK;\ - type OTG_CRC1_WINDOWA_Y_END_READBACK;\ - type OTG_CRC1_WINDOWB_X_START_READBACK;\ - type OTG_CRC1_WINDOWB_X_END_READBACK;\ - type OTG_CRC1_WINDOWB_Y_START_READBACK;\ - type OTG_CRC1_WINDOWB_Y_END_READBACK;\ - type OPTC_FGCG_REP_DIS; - -struct dcn_optc_shift { - TG_REG_FIELD_LIST(uint8_t) - TG_REG_FIELD_LIST_DCN3_2(uint8_t) - TG_REG_FIELD_LIST_DCN3_5(uint8_t) -}; - -struct dcn_optc_mask { - TG_REG_FIELD_LIST(uint32_t) - TG_REG_FIELD_LIST_DCN3_2(uint32_t) - TG_REG_FIELD_LIST_DCN3_5(uint32_t) -}; - -void dcn10_timing_generator_init(struct optc *optc); - -#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c deleted file mode 100644 index b94c5c97ee..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ /dev/null @@ -1,1686 +0,0 @@ -/* -* Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "dc.h" - -#include "dcn10_init.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn10_resource.h" -#include "dcn10_ipp.h" -#include "dcn10_mpc.h" -#include "irq/dcn10/irq_service_dcn10.h" -#include "dcn10_dpp.h" -#include "dcn10_optc.h" -#include "dcn10/dcn10_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn10_opp.h" -#include "dcn10_link_encoder.h" -#include "dcn10_stream_encoder.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "virtual/virtual_stream_encoder.h" -#include "dce110/dce110_resource.h" -#include "dce112/dce112_resource.h" -#include "dcn10_hubp.h" -#include "dcn10_hubbub.h" -#include "dce/dce_panel_cntl.h" - -#include "soc15_hw_ip.h" -#include "vega10_ip_offset.h" - -#include "dcn/dcn_1_0_offset.h" -#include "dcn/dcn_1_0_sh_mask.h" - -#include "nbio/nbio_7_0_offset.h" - -#include "mmhub/mmhub_9_1_offset.h" -#include "mmhub/mmhub_9_1_sh_mask.h" - -#include "reg_helper.h" -#include "dce/dce_abm.h" -#include "dce/dce_dmcu.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" - -#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL - #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f - #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f - #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f - #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f - #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f - #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f - #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f - #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 -#endif - - -enum dcn10_clk_src_array_id { - DCN10_CLK_SRC_PLL0, - DCN10_CLK_SRC_PLL1, - DCN10_CLK_SRC_PLL2, - DCN10_CLK_SRC_PLL3, - DCN10_CLK_SRC_TOTAL, - DCN101_CLK_SRC_TOTAL = DCN10_CLK_SRC_PLL3 -}; - -/* begin ********************* - * macros to expand the register list macros defined in the HW object header file */ - -/* DCN */ -#define BASE_INNER(seg) \ - DCE_BASE__INST0_SEG ## seg - -#define BASE(seg) \ - BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \ - mm ## reg_name ## 0 ## _ ## block ## id - -/* set field/register/bitfield name */ -#define SFRB(field_name, reg_name, bitfield, post_fix)\ - .field_name = reg_name ## __ ## bitfield ## post_fix - -/* NBIO */ -#define NBIO_BASE_INNER(seg) \ - NBIF_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -/* MMHUB */ -#define MMHUB_BASE_INNER(seg) \ - MMHUB_BASE__INST0_SEG ## seg - -#define MMHUB_BASE(seg) \ - MMHUB_BASE_INNER(seg) - -#define MMHUB_SR(reg_name)\ - .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -/* macros to expand the register list macros defined in the HW object header file - * end
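- *
- * As an illustration of the expansion above (an assumed example, not part of
- * the original list): SRI(OTG_CONTROL, OTG, 0) pastes tokens together into
- *
- *	.OTG_CONTROL = BASE(mmOTG0_OTG_CONTROL_BASE_IDX) + mmOTG0_OTG_CONTROL
- *
- * i.e. each register list entry resolves to the instance-qualified register
- * offset added to its IP segment base.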
*********************/ - - -static const struct dce_dmcu_registers dmcu_regs = { - DMCU_DCN10_REG_LIST() -}; - -static const struct dce_dmcu_shift dmcu_shift = { - DMCU_MASK_SH_LIST_DCN10(__SHIFT) -}; - -static const struct dce_dmcu_mask dmcu_mask = { - DMCU_MASK_SH_LIST_DCN10(_MASK) -}; - -static const struct dce_abm_registers abm_regs = { - ABM_DCN10_REG_LIST(0) -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN10(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN10(_MASK) -}; - -#define stream_enc_regs(id)\ -[id] = {\ - SE_DCN_REG_LIST(id)\ -} - -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), -}; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN10(_MASK) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define aux_regs(id)\ -[id] = {\ - AUX_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3) -}; - -#define link_regs(id)\ -[id] = {\ - LE_DCN10_REG_LIST(id), \ - SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ -} - -static const struct dcn10_link_enc_registers link_enc_regs[] = { - link_regs(0), - link_regs(1), - link_regs(2), - link_regs(3) -}; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN10(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK) -}; - -static const struct dce_panel_cntl_registers panel_cntl_regs[] = { - { DCN_PANEL_CNTL_REG_LIST() } -}; - -static const struct dce_panel_cntl_shift panel_cntl_shift = { - DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_panel_cntl_mask panel_cntl_mask = { - DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) -}; - -static const struct dce110_aux_registers_shift aux_shift = { - DCN10_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN10_AUX_MASK_SH_LIST(_MASK) -}; - -#define ipp_regs(id)\ -[id] = {\ - IPP_REG_LIST_DCN10(id),\ -} - -static const struct dcn10_ipp_registers ipp_regs[] = { - ipp_regs(0), - ipp_regs(1), - ipp_regs(2), - ipp_regs(3), -}; - -static const struct dcn10_ipp_shift ipp_shift = { - IPP_MASK_SH_LIST_DCN10(__SHIFT) -}; - -static const struct dcn10_ipp_mask ipp_mask = { - IPP_MASK_SH_LIST_DCN10(_MASK), -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_REG_LIST_DCN10(id),\ -} - -static const struct dcn10_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - 
opp_regs(3), -}; - -static const struct dcn10_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN10(__SHIFT) -}; - -static const struct dcn10_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN10(_MASK), -}; - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST(id), \ - .AUX_RESET_MASK = 0 \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4), - aux_engine_regs(5) -}; - -#define tf_regs(id)\ -[id] = {\ - TF_REG_LIST_DCN10(id),\ -} - -static const struct dcn_dpp_registers tf_regs[] = { - tf_regs(0), - tf_regs(1), - tf_regs(2), - tf_regs(3), -}; - -static const struct dcn_dpp_shift tf_shift = { - TF_REG_LIST_SH_MASK_DCN10(__SHIFT), - TF_DEBUG_REG_LIST_SH_DCN10 - -}; - -static const struct dcn_dpp_mask tf_mask = { - TF_REG_LIST_SH_MASK_DCN10(_MASK), - TF_DEBUG_REG_LIST_MASK_DCN10 -}; - -static const struct dcn_mpc_registers mpc_regs = { - MPC_COMMON_REG_LIST_DCN1_0(0), - MPC_COMMON_REG_LIST_DCN1_0(1), - MPC_COMMON_REG_LIST_DCN1_0(2), - MPC_COMMON_REG_LIST_DCN1_0(3), - MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0), - MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1), - MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2), - MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3) -}; - -static const struct dcn_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\ - SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT) -}; - -static const struct dcn_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\ - SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK) -}; - -#define tg_regs(id)\ -[id] = {TG_COMMON_REG_LIST_DCN1_0(id)} - -static const struct dcn_optc_registers tg_regs[] = { - tg_regs(0), - tg_regs(1), - tg_regs(2), - tg_regs(3), -}; - -static const struct dcn_optc_shift tg_shift = { - TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) -}; - -static const struct dcn_optc_mask tg_mask = { - TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK) -}; - -static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_3), - NBIO_SR(BIOS_SCRATCH_6) -}; - -#define hubp_regs(id)\ -[id] = {\ - HUBP_REG_LIST_DCN10(id)\ -} - -static const struct dcn_mi_registers hubp_regs[] = { - hubp_regs(0), - hubp_regs(1), - hubp_regs(2), - hubp_regs(3), -}; - -static const struct dcn_mi_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN10(__SHIFT) -}; - -static const struct dcn_mi_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN10(_MASK) -}; - -static const struct dcn_hubbub_registers hubbub_reg = { - HUBBUB_REG_LIST_DCN10(0) -}; - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN10(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN10(_MASK) -}; - -static int map_transmitter_id_to_phy_instance( - enum transmitter transmitter) -{ - switch (transmitter) { - case TRANSMITTER_UNIPHY_A: - return 0; - break; - case TRANSMITTER_UNIPHY_B: - return 1; - break; - case TRANSMITTER_UNIPHY_C: - return 2; - break; - case TRANSMITTER_UNIPHY_D: - return 3; - break; - default: - ASSERT(0); - return 0; - } -} - -#define clk_src_regs(index, pllid)\ -[index] = {\ - CS_COMMON_REG_LIST_DCN1_0(index, pllid),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, C), - clk_src_regs(3, D) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - 
CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK) -}; - -static const struct resource_caps res_cap = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 4, - .num_stream_encoder = 4, - .num_pll = 4, - .num_ddc = 4, -}; - -static const struct resource_caps rv2_res_cap = { - .num_timing_generator = 3, - .num_opp = 3, - .num_video_plane = 3, - .num_audio = 3, - .num_stream_encoder = 3, - .num_pll = 3, - .num_ddc = 4, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 1 - }, - - .max_downscale_factor = { - .argb8888 = 250, - .nv12 = 250, - .fp16 = 1 - } -}; - -static const struct dc_debug_options debug_defaults_drv = { - .sanity_checks = true, - .disable_dmcu = false, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - - /* raven smu doesn't allow 0 disp clk; - * the smu min disp clk limit is 50 MHz, so - * keep min disp clk at 100 MHz to avoid smu hangs - */ - .min_disp_clk_khz = 100000, - - .disable_pplib_clock_request = false, - .disable_pplib_wm_range = false, - .pplib_wm_report_mode = WM_REPORT_DEFAULT, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = true, - .disable_dcc = DCC_ENABLE, - .voltage_align_fclk = true, - .disable_stereo_support = true, - .vsr_support = true, - .performance_trace = false, - .az_endpoint_mute_only = true, - .recovery_enabled = false, /* enable this by default after testing. */ - .max_downscale_src_width = 3840, - .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_legacy_fast_update = true, - .using_dml2 = false, -}; - -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = false, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_stutter = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .underflow_assert_delay_us = 0xFFFFFFFF, -}; - -static void dcn10_dpp_destroy(struct dpp **dpp) -{ - kfree(TO_DCN10_DPP(*dpp)); - *dpp = NULL; -} - -static struct dpp *dcn10_dpp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn10_dpp *dpp = - kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL); - - if (!dpp) - return NULL; - - dpp1_construct(dpp, ctx, inst, - &tf_regs[inst], &tf_shift, &tf_mask); - return &dpp->base; -} - -static struct input_pixel_processor *dcn10_ipp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn10_ipp *ipp = - kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); - - if (!ipp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn10_ipp_construct(ipp, ctx, inst, - &ipp_regs[inst], &ipp_shift, &ipp_mask); - return &ipp->base; -} - - -static struct output_pixel_processor *dcn10_opp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn10_opp *opp = - kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL); - - if (!opp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn10_opp_construct(opp, ctx, inst, - &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -static struct dce_aux *dcn10_aux_engine_create(struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift,
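/*
 * All of the *_create() helpers in this file follow the same shape:
 * kzalloc the derived object, run its construct helper against the
 * per-instance reg/shift/mask tables, and return the embedded base,
 * roughly:
 *
 *	struct dcn10_dpp *dpp = kzalloc(sizeof(*dpp), GFP_KERNEL);
 *
 *	if (!dpp)
 *		return NULL;
 *	dpp1_construct(dpp, ctx, inst, &tf_regs[inst], &tf_shift, &tf_mask);
 *	return &dpp->base;
 *
 * The destroy path later recovers the derived type from the base pointer
 * (see TO_DCN10_DPP() in dcn10_dpp_destroy() above) before kfree().
 */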
- ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5), - i2c_inst_regs(6), -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) -}; - -static struct dce_i2c_hw *dcn10_i2c_hw_create(struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dcn1_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} -static struct mpc *dcn10_mpc_create(struct dc_context *ctx) -{ - struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc), - GFP_KERNEL); - - if (!mpc10) - return NULL; - - dcn10_mpc_construct(mpc10, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - 4); - - return &mpc10->base; -} - -static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx) -{ - struct dcn10_hubbub *dcn10_hubbub = kzalloc(sizeof(struct dcn10_hubbub), - GFP_KERNEL); - - if (!dcn10_hubbub) - return NULL; - - hubbub1_construct(&dcn10_hubbub->base, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask); - - return &dcn10_hubbub->base; -} - -static struct timing_generator *dcn10_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &tg_regs[instance]; - tgn10->tg_shift = &tg_shift; - tgn10->tg_mask = &tg_mask; - - dcn10_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -static struct link_encoder *dcn10_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn10_link_encoder *enc10 = - kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); - int link_regs_id; - - if (!enc10) - return NULL; - - link_regs_id = - map_transmitter_id_to_phy_instance(enc_init_data->transmitter); - - dcn10_link_encoder_construct(enc10, - enc_init_data, - &link_enc_feature, - &link_enc_regs[link_regs_id], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc10->base; -} - -static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dce_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dce_panel_cntl_construct(panel_cntl, - init_data, - &panel_cntl_regs[init_data->inst], - &panel_cntl_shift, - &panel_cntl_mask); - - return &panel_cntl->base; -} - -static struct clock_source *dcn10_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - 
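/*
 * Indexing note for dcn10_link_encoder_create() above: the AUX channel in
 * enc_init_data is 1-based here, hence the link_enc_aux_regs[channel - 1]
 * lookup, while hpd_source and the PHY instance returned by
 * map_transmitter_id_to_phy_instance() are already 0-based.
 */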
kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dce112_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); -} - -static struct audio *create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct stream_encoder *dcn10_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1 = - kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - - if (!enc1) - return NULL; - - dcn10_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - return &enc1->base; -} - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCN1_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN1_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN1_MASK_SH_LIST(_MASK) -}; - -static struct dce_hwseq *dcn10_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - hws->wa.DEGVIDCN10_253 = true; - hws->wa.false_optc_underflow = true; - hws->wa.DEGVIDCN10_254 = true; - - if ((ctx->asic_id.chip_family == FAMILY_RV) && - ASICREV_IS_RAVEN2(ctx->asic_id.hw_internal_rev)) - switch (ctx->asic_id.pci_revision_id) { - case PRID_POLLOCK_94: - case PRID_POLLOCK_95: - case PRID_POLLOCK_E9: - case PRID_POLLOCK_EA: - case PRID_POLLOCK_EB: - hws->wa.wait_hubpret_read_start_during_mpo_transition = true; - break; - default: - hws->wa.wait_hubpret_read_start_during_mpo_transition = false; - break; - } - } - return hws; -} - -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = create_audio, - .create_stream_encoder = dcn10_stream_encoder_create, - .create_hwseq = dcn10_hwseq_create, -}; - -static void dcn10_clock_source_destroy(struct clock_source **clk_src) -{ - kfree(TO_DCE110_CLK_SRC(*clk_src)); - *clk_src = NULL; -} - -static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx) -{ - struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); - - if (!pp_smu) - return pp_smu; - - dm_pp_get_funcs(ctx, pp_smu); - return pp_smu; -} - -static void dcn10_resource_destruct(struct dcn10_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) { - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = NULL; - } - } - - if (pool->base.mpc != NULL) { - kfree(TO_DCN10_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - - if (pool->base.dpps[i] != NULL) - dcn10_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - 
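/*
 * Teardown in dcn10_resource_destruct() mirrors construction: each object
 * is NULL-checked, released through the matching container macro or
 * destroy hook, and the pointer is cleared so that a repeated destruct
 * (e.g. from the construct error path) is harmless:
 *
 *	if (pool->base.mpc != NULL) {
 *		kfree(TO_DCN10_MPC(pool->base.mpc));
 *		pool->base.mpc = NULL;
 *	}
 */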
pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN10_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } - - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn10_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn10_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - if (pool->base.abm != NULL) - dce_abm_destroy(&pool->base.abm); - - if (pool->base.dmcu != NULL) - dce_dmcu_destroy(&pool->base.dmcu); - - kfree(pool->base.pp_smu); -} - -static struct hubp *dcn10_hubp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn10_hubp *hubp1 = - kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL); - - if (!hubp1) - return NULL; - - dcn10_hubp_construct(hubp1, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask); - return &hubp1->base; -} - -static void get_pixel_clock_parameters( - const struct pipe_ctx *pipe_ctx, - struct pixel_clk_params *pixel_clk_params) -{ - const struct dc_stream_state *stream = pipe_ctx->stream; - pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; - pixel_clk_params->encoder_object_id = stream->link->link_enc->id; - pixel_clk_params->signal_type = pipe_ctx->stream->signal; - pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; - /* TODO: un-hardcode*/ - pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * - LINK_RATE_REF_FREQ_IN_KHZ; - pixel_clk_params->flags.ENABLE_SS = 0; - pixel_clk_params->color_depth = - stream->timing.display_color_depth; - pixel_clk_params->flags.DISPLAY_BLANKED = 1; - pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; - - if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) - pixel_clk_params->color_depth = COLOR_DEPTH_888; - - if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) - pixel_clk_params->requested_pix_clk_100hz /= 2; - if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) - pixel_clk_params->requested_pix_clk_100hz *= 2; - -} - -static void build_clamping_params(struct dc_stream_state *stream) -{ - stream->clamping.clamping_level = CLAMPING_FULL_RANGE; - stream->clamping.c_depth = stream->timing.display_color_depth; - stream->clamping.pixel_encoding = stream->timing.pixel_encoding; -} - -static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx) -{ - - get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); - - pipe_ctx->clock_source->funcs->get_pix_clk_dividers( - pipe_ctx->clock_source, - &pipe_ctx->stream_res.pix_clk_params, - &pipe_ctx->pll_settings); - - pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; - - resource_build_bit_depth_reduction_params(pipe_ctx->stream, - 
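/*
 * Worked example for the adjustments in get_pixel_clock_parameters()
 * above: a 4k60 timing with pix_clk_100hz = 5940000 (594 MHz) driven as
 * YCBCR420 requests 5940000 / 2 = 2970000 (297 MHz), while the same
 * timing with TIMING_3D_FORMAT_HW_FRAME_PACKING would instead request
 * 5940000 * 2 = 11880000 (1188 MHz).
 */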
&pipe_ctx->stream->bit_depth_params); - build_clamping_params(pipe_ctx->stream); -} - -static enum dc_status build_mapped_resource( - const struct dc *dc, - struct dc_state *context, - struct dc_stream_state *stream) -{ - struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); - - if (!pipe_ctx) - return DC_ERROR_UNEXPECTED; - - build_pipe_hw_param(pipe_ctx); - return DC_OK; -} - -static enum dc_status dcn10_add_stream_to_ctx( - struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *dc_stream) -{ - enum dc_status result = DC_ERROR_UNEXPECTED; - - result = resource_map_pool_resources(dc, new_ctx, dc_stream); - - if (result == DC_OK) - result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); - - - if (result == DC_OK) - result = build_mapped_resource(dc, new_ctx, dc_stream); - - return result; -} - -static struct pipe_ctx *dcn10_acquire_free_pipe_for_layer( - const struct dc_state *cur_ctx, - struct dc_state *new_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *opp_head_pipe) -{ - struct resource_context *res_ctx = &new_ctx->res_ctx; - struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream); - struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe); - - if (!head_pipe) { - ASSERT(0); - return NULL; - } - - if (!idle_pipe) - return NULL; - - idle_pipe->stream = head_pipe->stream; - idle_pipe->stream_res.tg = head_pipe->stream_res.tg; - idle_pipe->stream_res.abm = head_pipe->stream_res.abm; - idle_pipe->stream_res.opp = head_pipe->stream_res.opp; - - idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; - - return idle_pipe; -} - -static bool dcn10_get_dcc_compression_cap(const struct dc *dc, - const struct dc_dcc_surface_param *input, - struct dc_surface_dcc_cap *output) -{ - return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( - dc->res_pool->hubbub, - input, - output); -} - -static void dcn10_destroy_resource_pool(struct resource_pool **pool) -{ - struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool); - - dcn10_resource_destruct(dcn10_pool); - kfree(dcn10_pool); - *pool = NULL; -} - -static bool dcn10_validate_bandwidth( - struct dc *dc, - struct dc_state *context, - bool fast_validate) -{ - bool voltage_supported; - - DC_FP_START(); - voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate); - DC_FP_END(); - - return voltage_supported; -} - -static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) -{ - if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN - && caps->max_video_width != 0 - && plane_state->src_rect.width > caps->max_video_width) - return DC_FAIL_SURFACE_VALIDATE; - - return DC_OK; -} - -static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *context) -{ - int i, j; - bool video_down_scaled = false; - bool video_large = false; - bool desktop_large = false; - bool dcc_disabled = false; - bool mpo_enabled = false; - - for (i = 0; i < context->stream_count; i++) { - if (context->stream_status[i].plane_count == 0) - continue; - - if (context->stream_status[i].plane_count > 2) - return DC_FAIL_UNSUPPORTED_1; - - if (context->stream_status[i].plane_count > 1) - mpo_enabled = true; - - for (j = 0; j < 
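/*
 * The secondary pipe handed out by dcn10_acquire_free_pipe_for_layer()
 * above shares the head pipe's stream resources (tg/abm/opp) but owns its
 * own plane resources, so two planes blended onto one stream look roughly
 * like:
 *
 *	plane 0: HUBP0 -> DPP0 \
 *	                        MPC blend -> OPP -> TG
 *	plane 1: HUBP1 -> DPP1 /
 */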
context->stream_status[i].plane_count; j++) { - struct dc_plane_state *plane = - context->stream_status[i].plane_states[j]; - - - if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { - - if (plane->src_rect.width > plane->dst_rect.width || - plane->src_rect.height > plane->dst_rect.height) - video_down_scaled = true; - - if (plane->src_rect.width >= 3840) - video_large = true; - - } else { - if (plane->src_rect.width >= 3840) - desktop_large = true; - if (!plane->dcc.enable) - dcc_disabled = true; - } - } - } - - /* Disable MPO in multi-display configurations. */ - if (context->stream_count > 1 && mpo_enabled) - return DC_FAIL_UNSUPPORTED_1; - - /* - * Workaround: On DCN10 there is UMC issue that causes underflow when - * playing 4k video on 4k desktop with video downscaled and single channel - * memory - */ - if (video_large && desktop_large && video_down_scaled && dcc_disabled && - dc->dcn_soc->number_of_channels == 1) - return DC_FAIL_SURFACE_VALIDATE; - - return DC_OK; -} - -static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state) -{ - enum surface_pixel_format surf_pix_format = plane_state->format; - unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format); - - enum swizzle_mode_values swizzle = DC_SW_LINEAR; - - if (bpp == 64) - swizzle = DC_SW_64KB_D; - else - swizzle = DC_SW_64KB_S; - - plane_state->tiling_info.gfx9.swizzle = swizzle; - return DC_OK; -} - -struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link( - struct resource_context *res_ctx, - const struct resource_pool *pool, - struct dc_stream_state *stream) -{ - int i; - int j = -1; - struct dc_link *link = stream->link; - - for (i = 0; i < pool->stream_enc_count; i++) { - if (!res_ctx->is_stream_enc_acquired[i] && - pool->stream_enc[i]) { - /* Store first available for MST second display - * in daisy chain use case - */ - j = i; - if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id == - link->link_enc->preferred_engine) - return pool->stream_enc[i]; - } - } - - /* - * For CZ and later, we can allow DIG FE and BE to differ for all display types - */ - - if (j >= 0) - return pool->stream_enc[j]; - - return NULL; -} - -static const struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn10_get_dcc_compression_cap -}; - -static const struct resource_funcs dcn10_res_pool_funcs = { - .destroy = dcn10_destroy_resource_pool, - .link_enc_create = dcn10_link_encoder_create, - .panel_cntl_create = dcn10_panel_cntl_create, - .validate_bandwidth = dcn10_validate_bandwidth, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn10_acquire_free_pipe_for_layer, - .validate_plane = dcn10_validate_plane, - .validate_global = dcn10_validate_global, - .add_stream_to_ctx = dcn10_add_stream_to_ctx, - .patch_unknown_plane_state = dcn10_patch_unknown_plane_state, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link -}; - -static uint32_t read_pipe_fuses(struct dc_context *ctx) -{ - uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); - /* RV1 support max 4 pipes */ - value = value & 0xf; - return value; -} - -static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks) -{ - int i; - - if (clks->num_levels == 0) - return false; - - for (i = 0; i < clks->num_levels; i++) - /* Ensure that the result is sane */ - if (clks->data[i].clocks_in_khz == 0) - return false; - - return true; -} - -static bool dcn10_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dcn10_resource_pool 
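/*
 * Fuse decode example for read_pipe_fuses() above: CC_DC_PIPE_DIS & 0xf is
 * a per-pipe disable bitmask, so a fused value of 0x4 (0b0100) disables HW
 * pipe 2; the construct loop below then maps HW instances 0, 1 and 3 to
 * logical pipes 0..2 and ends up with pipe_count == 3.
 */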
*pool) -{ - int i; - int j; - struct dc_context *ctx = dc->ctx; - uint32_t pipe_fuses = read_pipe_fuses(ctx); - struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; - int min_fclk_khz, min_dcfclk_khz, socclk_khz; - bool res; - - ctx->dc_bios->regs = &bios_regs; - - if (ctx->dce_version == DCN_VERSION_1_01) - pool->base.res_cap = &rv2_res_cap; - else - pool->base.res_cap = &res_cap; - pool->base.funcs = &dcn10_res_pool_funcs; - - /* - * TODO fill in from actual raven resource when we create - * more than a virtual encoder - */ - - /************************************************* - * Resource + asic cap hardcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - - /* max pipe num for ASIC before checking pipe fuses */ - pool->base.pipe_count = pool->base.res_cap->num_timing_generator; - - if (dc->ctx->dce_version == DCN_VERSION_1_01) - pool->base.pipe_count = 3; - dc->caps.max_video_width = 3840; - dc->caps.max_downscale_ratio = 200; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 100; /* 1.4 w/a not applied by default */ - dc->caps.max_cursor_size = 256; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.max_slave_planes = 1; - dc->caps.max_slave_yuv_planes = 1; - dc->caps.max_slave_rgb_planes = 0; - dc->caps.is_apu = true; - dc->caps.post_blend_color_processing = false; - dc->caps.extended_aux_timeout_support = false; - - /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */ - dc->caps.force_dp_tps4_for_cp2520 = true; - - /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 1; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 1; - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.dgam_rom_caps.pq = 0; - dc->caps.color.dpp.dgam_rom_caps.hlg = 0; - dc->caps.color.dpp.post_csc = 0; - dc->caps.color.dpp.gamma_corr = 0; - dc->caps.color.dpp.dgam_rom_for_yuv = 1; - - dc->caps.color.dpp.hw_3d_lut = 0; - dc->caps.color.dpp.ogam_ram = 1; // RGAM on DCN1 - dc->caps.color.dpp.ogam_rom_caps.srgb = 1; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 1; - - /* no post-blend color operations */ - dc->caps.color.mpc.gamut_remap = 0; - dc->caps.color.mpc.num_3dluts = 0; - dc->caps.color.mpc.shared_3d_lut = 0; - dc->caps.color.mpc.ogam_ram = 0; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; - dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 0; - - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) - dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; - - /************************************************* - * Create resources * - *************************************************/ - - pool->base.clock_sources[DCN10_CLK_SRC_PLL0] = - dcn10_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCN10_CLK_SRC_PLL1] = - dcn10_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCN10_CLK_SRC_PLL2] = - dcn10_clock_source_create(ctx, ctx->dc_bios, -
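/*
 * Clock source layout for the creates around this point: combo PHY
 * PLL0..PLL2 are always populated, DCN 1.0 additionally wires PLL3, and
 * the DP DTO source reuses the PLL0 register block (see the todo below).
 * This is why clk_src_count is DCN10_CLK_SRC_TOTAL on DCN 1.0 but drops
 * to DCN101_CLK_SRC_TOTAL on DCN 1.01.
 */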
CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - - if (dc->ctx->dce_version == DCN_VERSION_1_0) { - pool->base.clock_sources[DCN10_CLK_SRC_PLL3] = - dcn10_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - } - - pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL; - - if (dc->ctx->dce_version == DCN_VERSION_1_01) - pool->base.clk_src_count = DCN101_CLK_SRC_TOTAL; - - pool->base.dp_clock_source = - dcn10_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - /* todo: do not reuse phy_pll registers */ - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto fail; - } - } - - pool->base.dmcu = dcn10_dmcu_create(ctx, - &dmcu_regs, - &dmcu_shift, - &dmcu_mask); - if (pool->base.dmcu == NULL) { - dm_error("DC: failed to create dmcu!\n"); - BREAK_TO_DEBUGGER(); - goto fail; - } - - pool->base.abm = dce_abm_create(ctx, - &abm_regs, - &abm_shift, - &abm_mask); - if (pool->base.abm == NULL) { - dm_error("DC: failed to create abm!\n"); - BREAK_TO_DEBUGGER(); - goto fail; - } - - dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1); - memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults)); - memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults)); - - DC_FP_START(); - dcn10_resource_construct_fp(dc); - DC_FP_END(); - - if (!dc->config.is_vmin_only_asic) - if (ASICREV_IS_RAVEN2(dc->ctx->asic_id.hw_internal_rev)) - switch (dc->ctx->asic_id.pci_revision_id) { - case PRID_DALI_DE: - case PRID_DALI_DF: - case PRID_DALI_E3: - case PRID_DALI_E4: - case PRID_POLLOCK_94: - case PRID_POLLOCK_95: - case PRID_POLLOCK_E9: - case PRID_POLLOCK_EA: - case PRID_POLLOCK_EB: - dc->config.is_vmin_only_asic = true; - break; - default: - break; - } - - pool->base.pp_smu = dcn10_pp_smu_create(ctx); - - /* - * Right now SMU/PPLIB and DAL all have the AZ D3 force PME notification - * implemented, so AZ D3 should work. For issue 197007. - */ - if (pool->base.pp_smu != NULL - && pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL) - dc->debug.az_endpoint_mute_only = false; - - - if (!dc->debug.disable_pplib_clock_request) { - /* - * TODO: This is not the proper way to obtain - * fabric_and_dram_bandwidth, should be min(fclk, memclk).
- */ - res = dm_pp_get_clock_levels_by_type_with_voltage( - ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); - - DC_FP_START(); - - if (res) - res = verify_clock_values(&fclks); - - if (res) - dcn_bw_update_from_pplib_fclks(dc, &fclks); - else - BREAK_TO_DEBUGGER(); - - DC_FP_END(); - - res = dm_pp_get_clock_levels_by_type_with_voltage( - ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); - - DC_FP_START(); - - if (res) - res = verify_clock_values(&dcfclks); - - if (res) - dcn_bw_update_from_pplib_dcfclks(dc, &dcfclks); - else - BREAK_TO_DEBUGGER(); - - DC_FP_END(); - } - - dcn_bw_sync_calcs_and_dml(dc); - if (!dc->debug.disable_pplib_wm_range) { - dc->res_pool = &pool->base; - DC_FP_START(); - dcn_get_soc_clks( - dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz); - DC_FP_END(); - dcn_bw_notify_pplib_of_wm_ranges( - dc, min_fclk_khz, min_dcfclk_khz, socclk_khz); - } - - { - struct irq_service_init_data init_data; - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dcn10_create(&init_data); - if (!pool->base.irqs) - goto fail; - } - - /* index to valid pipe resource */ - j = 0; - /* mem input -> ipp -> dpp -> opp -> TG */ - for (i = 0; i < pool->base.pipe_count; i++) { - /* if pipe is disabled, skip instance of HW pipe, - * i.e, skip ASIC register instance - */ - if ((pipe_fuses & (1 << i)) != 0) - continue; - - pool->base.hubps[j] = dcn10_hubp_create(ctx, i); - if (pool->base.hubps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create memory input!\n"); - goto fail; - } - - pool->base.ipps[j] = dcn10_ipp_create(ctx, i); - if (pool->base.ipps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create input pixel processor!\n"); - goto fail; - } - - pool->base.dpps[j] = dcn10_dpp_create(ctx, i); - if (pool->base.dpps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create dpp!\n"); - goto fail; - } - - pool->base.opps[j] = dcn10_opp_create(ctx, i); - if (pool->base.opps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto fail; - } - - pool->base.timing_generators[j] = dcn10_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto fail; - } - /* check next valid pipe */ - j++; - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dcn10_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto fail; - } - pool->base.hw_i2cs[i] = dcn10_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create hw i2c!!\n"); - goto fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - /* valid pipe num */ - pool->base.pipe_count = j; - pool->base.timing_generator_count = j; - - /* within dml lib, it is hard code to 4. 
If ASIC pipe is fused, - * the value may be changed - */ - dc->dml.ip.max_num_dpp = pool->base.pipe_count; - dc->dcn_ip->max_num_dpp = pool->base.pipe_count; - - pool->base.mpc = dcn10_mpc_create(ctx); - if (pool->base.mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto fail; - } - - pool->base.hubbub = dcn10_hubbub_create(ctx); - if (pool->base.hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto fail; - } - - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto fail; - - dcn10_hw_sequencer_construct(dc); - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - return true; - -fail: - - dcn10_resource_destruct(pool); - - return false; -} - -struct resource_pool *dcn10_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc) -{ - struct dcn10_resource_pool *pool = - kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dcn10_resource_construct(init_data->num_virtual_links, dc, pool)) - return &pool->base; - - kfree(pool); - BREAK_TO_DEBUGGER(); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h deleted file mode 100644 index bf8e33cd81..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h +++ /dev/null @@ -1,56 +0,0 @@ -/* -* Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DC_RESOURCE_DCN10_H__ -#define __DC_RESOURCE_DCN10_H__ - -#include "core_types.h" -#include "dml/dcn10/dcn10_fpu.h" - -#define TO_DCN10_RES_POOL(pool)\ - container_of(pool, struct dcn10_resource_pool, base) - -struct dc; -struct resource_pool; -struct _vcs_dpi_display_pipe_params_st; - -extern struct _vcs_dpi_ip_params_st dcn1_0_ip; -extern struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc; - -struct dcn10_resource_pool { - struct resource_pool base; -}; -struct resource_pool *dcn10_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); - -struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link( - struct resource_context *res_ctx, - const struct resource_pool *pool, - struct dc_stream_state *stream); - - -#endif /* __DC_RESOURCE_DCN10_H__ */ - diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index d7dc9696a8..3dae3943b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -2,13 +2,11 @@ # # Makefile for DCN. -DCN20 = dcn20_resource.o dcn20_init.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ - dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \ +DCN20 = dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ + dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \ dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \ dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o -DCN20 += dcn20_dsc.o - AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20)) AMD_DISPLAY_FILES += $(AMD_DAL_DCN20) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index ab6d09c6fe..ef5c22f415 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -291,7 +291,43 @@ type SYMCLKB_FE_SRC_SEL;\ type SYMCLKC_FE_SRC_SEL;\ type SYMCLKD_FE_SRC_SEL;\ - type SYMCLKE_FE_SRC_SEL; + type SYMCLKE_FE_SRC_SEL;\ + type DTBCLK_P0_GATE_DISABLE;\ + type DTBCLK_P1_GATE_DISABLE;\ + type DTBCLK_P2_GATE_DISABLE;\ + type DTBCLK_P3_GATE_DISABLE;\ + type DSCCLK0_ROOT_GATE_DISABLE;\ + type DSCCLK1_ROOT_GATE_DISABLE;\ + type DSCCLK2_ROOT_GATE_DISABLE;\ + type DSCCLK3_ROOT_GATE_DISABLE;\ + type SYMCLKA_FE_ROOT_GATE_DISABLE;\ + type SYMCLKB_FE_ROOT_GATE_DISABLE;\ + type SYMCLKC_FE_ROOT_GATE_DISABLE;\ + type SYMCLKD_FE_ROOT_GATE_DISABLE;\ + type SYMCLKE_FE_ROOT_GATE_DISABLE;\ + type DPPCLK0_ROOT_GATE_DISABLE;\ + type DPPCLK1_ROOT_GATE_DISABLE;\ + type DPPCLK2_ROOT_GATE_DISABLE;\ + type DPPCLK3_ROOT_GATE_DISABLE;\ + type HDMISTREAMCLK0_ROOT_GATE_DISABLE;\ + type SYMCLKA_ROOT_GATE_DISABLE;\ + type SYMCLKB_ROOT_GATE_DISABLE;\ + type SYMCLKC_ROOT_GATE_DISABLE;\ + type SYMCLKD_ROOT_GATE_DISABLE;\ + type SYMCLKE_ROOT_GATE_DISABLE;\ + type PHYA_REFCLK_ROOT_GATE_DISABLE;\ + type PHYB_REFCLK_ROOT_GATE_DISABLE;\ + type PHYC_REFCLK_ROOT_GATE_DISABLE;\ + type PHYD_REFCLK_ROOT_GATE_DISABLE;\ + type PHYE_REFCLK_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK0_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK1_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK2_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK3_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK0_GATE_DISABLE;\ + type DPSTREAMCLK1_GATE_DISABLE;\ + type DPSTREAMCLK2_GATE_DISABLE;\ + type DPSTREAMCLK3_GATE_DISABLE;\ struct dccg_shift { DCCG_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c deleted file mode 100644 index c9ae2d8f00..0000000000 
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ /dev/null @@ -1,780 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include - -#include "reg_helper.h" -#include "dcn20_dsc.h" -#include "dsc/dscc_types.h" -#include "dsc/rc_calc.h" - -static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals); - -/* Object I/F functions */ -static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); -static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg); -static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, - struct dsc_optc_config *dsc_optc_cfg); -static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe); -static void dsc2_disable(struct display_stream_compressor *dsc); -static void dsc2_disconnect(struct display_stream_compressor *dsc); - -static const struct dsc_funcs dcn20_dsc_funcs = { - .dsc_get_enc_caps = dsc2_get_enc_caps, - .dsc_read_state = dsc2_read_state, - .dsc_validate_stream = dsc2_validate_stream, - .dsc_set_config = dsc2_set_config, - .dsc_get_packed_pps = dsc2_get_packed_pps, - .dsc_enable = dsc2_enable, - .dsc_disable = dsc2_disable, - .dsc_disconnect = dsc2_disconnect, -}; - -/* Macro definitions for REG_SET macros */ -#define CTX \ - dsc20->base.ctx - -#define REG(reg)\ - dsc20->dsc_regs->reg - -#undef FN -#define FN(reg_name, field_name) \ - dsc20->dsc_shift->field_name, dsc20->dsc_mask->field_name -#define DC_LOGGER \ - dsc->ctx->logger - -enum dsc_bits_per_comp { - DSC_BPC_8 = 8, - DSC_BPC_10 = 10, - DSC_BPC_12 = 12, - DSC_BPC_UNKNOWN -}; - -/* API functions (external or via structure->function_pointer) */ - -void dsc2_construct(struct dcn20_dsc *dsc, - struct dc_context *ctx, - int inst, - const struct dcn20_dsc_registers *dsc_regs, - const struct dcn20_dsc_shift *dsc_shift, - const struct dcn20_dsc_mask *dsc_mask) -{ - dsc->base.ctx = ctx; - dsc->base.inst = inst; - dsc->base.funcs = &dcn20_dsc_funcs; - - dsc->dsc_regs = dsc_regs; - dsc->dsc_shift = dsc_shift; - dsc->dsc_mask = dsc_mask; - - dsc->max_image_width = 5184; -} - - -#define DCN20_MAX_PIXEL_CLOCK_Mhz 1188 -#define DCN20_MAX_DISPLAY_CLOCK_Mhz 1200 - -/* This returns the capabilities for a single DSC encoder engine. Number of slices and total throughput - * can be doubled, tripled etc.
by using additional DSC engines. - */ -void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) -{ - dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */ - - dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1; - - dsc_enc_caps->lb_bit_depth = 13; - dsc_enc_caps->is_block_pred_supported = true; - - dsc_enc_caps->color_formats.bits.RGB = 1; - dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; - dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; - - dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1; - dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1; - dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1; - - /* Maximum total throughput with all the slices combined. This is different from how DP spec specifies it. - * Our decoder's total throughput in Pix/s is equal to DISPCLK. This is then shared between slices. - * The value below is the absolute maximum value. The actual throughput may be lower, but it'll always - * be sufficient to process the input pixel rate fed into a single DSC engine. - */ - dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz; - - /* For a pixel clock higher than the single-pipe limit we'll need two engines, which then doubles our - * throughput and number of slices, but also introduces a lower limit of 2 slices. - */ - if (pixel_clock_100Hz >= DCN20_MAX_PIXEL_CLOCK_Mhz*10000) { - dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 0; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1; - dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz * 2; - } - - /* For a pixel clock higher than twice the single-pipe limit we need four engines (ODM 4:1), which then quadruples our - * throughput and number of slices. - */ - if (pixel_clock_100Hz > DCN20_MAX_PIXEL_CLOCK_Mhz*10000*2) { - dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1; - dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz * 4; - } - - dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */ - dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */ -} - - -/* This function reads DSC-related register fields to be logged later in dcn10_log_hw_state - * into a dcn_dsc_state struct.
- */ -static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s) -{ - struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); - - REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en); - REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width); - REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bits_per_pixel); - REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height); - REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size); - REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width); - REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height); - REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset); - REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en, - DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source); -} - - -static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg) -{ - struct dsc_optc_config dsc_optc_cfg; - struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); - - if (dsc_cfg->pic_width > dsc20->max_image_width) - return false; - - return dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, &dsc_optc_cfg); -} - - -void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config) -{ - DC_LOG_DSC("\tnum_slices_h %d", config->dc_dsc_cfg.num_slices_h); - DC_LOG_DSC("\tnum_slices_v %d", config->dc_dsc_cfg.num_slices_v); - DC_LOG_DSC("\tbits_per_pixel %d (%d.%04d)", - config->dc_dsc_cfg.bits_per_pixel, - config->dc_dsc_cfg.bits_per_pixel / 16, - ((config->dc_dsc_cfg.bits_per_pixel % 16) * 10000) / 16); - DC_LOG_DSC("\tcolor_depth %d", config->color_depth); -} - -static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, - struct dsc_optc_config *dsc_optc_cfg) -{ - bool is_config_ok; - struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); - - DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst); - dsc_config_log(dsc, dsc_cfg); - is_config_ok = dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, dsc_optc_cfg); - ASSERT(is_config_ok); - DC_LOG_DSC("programming DSC Picture Parameter Set (PPS):"); - dsc_log_pps(dsc, &dsc20->reg_vals.pps); - dsc_write_to_registers(dsc, &dsc20->reg_vals); -} - - -bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps) -{ - bool is_config_ok; - struct dsc_reg_values dsc_reg_vals; - struct dsc_optc_config dsc_optc_cfg; - - memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals)); - memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg)); - - DC_LOG_DSC("Getting packed DSC PPS for DSC Config:"); - dsc_config_log(dsc, dsc_cfg); - DC_LOG_DSC("DSC Picture Parameter Set (PPS):"); - is_config_ok = dsc_prepare_config(dsc_cfg, &dsc_reg_vals, &dsc_optc_cfg); - ASSERT(is_config_ok); - drm_dsc_pps_payload_pack((struct drm_dsc_picture_parameter_set *)dsc_packed_pps, &dsc_reg_vals.pps); - dsc_log_pps(dsc, &dsc_reg_vals.pps); - - return is_config_ok; -} - - -static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe) -{ - struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); - int dsc_clock_en; - int dsc_fw_config; - int enabled_opp_pipe; - - DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe); - - REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en); - REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe); - if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) { - DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe); - ASSERT(0); - } - 
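/*
 * With the already-enabled guard above satisfied, enabling is a two-step
 * sequence: ungate the DSC clock first, then point the RM forwarding mux
 * at the requested OPP pipe. dsc2_disable() below is the mirror image,
 * tearing down the forward path before gating the clock.
 */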
- REG_UPDATE(DSC_TOP_CONTROL, - DSC_CLOCK_EN, 1); - - REG_UPDATE_2(DSCRM_DSC_FORWARD_CONFIG, - DSCRM_DSC_FORWARD_EN, 1, - DSCRM_DSC_OPP_PIPE_SOURCE, opp_pipe); -} - - -static void dsc2_disable(struct display_stream_compressor *dsc) -{ - struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); - int dsc_clock_en; - int dsc_fw_config; - int enabled_opp_pipe; - - DC_LOG_DSC("disable DSC %d", dsc->inst); - - REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en); - REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe); - if (!dsc_clock_en || !dsc_fw_config) { - DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe); - ASSERT(0); - } - - REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG, - DSCRM_DSC_FORWARD_EN, 0); - - REG_UPDATE(DSC_TOP_CONTROL, - DSC_CLOCK_EN, 0); -} - -static void dsc2_disconnect(struct display_stream_compressor *dsc) -{ - struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); - - DC_LOG_DSC("disconnect DSC %d", dsc->inst); - - REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG, - DSCRM_DSC_FORWARD_EN, 0); -} - -/* This module's internal functions */ -void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps) -{ - int i; - int bits_per_pixel = pps->bits_per_pixel; - - DC_LOG_DSC("\tdsc_version_major %d", pps->dsc_version_major); - DC_LOG_DSC("\tdsc_version_minor %d", pps->dsc_version_minor); - DC_LOG_DSC("\tbits_per_component %d", pps->bits_per_component); - DC_LOG_DSC("\tline_buf_depth %d", pps->line_buf_depth); - DC_LOG_DSC("\tblock_pred_enable %d", pps->block_pred_enable); - DC_LOG_DSC("\tconvert_rgb %d", pps->convert_rgb); - DC_LOG_DSC("\tsimple_422 %d", pps->simple_422); - DC_LOG_DSC("\tvbr_enable %d", pps->vbr_enable); - DC_LOG_DSC("\tbits_per_pixel %d (%d.%04d)", bits_per_pixel, bits_per_pixel / 16, ((bits_per_pixel % 16) * 10000) / 16); - DC_LOG_DSC("\tpic_height %d", pps->pic_height); - DC_LOG_DSC("\tpic_width %d", pps->pic_width); - DC_LOG_DSC("\tslice_height %d", pps->slice_height); - DC_LOG_DSC("\tslice_width %d", pps->slice_width); - DC_LOG_DSC("\tslice_chunk_size %d", pps->slice_chunk_size); - DC_LOG_DSC("\tinitial_xmit_delay %d", pps->initial_xmit_delay); - DC_LOG_DSC("\tinitial_dec_delay %d", pps->initial_dec_delay); - DC_LOG_DSC("\tinitial_scale_value %d", pps->initial_scale_value); - DC_LOG_DSC("\tscale_increment_interval %d", pps->scale_increment_interval); - DC_LOG_DSC("\tscale_decrement_interval %d", pps->scale_decrement_interval); - DC_LOG_DSC("\tfirst_line_bpg_offset %d", pps->first_line_bpg_offset); - DC_LOG_DSC("\tnfl_bpg_offset %d", pps->nfl_bpg_offset); - DC_LOG_DSC("\tslice_bpg_offset %d", pps->slice_bpg_offset); - DC_LOG_DSC("\tinitial_offset %d", pps->initial_offset); - DC_LOG_DSC("\tfinal_offset %d", pps->final_offset); - DC_LOG_DSC("\tflatness_min_qp %d", pps->flatness_min_qp); - DC_LOG_DSC("\tflatness_max_qp %d", pps->flatness_max_qp); - /* DC_LOG_DSC("\trc_parameter_set %d", pps->rc_parameter_set); */ - DC_LOG_DSC("\tnative_420 %d", pps->native_420); - DC_LOG_DSC("\tnative_422 %d", pps->native_422); - DC_LOG_DSC("\tsecond_line_bpg_offset %d", pps->second_line_bpg_offset); - DC_LOG_DSC("\tnsl_bpg_offset %d", pps->nsl_bpg_offset); - DC_LOG_DSC("\tsecond_line_offset_adj %d", pps->second_line_offset_adj); - DC_LOG_DSC("\trc_model_size %d", pps->rc_model_size); - DC_LOG_DSC("\trc_edge_factor %d", pps->rc_edge_factor); - DC_LOG_DSC("\trc_quant_incr_limit0 %d", pps->rc_quant_incr_limit0); - DC_LOG_DSC("\trc_quant_incr_limit1 %d", pps->rc_quant_incr_limit1); - 
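/*
 * The "(%d.%04d)" forms above render the 1/16-unit bpp value as a
 * decimal: e.g. bits_per_pixel == 136 logs as "136 (8.5000)", since
 * 136 / 16 == 8 and ((136 % 16) * 10000) / 16 == 5000.
 */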
DC_LOG_DSC("\trc_tgt_offset_high %d", pps->rc_tgt_offset_high); - DC_LOG_DSC("\trc_tgt_offset_low %d", pps->rc_tgt_offset_low); - - for (i = 0; i < NUM_BUF_RANGES - 1; i++) - DC_LOG_DSC("\trc_buf_thresh[%d] %d", i, pps->rc_buf_thresh[i]); - - for (i = 0; i < NUM_BUF_RANGES; i++) { - DC_LOG_DSC("\trc_range_parameters[%d].range_min_qp %d", i, pps->rc_range_params[i].range_min_qp); - DC_LOG_DSC("\trc_range_parameters[%d].range_max_qp %d", i, pps->rc_range_params[i].range_max_qp); - DC_LOG_DSC("\trc_range_parameters[%d].range_bpg_offset %d", i, pps->rc_range_params[i].range_bpg_offset); - } -} - -void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override) -{ - uint8_t i; - - rc->rc_model_size = override->rc_model_size; - for (i = 0; i < DC_DSC_RC_BUF_THRESH_SIZE; i++) - rc->rc_buf_thresh[i] = override->rc_buf_thresh[i]; - for (i = 0; i < DC_DSC_QP_SET_SIZE; i++) { - rc->qp_min[i] = override->rc_minqp[i]; - rc->qp_max[i] = override->rc_maxqp[i]; - rc->ofs[i] = override->rc_offset[i]; - } - - rc->rc_tgt_offset_hi = override->rc_tgt_offset_hi; - rc->rc_tgt_offset_lo = override->rc_tgt_offset_lo; - rc->rc_edge_factor = override->rc_edge_factor; - rc->rc_quant_incr_limit0 = override->rc_quant_incr_limit0; - rc->rc_quant_incr_limit1 = override->rc_quant_incr_limit1; - - rc->initial_fullness_offset = override->initial_fullness_offset; - rc->initial_xmit_delay = override->initial_delay; - - rc->flatness_min_qp = override->flatness_min_qp; - rc->flatness_max_qp = override->flatness_max_qp; - rc->flatness_det_thresh = override->flatness_det_thresh; -} - -bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals, - struct dsc_optc_config *dsc_optc_cfg) -{ - struct dsc_parameters dsc_params; - struct rc_params rc; - - /* Validate input parameters */ - ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_h); - ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_v); - ASSERT(dsc_cfg->dc_dsc_cfg.version_minor == 1 || dsc_cfg->dc_dsc_cfg.version_minor == 2); - ASSERT(dsc_cfg->pic_width); - ASSERT(dsc_cfg->pic_height); - ASSERT((dsc_cfg->dc_dsc_cfg.version_minor == 1 && - (8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 13)) || - (dsc_cfg->dc_dsc_cfg.version_minor == 2 && - ((8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 15) || - dsc_cfg->dc_dsc_cfg.linebuf_depth == 0))); - ASSERT(96 <= dsc_cfg->dc_dsc_cfg.bits_per_pixel && dsc_cfg->dc_dsc_cfg.bits_per_pixel <= 0x3ff); // 6.0 <= bits_per_pixel <= 63.9375 - - if (!dsc_cfg->dc_dsc_cfg.num_slices_v || !dsc_cfg->dc_dsc_cfg.num_slices_h || - !(dsc_cfg->dc_dsc_cfg.version_minor == 1 || dsc_cfg->dc_dsc_cfg.version_minor == 2) || - !dsc_cfg->pic_width || !dsc_cfg->pic_height || - !((dsc_cfg->dc_dsc_cfg.version_minor == 1 && // v1.1 line buffer depth range: - 8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 13) || - (dsc_cfg->dc_dsc_cfg.version_minor == 2 && // v1.2 line buffer depth range: - ((8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 15) || - dsc_cfg->dc_dsc_cfg.linebuf_depth == 0))) || - !(96 <= dsc_cfg->dc_dsc_cfg.bits_per_pixel && dsc_cfg->dc_dsc_cfg.bits_per_pixel <= 0x3ff)) { - dm_output_to_console("%s: Invalid parameters\n", __func__); - return false; - } - - dsc_init_reg_values(dsc_reg_vals); - - /* Copy input config */ - dsc_reg_vals->pixel_format = dsc_dc_pixel_encoding_to_dsc_pixel_format(dsc_cfg->pixel_encoding, dsc_cfg->dc_dsc_cfg.ycbcr422_simple); - dsc_reg_vals->num_slices_h = 
dsc_cfg->dc_dsc_cfg.num_slices_h; - dsc_reg_vals->num_slices_v = dsc_cfg->dc_dsc_cfg.num_slices_v; - dsc_reg_vals->pps.dsc_version_minor = dsc_cfg->dc_dsc_cfg.version_minor; - dsc_reg_vals->pps.pic_width = dsc_cfg->pic_width; - dsc_reg_vals->pps.pic_height = dsc_cfg->pic_height; - dsc_reg_vals->pps.bits_per_component = dsc_dc_color_depth_to_dsc_bits_per_comp(dsc_cfg->color_depth); - dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable; - dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth; - dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1; - dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0; - - // TODO: in addition to validating slice height (pic height must be divisible by slice height), - // see what happens when the same condition doesn't apply for slice_width/pic_width. - dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h; - dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v; - - ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height); - if (!(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height)) { - dm_output_to_console("%s: pix height %d not divisible by num_slices_v %d\n\n", __func__, dsc_cfg->pic_height, dsc_cfg->dc_dsc_cfg.num_slices_v); - return false; - } - - dsc_reg_vals->bpp_x32 = dsc_cfg->dc_dsc_cfg.bits_per_pixel << 1; - if (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422) - dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32; - else - dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32 >> 1; - - dsc_reg_vals->pps.convert_rgb = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB ? 
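/*
 * Pixel format to PPS flag mapping programmed here and just below:
 *
 *	DSC_PIXFMT_RGB             -> convert_rgb = 1
 *	DSC_PIXFMT_YCBCR444        -> (all flags 0)
 *	DSC_PIXFMT_SIMPLE_YCBCR422 -> simple_422 = 1
 *	DSC_PIXFMT_NATIVE_YCBCR422 -> native_422 = 1, bpp kept in 1/32 units
 *	DSC_PIXFMT_NATIVE_YCBCR420 -> native_420 = 1, bpp kept in 1/32 units
 *
 * Non-native formats scale bpp_x32 back to 1/16 units via the >> 1 above.
 */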
1 : 0; - dsc_reg_vals->pps.native_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422); - dsc_reg_vals->pps.native_420 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420); - dsc_reg_vals->pps.simple_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422); - - calc_rc_params(&rc, &dsc_reg_vals->pps); - - if (dsc_cfg->dc_dsc_cfg.rc_params_ovrd) - dsc_override_rc_params(&rc, dsc_cfg->dc_dsc_cfg.rc_params_ovrd); - - if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &rc, &dsc_params)) { - dm_output_to_console("%s: DSC config failed\n", __func__); - return false; - } - - dsc_update_from_dsc_parameters(dsc_reg_vals, &dsc_params); - - dsc_optc_cfg->bytes_per_pixel = dsc_params.bytes_per_pixel; - dsc_optc_cfg->slice_width = dsc_reg_vals->pps.slice_width; - dsc_optc_cfg->is_pixel_format_444 = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB || - dsc_reg_vals->pixel_format == DSC_PIXFMT_YCBCR444 || - dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422; - - return true; -} - - -enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple) -{ - enum dsc_pixel_format dsc_pix_fmt = DSC_PIXFMT_UNKNOWN; - - /* NOTE: We don't support DSC_PIXFMT_SIMPLE_YCBCR422 */ - - switch (dc_pix_enc) { - case PIXEL_ENCODING_RGB: - dsc_pix_fmt = DSC_PIXFMT_RGB; - break; - case PIXEL_ENCODING_YCBCR422: - if (is_ycbcr422_simple) - dsc_pix_fmt = DSC_PIXFMT_SIMPLE_YCBCR422; - else - dsc_pix_fmt = DSC_PIXFMT_NATIVE_YCBCR422; - break; - case PIXEL_ENCODING_YCBCR444: - dsc_pix_fmt = DSC_PIXFMT_YCBCR444; - break; - case PIXEL_ENCODING_YCBCR420: - dsc_pix_fmt = DSC_PIXFMT_NATIVE_YCBCR420; - break; - default: - dsc_pix_fmt = DSC_PIXFMT_UNKNOWN; - break; - } - - ASSERT(dsc_pix_fmt != DSC_PIXFMT_UNKNOWN); - return dsc_pix_fmt; -} - - -enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth) -{ - enum dsc_bits_per_comp bpc = DSC_BPC_UNKNOWN; - - switch (dc_color_depth) { - case COLOR_DEPTH_888: - bpc = DSC_BPC_8; - break; - case COLOR_DEPTH_101010: - bpc = DSC_BPC_10; - break; - case COLOR_DEPTH_121212: - bpc = DSC_BPC_12; - break; - default: - bpc = DSC_BPC_UNKNOWN; - break; - } - - return bpc; -} - - -void dsc_init_reg_values(struct dsc_reg_values *reg_vals) -{ - int i; - - memset(reg_vals, 0, sizeof(struct dsc_reg_values)); - - /* Non-PPS values */ - reg_vals->dsc_clock_enable = 1; - reg_vals->dsc_clock_gating_disable = 0; - reg_vals->underflow_recovery_en = 0; - reg_vals->underflow_occurred_int_en = 0; - reg_vals->underflow_occurred_status = 0; - reg_vals->ich_reset_at_eol = 0; - reg_vals->alternate_ich_encoding_en = 0; - reg_vals->rc_buffer_model_size = 0; - /*reg_vals->disable_ich = 0;*/ - reg_vals->dsc_dbg_en = 0; - - for (i = 0; i < 4; i++) - reg_vals->rc_buffer_model_overflow_int_en[i] = 0; - - /* PPS values */ - reg_vals->pps.dsc_version_minor = 2; - reg_vals->pps.dsc_version_major = 1; - reg_vals->pps.line_buf_depth = 9; - reg_vals->pps.bits_per_component = 8; - reg_vals->pps.block_pred_enable = 1; - reg_vals->pps.slice_chunk_size = 0; - reg_vals->pps.pic_width = 0; - reg_vals->pps.pic_height = 0; - reg_vals->pps.slice_width = 0; - reg_vals->pps.slice_height = 0; - reg_vals->pps.initial_xmit_delay = 170; - reg_vals->pps.initial_dec_delay = 0; - reg_vals->pps.initial_scale_value = 0; - reg_vals->pps.scale_increment_interval = 0; - reg_vals->pps.scale_decrement_interval = 0; - reg_vals->pps.nfl_bpg_offset = 0; - reg_vals->pps.slice_bpg_offset = 0; - reg_vals->pps.nsl_bpg_offset = 0; - 
reg_vals->pps.initial_offset = 6144; - reg_vals->pps.final_offset = 0; - reg_vals->pps.flatness_min_qp = 3; - reg_vals->pps.flatness_max_qp = 12; - reg_vals->pps.rc_model_size = 8192; - reg_vals->pps.rc_edge_factor = 6; - reg_vals->pps.rc_quant_incr_limit0 = 11; - reg_vals->pps.rc_quant_incr_limit1 = 11; - reg_vals->pps.rc_tgt_offset_low = 3; - reg_vals->pps.rc_tgt_offset_high = 3; -} - -/* Updates dsc_reg_values::reg_vals::xxx fields based on the values from computed params. - * This is required because dscc_compute_dsc_parameters returns a modified PPS, which in turn - * affects non-PPS register values. - */ -void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params) -{ - int i; - - reg_vals->pps = dsc_params->pps; - - // pps_computed will have the "expanded" values; need to shift them to make them fit for regs. - for (i = 0; i < NUM_BUF_RANGES - 1; i++) - reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6; - - reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size; -} - -static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals) -{ - uint32_t temp_int; - struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); - - REG_SET(DSC_DEBUG_CONTROL, 0, - DSC_DBG_EN, reg_vals->dsc_dbg_en); - - // dsccif registers - REG_SET_5(DSCCIF_CONFIG0, 0, - INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, reg_vals->underflow_recovery_en, - INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, reg_vals->underflow_occurred_int_en, - INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, reg_vals->underflow_occurred_status, - INPUT_PIXEL_FORMAT, reg_vals->pixel_format, - DSCCIF_CONFIG0__BITS_PER_COMPONENT, reg_vals->pps.bits_per_component); - - REG_SET_2(DSCCIF_CONFIG1, 0, - PIC_WIDTH, reg_vals->pps.pic_width, - PIC_HEIGHT, reg_vals->pps.pic_height); - - // dscc registers - if (dsc20->dsc_mask->ICH_RESET_AT_END_OF_LINE == 0) { - REG_SET_3(DSCC_CONFIG0, 0, - NUMBER_OF_SLICES_PER_LINE, reg_vals->num_slices_h - 1, - ALTERNATE_ICH_ENCODING_EN, reg_vals->alternate_ich_encoding_en, - NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, reg_vals->num_slices_v - 1); - } else { - REG_SET_4(DSCC_CONFIG0, 0, ICH_RESET_AT_END_OF_LINE, - reg_vals->ich_reset_at_eol, NUMBER_OF_SLICES_PER_LINE, - reg_vals->num_slices_h - 1, ALTERNATE_ICH_ENCODING_EN, - reg_vals->alternate_ich_encoding_en, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, - reg_vals->num_slices_v - 1); - } - - REG_SET(DSCC_CONFIG1, 0, - DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size); - /*REG_SET_2(DSCC_CONFIG1, 0, - DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size, - DSCC_DISABLE_ICH, reg_vals->disable_ich);*/ - - REG_SET_4(DSCC_INTERRUPT_CONTROL_STATUS, 0, - DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[0], - DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[1], - DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[2], - DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[3]); - - REG_SET_3(DSCC_PPS_CONFIG0, 0, - DSC_VERSION_MINOR, reg_vals->pps.dsc_version_minor, - LINEBUF_DEPTH, reg_vals->pps.line_buf_depth, - DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, reg_vals->pps.bits_per_component); - - if (reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422) - temp_int = reg_vals->bpp_x32; - else - temp_int = 
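/* A note on units, mirroring the assignments in dsc_prepare_config() above:
 * bpp_x32 carries the target rate in 1/32-bpp steps, i.e.
 * dc_dsc_cfg.bits_per_pixel (1/16-bpp units) shifted left by one. Native
 * 4:2:0/4:2:2 program BITS_PER_PIXEL with the 1/32 value; every other format
 * uses the usual U6.4 (1/16 bpp) PPS encoding, hence the shift right by one
 * here. Worked example for a hypothetical 12-bpp target:
 * bits_per_pixel == 192 (12 * 16), bpp_x32 == 384, and the field is
 * programmed with 384 for native 4:2:0/4:2:2 or 192 (384 >> 1) otherwise.
 */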
reg_vals->bpp_x32 >> 1; - - REG_SET_7(DSCC_PPS_CONFIG1, 0, - BITS_PER_PIXEL, temp_int, - SIMPLE_422, reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422, - CONVERT_RGB, reg_vals->pixel_format == DSC_PIXFMT_RGB, - BLOCK_PRED_ENABLE, reg_vals->pps.block_pred_enable, - NATIVE_422, reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422, - NATIVE_420, reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420, - CHUNK_SIZE, reg_vals->pps.slice_chunk_size); - - REG_SET_2(DSCC_PPS_CONFIG2, 0, - PIC_WIDTH, reg_vals->pps.pic_width, - PIC_HEIGHT, reg_vals->pps.pic_height); - - REG_SET_2(DSCC_PPS_CONFIG3, 0, - SLICE_WIDTH, reg_vals->pps.slice_width, - SLICE_HEIGHT, reg_vals->pps.slice_height); - - REG_SET(DSCC_PPS_CONFIG4, 0, - INITIAL_XMIT_DELAY, reg_vals->pps.initial_xmit_delay); - - REG_SET_2(DSCC_PPS_CONFIG5, 0, - INITIAL_SCALE_VALUE, reg_vals->pps.initial_scale_value, - SCALE_INCREMENT_INTERVAL, reg_vals->pps.scale_increment_interval); - - REG_SET_3(DSCC_PPS_CONFIG6, 0, - SCALE_DECREMENT_INTERVAL, reg_vals->pps.scale_decrement_interval, - FIRST_LINE_BPG_OFFSET, reg_vals->pps.first_line_bpg_offset, - SECOND_LINE_BPG_OFFSET, reg_vals->pps.second_line_bpg_offset); - - REG_SET_2(DSCC_PPS_CONFIG7, 0, - NFL_BPG_OFFSET, reg_vals->pps.nfl_bpg_offset, - SLICE_BPG_OFFSET, reg_vals->pps.slice_bpg_offset); - - REG_SET_2(DSCC_PPS_CONFIG8, 0, - NSL_BPG_OFFSET, reg_vals->pps.nsl_bpg_offset, - SECOND_LINE_OFFSET_ADJ, reg_vals->pps.second_line_offset_adj); - - REG_SET_2(DSCC_PPS_CONFIG9, 0, - INITIAL_OFFSET, reg_vals->pps.initial_offset, - FINAL_OFFSET, reg_vals->pps.final_offset); - - REG_SET_3(DSCC_PPS_CONFIG10, 0, - FLATNESS_MIN_QP, reg_vals->pps.flatness_min_qp, - FLATNESS_MAX_QP, reg_vals->pps.flatness_max_qp, - RC_MODEL_SIZE, reg_vals->pps.rc_model_size); - - REG_SET_5(DSCC_PPS_CONFIG11, 0, - RC_EDGE_FACTOR, reg_vals->pps.rc_edge_factor, - RC_QUANT_INCR_LIMIT0, reg_vals->pps.rc_quant_incr_limit0, - RC_QUANT_INCR_LIMIT1, reg_vals->pps.rc_quant_incr_limit1, - RC_TGT_OFFSET_LO, reg_vals->pps.rc_tgt_offset_low, - RC_TGT_OFFSET_HI, reg_vals->pps.rc_tgt_offset_high); - - REG_SET_4(DSCC_PPS_CONFIG12, 0, - RC_BUF_THRESH0, reg_vals->pps.rc_buf_thresh[0], - RC_BUF_THRESH1, reg_vals->pps.rc_buf_thresh[1], - RC_BUF_THRESH2, reg_vals->pps.rc_buf_thresh[2], - RC_BUF_THRESH3, reg_vals->pps.rc_buf_thresh[3]); - - REG_SET_4(DSCC_PPS_CONFIG13, 0, - RC_BUF_THRESH4, reg_vals->pps.rc_buf_thresh[4], - RC_BUF_THRESH5, reg_vals->pps.rc_buf_thresh[5], - RC_BUF_THRESH6, reg_vals->pps.rc_buf_thresh[6], - RC_BUF_THRESH7, reg_vals->pps.rc_buf_thresh[7]); - - REG_SET_4(DSCC_PPS_CONFIG14, 0, - RC_BUF_THRESH8, reg_vals->pps.rc_buf_thresh[8], - RC_BUF_THRESH9, reg_vals->pps.rc_buf_thresh[9], - RC_BUF_THRESH10, reg_vals->pps.rc_buf_thresh[10], - RC_BUF_THRESH11, reg_vals->pps.rc_buf_thresh[11]); - - REG_SET_5(DSCC_PPS_CONFIG15, 0, - RC_BUF_THRESH12, reg_vals->pps.rc_buf_thresh[12], - RC_BUF_THRESH13, reg_vals->pps.rc_buf_thresh[13], - RANGE_MIN_QP0, reg_vals->pps.rc_range_params[0].range_min_qp, - RANGE_MAX_QP0, reg_vals->pps.rc_range_params[0].range_max_qp, - RANGE_BPG_OFFSET0, reg_vals->pps.rc_range_params[0].range_bpg_offset); - - REG_SET_6(DSCC_PPS_CONFIG16, 0, - RANGE_MIN_QP1, reg_vals->pps.rc_range_params[1].range_min_qp, - RANGE_MAX_QP1, reg_vals->pps.rc_range_params[1].range_max_qp, - RANGE_BPG_OFFSET1, reg_vals->pps.rc_range_params[1].range_bpg_offset, - RANGE_MIN_QP2, reg_vals->pps.rc_range_params[2].range_min_qp, - RANGE_MAX_QP2, reg_vals->pps.rc_range_params[2].range_max_qp, - RANGE_BPG_OFFSET2, 
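/* Each of the fifteen rc_range_params entries contributes three fields
 * (range_min_qp, range_max_qp, range_bpg_offset), spread across
 * DSCC_PPS_CONFIG15..22 here, two entries per register from CONFIG16 on.
 * The fourteen rc_buf_thresh values were pre-shifted right by 6 in
 * dsc_update_from_dsc_parameters(). Illustrative numbers, assuming the
 * common 8 bpc threshold table: an expanded threshold of 896 lands in the
 * register as 896 >> 6 == 14.
 */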
reg_vals->pps.rc_range_params[2].range_bpg_offset); - - REG_SET_6(DSCC_PPS_CONFIG17, 0, - RANGE_MIN_QP3, reg_vals->pps.rc_range_params[3].range_min_qp, - RANGE_MAX_QP3, reg_vals->pps.rc_range_params[3].range_max_qp, - RANGE_BPG_OFFSET3, reg_vals->pps.rc_range_params[3].range_bpg_offset, - RANGE_MIN_QP4, reg_vals->pps.rc_range_params[4].range_min_qp, - RANGE_MAX_QP4, reg_vals->pps.rc_range_params[4].range_max_qp, - RANGE_BPG_OFFSET4, reg_vals->pps.rc_range_params[4].range_bpg_offset); - - REG_SET_6(DSCC_PPS_CONFIG18, 0, - RANGE_MIN_QP5, reg_vals->pps.rc_range_params[5].range_min_qp, - RANGE_MAX_QP5, reg_vals->pps.rc_range_params[5].range_max_qp, - RANGE_BPG_OFFSET5, reg_vals->pps.rc_range_params[5].range_bpg_offset, - RANGE_MIN_QP6, reg_vals->pps.rc_range_params[6].range_min_qp, - RANGE_MAX_QP6, reg_vals->pps.rc_range_params[6].range_max_qp, - RANGE_BPG_OFFSET6, reg_vals->pps.rc_range_params[6].range_bpg_offset); - - REG_SET_6(DSCC_PPS_CONFIG19, 0, - RANGE_MIN_QP7, reg_vals->pps.rc_range_params[7].range_min_qp, - RANGE_MAX_QP7, reg_vals->pps.rc_range_params[7].range_max_qp, - RANGE_BPG_OFFSET7, reg_vals->pps.rc_range_params[7].range_bpg_offset, - RANGE_MIN_QP8, reg_vals->pps.rc_range_params[8].range_min_qp, - RANGE_MAX_QP8, reg_vals->pps.rc_range_params[8].range_max_qp, - RANGE_BPG_OFFSET8, reg_vals->pps.rc_range_params[8].range_bpg_offset); - - REG_SET_6(DSCC_PPS_CONFIG20, 0, - RANGE_MIN_QP9, reg_vals->pps.rc_range_params[9].range_min_qp, - RANGE_MAX_QP9, reg_vals->pps.rc_range_params[9].range_max_qp, - RANGE_BPG_OFFSET9, reg_vals->pps.rc_range_params[9].range_bpg_offset, - RANGE_MIN_QP10, reg_vals->pps.rc_range_params[10].range_min_qp, - RANGE_MAX_QP10, reg_vals->pps.rc_range_params[10].range_max_qp, - RANGE_BPG_OFFSET10, reg_vals->pps.rc_range_params[10].range_bpg_offset); - - REG_SET_6(DSCC_PPS_CONFIG21, 0, - RANGE_MIN_QP11, reg_vals->pps.rc_range_params[11].range_min_qp, - RANGE_MAX_QP11, reg_vals->pps.rc_range_params[11].range_max_qp, - RANGE_BPG_OFFSET11, reg_vals->pps.rc_range_params[11].range_bpg_offset, - RANGE_MIN_QP12, reg_vals->pps.rc_range_params[12].range_min_qp, - RANGE_MAX_QP12, reg_vals->pps.rc_range_params[12].range_max_qp, - RANGE_BPG_OFFSET12, reg_vals->pps.rc_range_params[12].range_bpg_offset); - - REG_SET_6(DSCC_PPS_CONFIG22, 0, - RANGE_MIN_QP13, reg_vals->pps.rc_range_params[13].range_min_qp, - RANGE_MAX_QP13, reg_vals->pps.rc_range_params[13].range_max_qp, - RANGE_BPG_OFFSET13, reg_vals->pps.rc_range_params[13].range_bpg_offset, - RANGE_MIN_QP14, reg_vals->pps.rc_range_params[14].range_min_qp, - RANGE_MAX_QP14, reg_vals->pps.rc_range_params[14].range_max_qp, - RANGE_BPG_OFFSET14, reg_vals->pps.rc_range_params[14].range_bpg_offset); - -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h deleted file mode 100644 index ba869387c3..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h +++ /dev/null @@ -1,589 +0,0 @@ -/* Copyright 2017 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ -#ifndef __DCN20_DSC_H__ -#define __DCN20_DSC_H__ - -#include "dsc.h" -#include "dsc/dscc_types.h" -#include <drm/display/drm_dsc.h> - -#define TO_DCN20_DSC(dsc)\ - container_of(dsc, struct dcn20_dsc, base) - -#define DSC_REG_LIST_DCN20(id) \ - SRI(DSC_TOP_CONTROL, DSC_TOP, id),\ - SRI(DSC_DEBUG_CONTROL, DSC_TOP, id),\ - SRI(DSCC_CONFIG0, DSCC, id),\ - SRI(DSCC_CONFIG1, DSCC, id),\ - SRI(DSCC_STATUS, DSCC, id),\ - SRI(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id),\ - SRI(DSCC_PPS_CONFIG0, DSCC, id),\ - SRI(DSCC_PPS_CONFIG1, DSCC, id),\ - SRI(DSCC_PPS_CONFIG2, DSCC, id),\ - SRI(DSCC_PPS_CONFIG3, DSCC, id),\ - SRI(DSCC_PPS_CONFIG4, DSCC, id),\ - SRI(DSCC_PPS_CONFIG5, DSCC, id),\ - SRI(DSCC_PPS_CONFIG6, DSCC, id),\ - SRI(DSCC_PPS_CONFIG7, DSCC, id),\ - SRI(DSCC_PPS_CONFIG8, DSCC, id),\ - SRI(DSCC_PPS_CONFIG9, DSCC, id),\ - SRI(DSCC_PPS_CONFIG10, DSCC, id),\ - SRI(DSCC_PPS_CONFIG11, DSCC, id),\ - SRI(DSCC_PPS_CONFIG12, DSCC, id),\ - SRI(DSCC_PPS_CONFIG13, DSCC, id),\ - SRI(DSCC_PPS_CONFIG14, DSCC, id),\ - SRI(DSCC_PPS_CONFIG15, DSCC, id),\ - SRI(DSCC_PPS_CONFIG16, DSCC, id),\ - SRI(DSCC_PPS_CONFIG17, DSCC, id),\ - SRI(DSCC_PPS_CONFIG18, DSCC, id),\ - SRI(DSCC_PPS_CONFIG19, DSCC, id),\ - SRI(DSCC_PPS_CONFIG20, DSCC, id),\ - SRI(DSCC_PPS_CONFIG21, DSCC, id),\ - SRI(DSCC_PPS_CONFIG22, DSCC, id),\ - SRI(DSCC_MEM_POWER_CONTROL, DSCC, id),\ - SRI(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id),\ - SRI(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id),\ - SRI(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id),\ - SRI(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id),\ - SRI(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id),\ - SRI(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id),\ - SRI(DSCC_MAX_ABS_ERROR0, DSCC, id),\ - SRI(DSCC_MAX_ABS_ERROR1, DSCC, id),\ - SRI(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\ - SRI(DSCCIF_CONFIG0, DSCCIF, id),\ - SRI(DSCCIF_CONFIG1, DSCCIF, id),\ - SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id) - - -#define DSC_SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## __ ## field_name ## post_fix - -//Used in resolving the
corner case with duplicate field name -#define DSC2_SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## _ ## field_name ## post_fix - -#define DSC_REG_LIST_SH_MASK_DCN20(mask_sh)\ - DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \ - DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \ - DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \ - DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_CONFIG0, ICH_RESET_AT_END_OF_LINE, mask_sh), \ - DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \ - DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \ - DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \ - /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \ - DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG0, 
DSC_VERSION_MAJOR, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG0, PPS_IDENTIFIER, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG0, LINEBUF_DEPTH, mask_sh), \ - DSC2_SF(DSCC0, DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BITS_PER_PIXEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, VBR_ENABLE, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, SIMPLE_422, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CONVERT_RGB, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_422, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_420, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CHUNK_SIZE, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_WIDTH, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_HEIGHT, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_WIDTH, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_HEIGHT, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_XMIT_DELAY, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_DEC_DELAY, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG5, INITIAL_SCALE_VALUE, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG5, SCALE_INCREMENT_INTERVAL, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SCALE_DECREMENT_INTERVAL, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG6, FIRST_LINE_BPG_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SECOND_LINE_BPG_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG7, NFL_BPG_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG8, NSL_BPG_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG8, SECOND_LINE_OFFSET_ADJ, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG9, INITIAL_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG9, FINAL_OFFSET, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MIN_QP, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MAX_QP, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG10, RC_MODEL_SIZE, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_EDGE_FACTOR, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT0, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT1, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_LO, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_HI, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH0, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH1, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH2, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH3, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH4, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH5, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH6, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH7, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH8, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH9, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH10, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH11, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH12, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH13, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MIN_QP0, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MAX_QP0, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_BPG_OFFSET0, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP1, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP1, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET1, mask_sh), \ - 
DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP2, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP2, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET2, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP3, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP3, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET3, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP4, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP4, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET4, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP5, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP5, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET5, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP6, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP6, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET6, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP7, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP7, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET7, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP8, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP8, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET8, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP9, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP9, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET9, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP10, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP10, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET10, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP11, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP11, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET11, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP12, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP12, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET12, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP13, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP13, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET13, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP14, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP14, mask_sh), \ - DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET14, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_FORCE, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_DIS, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_STATE, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_FORCE, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_DIS, mask_sh), \ - DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_STATE, mask_sh), \ - DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC_R_Y_SQUARED_ERROR_LOWER, mask_sh), \ - DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC_R_Y_SQUARED_ERROR_UPPER, mask_sh), \ - DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC_G_CB_SQUARED_ERROR_LOWER, mask_sh), \ - DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC_G_CB_SQUARED_ERROR_UPPER, mask_sh), \ - DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC_B_CR_SQUARED_ERROR_LOWER, mask_sh), \ - DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC_B_CR_SQUARED_ERROR_UPPER, mask_sh), \ - DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, 
DSCC_R_Y_MAX_ABS_ERROR, mask_sh), \ - DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_G_CB_MAX_ABS_ERROR, mask_sh), \ - DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR1, DSCC_B_CR_MAX_ABS_ERROR, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_PIXEL_FORMAT, mask_sh), \ - DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_WIDTH, mask_sh), \ - DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_HEIGHT, mask_sh), \ - DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \ - DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh) - - - -#define DSC_FIELD_LIST_DCN20(type)\ - type DSC_CLOCK_EN; \ - type DSC_DISPCLK_R_GATE_DIS; \ - type DSC_DSCCLK_R_GATE_DIS; \ - type DSC_DBG_EN; \ - type DSC_TEST_CLOCK_MUX_SEL; \ - type ICH_RESET_AT_END_OF_LINE; \ - type NUMBER_OF_SLICES_PER_LINE; \ - type ALTERNATE_ICH_ENCODING_EN; \ - type NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION; \ - type DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE; \ - /*type DSCC_DISABLE_ICH;*/ \ - type DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \ - type DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED; \ - type DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED; \ - type DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED; \ - type DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED; \ - type DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED; \ - type DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED; \ - type DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED; \ - type DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED; \ - type DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED; \ - type DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED; \ - type DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED; \ - type DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED; \ - type DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN; \ - type DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN; \ - type DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN; \ - type DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN; \ - type DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN; \ - type DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN; \ - type DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN; \ - type DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN; \ - type DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN; \ - type DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN; \ - type DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN; \ - type 
DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN; \ - type DSC_VERSION_MINOR; \ - type DSC_VERSION_MAJOR; \ - type PPS_IDENTIFIER; \ - type LINEBUF_DEPTH; \ - type DSCC_PPS_CONFIG0__BITS_PER_COMPONENT; \ - type BITS_PER_PIXEL; \ - type VBR_ENABLE; \ - type SIMPLE_422; \ - type CONVERT_RGB; \ - type BLOCK_PRED_ENABLE; \ - type NATIVE_422; \ - type NATIVE_420; \ - type CHUNK_SIZE; \ - type PIC_WIDTH; \ - type PIC_HEIGHT; \ - type SLICE_WIDTH; \ - type SLICE_HEIGHT; \ - type INITIAL_XMIT_DELAY; \ - type INITIAL_DEC_DELAY; \ - type INITIAL_SCALE_VALUE; \ - type SCALE_INCREMENT_INTERVAL; \ - type SCALE_DECREMENT_INTERVAL; \ - type FIRST_LINE_BPG_OFFSET; \ - type SECOND_LINE_BPG_OFFSET; \ - type NFL_BPG_OFFSET; \ - type SLICE_BPG_OFFSET; \ - type NSL_BPG_OFFSET; \ - type SECOND_LINE_OFFSET_ADJ; \ - type INITIAL_OFFSET; \ - type FINAL_OFFSET; \ - type FLATNESS_MIN_QP; \ - type FLATNESS_MAX_QP; \ - type RC_MODEL_SIZE; \ - type RC_EDGE_FACTOR; \ - type RC_QUANT_INCR_LIMIT0; \ - type RC_QUANT_INCR_LIMIT1; \ - type RC_TGT_OFFSET_LO; \ - type RC_TGT_OFFSET_HI; \ - type RC_BUF_THRESH0; \ - type RC_BUF_THRESH1; \ - type RC_BUF_THRESH2; \ - type RC_BUF_THRESH3; \ - type RC_BUF_THRESH4; \ - type RC_BUF_THRESH5; \ - type RC_BUF_THRESH6; \ - type RC_BUF_THRESH7; \ - type RC_BUF_THRESH8; \ - type RC_BUF_THRESH9; \ - type RC_BUF_THRESH10; \ - type RC_BUF_THRESH11; \ - type RC_BUF_THRESH12; \ - type RC_BUF_THRESH13; \ - type RANGE_MIN_QP0; \ - type RANGE_MAX_QP0; \ - type RANGE_BPG_OFFSET0; \ - type RANGE_MIN_QP1; \ - type RANGE_MAX_QP1; \ - type RANGE_BPG_OFFSET1; \ - type RANGE_MIN_QP2; \ - type RANGE_MAX_QP2; \ - type RANGE_BPG_OFFSET2; \ - type RANGE_MIN_QP3; \ - type RANGE_MAX_QP3; \ - type RANGE_BPG_OFFSET3; \ - type RANGE_MIN_QP4; \ - type RANGE_MAX_QP4; \ - type RANGE_BPG_OFFSET4; \ - type RANGE_MIN_QP5; \ - type RANGE_MAX_QP5; \ - type RANGE_BPG_OFFSET5; \ - type RANGE_MIN_QP6; \ - type RANGE_MAX_QP6; \ - type RANGE_BPG_OFFSET6; \ - type RANGE_MIN_QP7; \ - type RANGE_MAX_QP7; \ - type RANGE_BPG_OFFSET7; \ - type RANGE_MIN_QP8; \ - type RANGE_MAX_QP8; \ - type RANGE_BPG_OFFSET8; \ - type RANGE_MIN_QP9; \ - type RANGE_MAX_QP9; \ - type RANGE_BPG_OFFSET9; \ - type RANGE_MIN_QP10; \ - type RANGE_MAX_QP10; \ - type RANGE_BPG_OFFSET10; \ - type RANGE_MIN_QP11; \ - type RANGE_MAX_QP11; \ - type RANGE_BPG_OFFSET11; \ - type RANGE_MIN_QP12; \ - type RANGE_MAX_QP12; \ - type RANGE_BPG_OFFSET12; \ - type RANGE_MIN_QP13; \ - type RANGE_MAX_QP13; \ - type RANGE_BPG_OFFSET13; \ - type RANGE_MIN_QP14; \ - type RANGE_MAX_QP14; \ - type RANGE_BPG_OFFSET14; \ - type DSCC_DEFAULT_MEM_LOW_POWER_STATE; \ - type DSCC_MEM_PWR_FORCE; \ - type DSCC_MEM_PWR_DIS; \ - type DSCC_MEM_PWR_STATE; \ - type DSCC_NATIVE_422_MEM_PWR_FORCE; \ - type DSCC_NATIVE_422_MEM_PWR_DIS; \ - type DSCC_NATIVE_422_MEM_PWR_STATE; \ - type DSCC_R_Y_SQUARED_ERROR_LOWER; \ - type DSCC_R_Y_SQUARED_ERROR_UPPER; \ - type DSCC_G_CB_SQUARED_ERROR_LOWER; \ - type DSCC_G_CB_SQUARED_ERROR_UPPER; \ - type DSCC_B_CR_SQUARED_ERROR_LOWER; \ - type DSCC_B_CR_SQUARED_ERROR_UPPER; \ - type DSCC_R_Y_MAX_ABS_ERROR; \ - type DSCC_G_CB_MAX_ABS_ERROR; \ - type DSCC_B_CR_MAX_ABS_ERROR; \ - type DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL; \ - type DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL; \ - type DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL; \ - type DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL; \ - type DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL; \ - type DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL; \ - type DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL; \ - type 
DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL; \ - type DSCC_UPDATE_PENDING_STATUS; \ - type DSCC_UPDATE_TAKEN_STATUS; \ - type DSCC_UPDATE_TAKEN_ACK; \ - type DSCC_RATE_BUFFER0_FULLNESS_LEVEL; \ - type DSCC_RATE_BUFFER1_FULLNESS_LEVEL; \ - type DSCC_RATE_BUFFER2_FULLNESS_LEVEL; \ - type DSCC_RATE_BUFFER3_FULLNESS_LEVEL; \ - type DSCC_RATE_CONTROL_BUFFER0_FULLNESS_LEVEL; \ - type DSCC_RATE_CONTROL_BUFFER1_FULLNESS_LEVEL; \ - type DSCC_RATE_CONTROL_BUFFER2_FULLNESS_LEVEL; \ - type DSCC_RATE_CONTROL_BUFFER3_FULLNESS_LEVEL; \ - type DSCC_RATE_BUFFER0_INITIAL_XMIT_DELAY_REACHED; \ - type DSCC_RATE_BUFFER1_INITIAL_XMIT_DELAY_REACHED; \ - type DSCC_RATE_BUFFER2_INITIAL_XMIT_DELAY_REACHED; \ - type DSCC_RATE_BUFFER3_INITIAL_XMIT_DELAY_REACHED; \ - type INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN; \ - type INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN; \ - type INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS; \ - type INPUT_PIXEL_FORMAT; \ - type DSCCIF_CONFIG0__BITS_PER_COMPONENT; \ - type DOUBLE_BUFFER_REG_UPDATE_PENDING; \ - type DSCCIF_UPDATE_PENDING_STATUS; \ - type DSCCIF_UPDATE_TAKEN_STATUS; \ - type DSCCIF_UPDATE_TAKEN_ACK; \ - type DSCRM_DSC_FORWARD_EN; \ - type DSCRM_DSC_OPP_PIPE_SOURCE - -struct dcn20_dsc_registers { - uint32_t DSC_TOP_CONTROL; - uint32_t DSC_DEBUG_CONTROL; - uint32_t DSCC_CONFIG0; - uint32_t DSCC_CONFIG1; - uint32_t DSCC_STATUS; - uint32_t DSCC_INTERRUPT_CONTROL_STATUS; - uint32_t DSCC_PPS_CONFIG0; - uint32_t DSCC_PPS_CONFIG1; - uint32_t DSCC_PPS_CONFIG2; - uint32_t DSCC_PPS_CONFIG3; - uint32_t DSCC_PPS_CONFIG4; - uint32_t DSCC_PPS_CONFIG5; - uint32_t DSCC_PPS_CONFIG6; - uint32_t DSCC_PPS_CONFIG7; - uint32_t DSCC_PPS_CONFIG8; - uint32_t DSCC_PPS_CONFIG9; - uint32_t DSCC_PPS_CONFIG10; - uint32_t DSCC_PPS_CONFIG11; - uint32_t DSCC_PPS_CONFIG12; - uint32_t DSCC_PPS_CONFIG13; - uint32_t DSCC_PPS_CONFIG14; - uint32_t DSCC_PPS_CONFIG15; - uint32_t DSCC_PPS_CONFIG16; - uint32_t DSCC_PPS_CONFIG17; - uint32_t DSCC_PPS_CONFIG18; - uint32_t DSCC_PPS_CONFIG19; - uint32_t DSCC_PPS_CONFIG20; - uint32_t DSCC_PPS_CONFIG21; - uint32_t DSCC_PPS_CONFIG22; - uint32_t DSCC_MEM_POWER_CONTROL; - uint32_t DSCC_R_Y_SQUARED_ERROR_LOWER; - uint32_t DSCC_R_Y_SQUARED_ERROR_UPPER; - uint32_t DSCC_G_CB_SQUARED_ERROR_LOWER; - uint32_t DSCC_G_CB_SQUARED_ERROR_UPPER; - uint32_t DSCC_B_CR_SQUARED_ERROR_LOWER; - uint32_t DSCC_B_CR_SQUARED_ERROR_UPPER; - uint32_t DSCC_MAX_ABS_ERROR0; - uint32_t DSCC_MAX_ABS_ERROR1; - uint32_t DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL; - uint32_t DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL; - uint32_t DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL; - uint32_t DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL; - uint32_t DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL; - uint32_t DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL; - uint32_t DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL; - uint32_t DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL; - uint32_t DSCCIF_CONFIG0; - uint32_t DSCCIF_CONFIG1; - uint32_t DSCRM_DSC_FORWARD_CONFIG; -}; - - -struct dcn20_dsc_shift { - DSC_FIELD_LIST_DCN20(uint8_t); -}; - -struct dcn20_dsc_mask { - DSC_FIELD_LIST_DCN20(uint32_t); -}; - -/* DSCCIF_CONFIG.INPUT_PIXEL_FORMAT values */ -enum dsc_pixel_format { - DSC_PIXFMT_RGB, - DSC_PIXFMT_YCBCR444, - DSC_PIXFMT_SIMPLE_YCBCR422, - DSC_PIXFMT_NATIVE_YCBCR422, - DSC_PIXFMT_NATIVE_YCBCR420, - DSC_PIXFMT_UNKNOWN -}; - -struct dsc_reg_values { - /* PPS registers */ - struct drm_dsc_config pps; - - /* Additional registers */ - uint32_t dsc_clock_enable; - uint32_t dsc_clock_gating_disable; - uint32_t underflow_recovery_en; - uint32_t underflow_occurred_int_en; 
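/* The drm_dsc_config above mirrors the PPS verbatim; the fields in this
 * block each shadow a single non-PPS register bit. A minimal usage sketch
 * of the helpers declared further down, given a caller-provided
 * struct dsc_config *dsc_cfg (illustrative, not the exact driver call
 * sites):
 *
 *	struct dsc_reg_values vals;
 *	struct dsc_optc_config optc_cfg;
 *
 *	dsc_init_reg_values(&vals);
 *	if (dsc_prepare_config(dsc_cfg, &vals, &optc_cfg))
 *		; // vals and optc_cfg are ready to be committed
 */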
- uint32_t underflow_occurred_status; - enum dsc_pixel_format pixel_format; - uint32_t ich_reset_at_eol; - uint32_t alternate_ich_encoding_en; - uint32_t num_slices_h; - uint32_t num_slices_v; - uint32_t rc_buffer_model_size; - uint32_t disable_ich; - uint32_t bpp_x32; - uint32_t dsc_dbg_en; - uint32_t rc_buffer_model_overflow_int_en[4]; -}; - -struct dcn20_dsc { - struct display_stream_compressor base; - const struct dcn20_dsc_registers *dsc_regs; - const struct dcn20_dsc_shift *dsc_shift; - const struct dcn20_dsc_mask *dsc_mask; - - struct dsc_reg_values reg_vals; - - int max_image_width; -}; - -void dsc_config_log(struct display_stream_compressor *dsc, - const struct dsc_config *config); - -void dsc_log_pps(struct display_stream_compressor *dsc, - struct drm_dsc_config *pps); - -void dsc_override_rc_params(struct rc_params *rc, - const struct dc_dsc_rc_params_override *override); - -bool dsc_prepare_config(const struct dsc_config *dsc_cfg, - struct dsc_reg_values *dsc_reg_vals, - struct dsc_optc_config *dsc_optc_cfg); - -enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, - bool is_ycbcr422_simple); - -enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth); - -void dsc_init_reg_values(struct dsc_reg_values *reg_vals); - -void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params); - -void dsc2_construct(struct dcn20_dsc *dsc, - struct dc_context *ctx, - int inst, - const struct dcn20_dsc_registers *dsc_regs, - const struct dcn20_dsc_shift *dsc_shift, - const struct dcn20_dsc_mask *dsc_mask); - -void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, - int pixel_clock_100Hz); - -bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, - const struct dsc_config *dsc_cfg, - uint8_t *dsc_packed_pps); - -#endif - diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c deleted file mode 100644 index 884e3e3233..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dce110/dce110_hwseq.h" -#include "dcn10/dcn10_hwseq.h" -#include "dcn20/dcn20_hwseq.h" - -#include "dcn20_init.h" - -static const struct hw_sequencer_funcs dcn20_funcs = { - .program_gamut_remap = dcn10_program_gamut_remap, - .init_hw = dcn10_init_hw, - .power_down_on_boot = dcn10_power_down_on_boot, - .apply_ctx_to_hw = dce110_apply_ctx_to_hw, - .apply_ctx_for_surface = NULL, - .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, - .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, - .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, - .update_plane_addr = dcn20_update_plane_addr, - .update_dchub = dcn10_update_dchub, - .update_pending_status = dcn10_update_pending_status, - .program_output_csc = dcn20_program_output_csc, - .enable_accelerated_mode = dce110_enable_accelerated_mode, - .enable_timing_synchronization = dcn10_enable_timing_synchronization, - .enable_vblanks_synchronization = dcn10_enable_vblanks_synchronization, - .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, - .update_info_frame = dce110_update_info_frame, - .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, - .enable_stream = dcn20_enable_stream, - .disable_stream = dce110_disable_stream, - .unblank_stream = dcn20_unblank_stream, - .blank_stream = dce110_blank_stream, - .enable_audio_stream = dce110_enable_audio_stream, - .disable_audio_stream = dce110_disable_audio_stream, - .disable_plane = dcn20_disable_plane, - .pipe_control_lock = dcn20_pipe_control_lock, - .interdependent_update_lock = dcn10_lock_all_pipes, - .cursor_lock = dcn10_cursor_lock, - .prepare_bandwidth = dcn20_prepare_bandwidth, - .optimize_bandwidth = dcn20_optimize_bandwidth, - .update_bandwidth = dcn20_update_bandwidth, - .set_drr = dcn10_set_drr, - .get_position = dcn10_get_position, - .set_static_screen_control = dcn10_set_static_screen_control, - .setup_stereo = dcn10_setup_stereo, - .set_avmute = dce110_set_avmute, - .log_hw_state = dcn10_log_hw_state, - .get_hw_state = dcn10_get_hw_state, - .clear_status_bits = dcn10_clear_status_bits, - .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, - .edp_power_control = dce110_edp_power_control, - .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, - .set_cursor_position = dcn10_set_cursor_position, - .set_cursor_attribute = dcn10_set_cursor_attribute, - .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .set_clock = dcn10_set_clock, - .get_clock = dcn10_get_clock, - .program_triplebuffer = dcn20_program_triple_buffer, - .enable_writeback = dcn20_enable_writeback, - .disable_writeback = dcn20_disable_writeback, - .dmdata_status_done = dcn20_dmdata_status_done, - .program_dmdata_engine = dcn20_program_dmdata_engine, - .set_dmdata_attributes = dcn20_set_dmdata_attributes, - .init_sys_ctx = dcn20_init_sys_ctx, - .init_vm_ctx = dcn20_init_vm_ctx, - .set_flip_control_gsl = dcn20_set_flip_control_gsl, - .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, - .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_backlight_level = dce110_set_backlight_level, - .set_abm_immediate_disable = dce110_set_abm_immediate_disable, - .set_pipe = dce110_set_pipe, - .enable_lvds_link_output = dce110_enable_lvds_link_output, - .enable_tmds_link_output = dce110_enable_tmds_link_output, - .enable_dp_link_output = 
dce110_enable_dp_link_output, - .disable_link_output = dce110_disable_link_output, - .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, - .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn10_update_visual_confirm_color, -}; - -static const struct hwseq_private_funcs dcn20_private_funcs = { - .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, - .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, - .update_mpcc = dcn20_update_mpcc, - .set_input_transfer_func = dcn20_set_input_transfer_func, - .set_output_transfer_func = dcn20_set_output_transfer_func, - .power_down = dce110_power_down, - .enable_display_power_gating = dcn10_dummy_display_power_gating, - .blank_pixel_data = dcn20_blank_pixel_data, - .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, - .enable_stream_timing = dcn20_enable_stream_timing, - .edp_backlight_control = dce110_edp_backlight_control, - .disable_stream_gating = dcn20_disable_stream_gating, - .enable_stream_gating = dcn20_enable_stream_gating, - .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, - .did_underflow_occur = dcn10_did_underflow_occur, - .init_blank = dcn20_init_blank, - .disable_vga = dcn20_disable_vga, - .bios_golden_init = dcn10_bios_golden_init, - .plane_atomic_disable = dcn20_plane_atomic_disable, - .plane_atomic_power_down = dcn10_plane_atomic_power_down, - .enable_power_gating_plane = dcn20_enable_power_gating_plane, - .dpp_pg_control = dcn20_dpp_pg_control, - .hubp_pg_control = dcn20_hubp_pg_control, - .update_odm = dcn20_update_odm, - .dsc_pg_control = dcn20_dsc_pg_control, - .set_hdr_multiplier = dcn10_set_hdr_multiplier, - .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, - .wait_for_blank_complete = dcn20_wait_for_blank_complete, - .dccg_init = dcn20_dccg_init, - .set_blend_lut = dcn20_set_blend_lut, - .set_shaper_3dlut = dcn20_set_shaper_3dlut, -}; - -void dcn20_hw_sequencer_construct(struct dc *dc) -{ - dc->hwss = dcn20_funcs; - dc->hwseq->funcs = dcn20_private_funcs; - -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h deleted file mode 100644 index 12277797cd..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DC_DCN20_INIT_H__ -#define __DC_DCN20_INIT_H__ - -struct dc; - -void dcn20_hw_sequencer_construct(struct dc *dc); - -#endif /* __DC_DCN20_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c index 0784d01986..fbf1b6370e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c @@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp) (double_buffer_pending == 0); } +bool opp2_dpg_is_pending(struct output_pixel_processor *opp) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + uint32_t double_buffer_pending; + uint32_t dpg_en; + + REG_GET(DPG_CONTROL, DPG_EN, &dpg_en); + + REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending); + + return (dpg_en == 1 && double_buffer_pending == 1); +} + void opp2_program_left_edge_extra_pixel ( struct output_pixel_processor *opp, bool count) @@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = { .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, .dpg_is_blanked = opp2_dpg_is_blanked, + .dpg_is_pending = opp2_dpg_is_pending, .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, .opp_destroy = opp1_destroy, .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h index 3ab221bdd2..8f186abd55 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h @@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions( bool opp2_dpg_is_blanked(struct output_pixel_processor *opp); +bool opp2_dpg_is_pending(struct output_pixel_processor *opp); + void opp2_dpg_set_blank_color( struct output_pixel_processor *opp, const struct tg_color *color); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c deleted file mode 100644 index 58bdbd859b..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ /dev/null @@ -1,587 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "reg_helper.h" -#include "dcn20_optc.h" -#include "dc.h" - -#define REG(reg)\ - optc1->tg_regs->reg - -#define CTX \ - optc1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - optc1->tg_shift->field_name, optc1->tg_mask->field_name - -/** - * optc2_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator. - * - * @optc: timing_generator instance. - * - * Return: If CRTC is enabled, return true. - * - */ -bool optc2_enable_crtc(struct timing_generator *optc) -{ - /* TODO FPGA wait for answer - * OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE - * OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK - */ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - /* opp instance for OTG. For DCN1.0, ODM is removed. - * OPP and OPTC should be in a 1:1 mapping - */ - REG_UPDATE(OPTC_DATA_SOURCE_SELECT, - OPTC_SEG0_SRC_SEL, optc->inst); - - /* VTG enable first is for HW workaround */ - REG_UPDATE(CONTROL, - VTG0_ENABLE, 1); - - REG_SEQ_START(); - - /* Enable CRTC */ - REG_UPDATE_2(OTG_CONTROL, - OTG_DISABLE_POINT_CNTL, 3, - OTG_MASTER_EN, 1); - - REG_SEQ_SUBMIT(); - REG_SEQ_WAIT_DONE(); - - return true; -} - -/** - * optc2_set_gsl() - Assign OTG to GSL groups, - * set one of the OTGs to be the master and the rest slaves - * - * @optc: timing_generator instance. - * @params: pointer to gsl_params - */ -void optc2_set_gsl(struct timing_generator *optc, - const struct gsl_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - -/* - * There are (MAX_OPTC+1)/2 gsl groups available for use. - * In each group, assign an OTG to the group by setting OTG_GSLX_EN = 1, - * set one of the OTGs to be the master (OTG_GSL_MASTER_EN = 1) and the rest are slaves. - */ - REG_UPDATE_5(OTG_GSL_CONTROL, - OTG_GSL0_EN, params->gsl0_en, - OTG_GSL1_EN, params->gsl1_en, - OTG_GSL2_EN, params->gsl2_en, - OTG_GSL_MASTER_EN, params->gsl_master_en, - OTG_GSL_MASTER_MODE, params->gsl_master_mode); -} - - -void optc2_set_gsl_source_select( - struct timing_generator *optc, - int group_idx, - uint32_t gsl_ready_signal) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - switch (group_idx) { - case 1: - REG_UPDATE(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, gsl_ready_signal); - break; - case 2: - REG_UPDATE(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, gsl_ready_signal); - break; - case 3: - REG_UPDATE(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, gsl_ready_signal); - break; - default: - break; - } -} - -/* Set DSC-related configuration. - * dsc_mode: 0 disables DSC, other values enable DSC in specified format - * dsc_bytes_per_pixel: Bytes per pixel in u3.28 format - * dsc_slice_width: Slice width in pixels - */ -void optc2_set_dsc_config(struct timing_generator *optc, - enum optc_dsc_mode dsc_mode, - uint32_t dsc_bytes_per_pixel, - uint32_t dsc_slice_width) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, - OPTC_DSC_MODE, dsc_mode); - - REG_SET(OPTC_BYTES_PER_PIXEL, 0, - OPTC_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel); - - REG_UPDATE(OPTC_WIDTH_CONTROL, - OPTC_DSC_SLICE_WIDTH, dsc_slice_width); -} - -/* Get DSC-related configuration.
- * dsc_mode: 0 disables DSC, other values enable DSC in specified format - */ -void optc2_get_dsc_status(struct timing_generator *optc, - uint32_t *dsc_mode) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_GET(OPTC_DATA_FORMAT_CONTROL, - OPTC_DSC_MODE, dsc_mode); -} - - -/*TEMP: Need to figure out inheritance model here.*/ -bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) -{ - return optc1_is_two_pixels_per_containter(timing); -} - -void optc2_set_odm_bypass(struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t h_div_2 = 0; - - REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 0, - OPTC_SEG0_SRC_SEL, optc->inst, - OPTC_SEG1_SRC_SEL, 0xf); - REG_WRITE(OTG_H_TIMING_CNTL, 0); - - h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing); - REG_UPDATE(OTG_H_TIMING_CNTL, - OTG_H_TIMING_DIV_BY2, h_div_2); - REG_SET(OPTC_MEMORY_CONFIG, 0, - OPTC_MEM_SEL, 0); - optc1->opp_count = 1; -} - -void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, - struct dc_crtc_timing *timing) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right) - / opp_cnt; - uint32_t memory_mask; - - ASSERT(opp_cnt == 2); - - /* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic - * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1); - * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start - * REG_SET_2(OTG_GLOBAL_CONTROL1, 0, - * MASTER_UPDATE_LOCK_DB_X, 160, - * MASTER_UPDATE_LOCK_DB_Y, 240); - */ - - /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192, - * however, for ODM combine we can simplify by always using 4. - * To make sure there's no overlap, each instance "reserves" 2 memories and - * they are uniquely combined here. 
- */ - memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); - - if (REG(OPTC_MEMORY_CONFIG)) - REG_SET(OPTC_MEMORY_CONFIG, 0, - OPTC_MEM_SEL, memory_mask); - - REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 1, - OPTC_SEG0_SRC_SEL, opp_id[0], - OPTC_SEG1_SRC_SEL, opp_id[1]); - - REG_UPDATE(OPTC_WIDTH_CONTROL, - OPTC_SEGMENT_WIDTH, mpcc_hactive); - - REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_BY2, 1); - optc1->opp_count = opp_cnt; -} - -void optc2_get_optc_source(struct timing_generator *optc, - uint32_t *num_of_src_opp, - uint32_t *src_opp_id_0, - uint32_t *src_opp_id_1) -{ - uint32_t num_of_input_segments; - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_GET_3(OPTC_DATA_SOURCE_SELECT, - OPTC_NUM_OF_INPUT_SEGMENT, &num_of_input_segments, - OPTC_SEG0_SRC_SEL, src_opp_id_0, - OPTC_SEG1_SRC_SEL, src_opp_id_1); - - if (num_of_input_segments == 1) - *num_of_src_opp = 2; - else - *num_of_src_opp = 1; - - /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */ - if (*src_opp_id_1 == 0xf) - *num_of_src_opp = 1; -} - -static void optc2_set_dwb_source(struct timing_generator *optc, - uint32_t dwb_pipe_inst) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - if (dwb_pipe_inst == 0) - REG_UPDATE(DWB_SOURCE_SELECT, - OPTC_DWB0_SOURCE_SELECT, optc->inst); - else if (dwb_pipe_inst == 1) - REG_UPDATE(DWB_SOURCE_SELECT, - OPTC_DWB1_SOURCE_SELECT, optc->inst); -} - -static void optc2_align_vblanks( - struct timing_generator *optc_master, - struct timing_generator *optc_slave, - uint32_t master_pixel_clock_100Hz, - uint32_t slave_pixel_clock_100Hz, - uint8_t master_clock_divider, - uint8_t slave_clock_divider) -{ - /* accessing slave OTG registers */ - struct optc *optc1 = DCN10TG_FROM_TG(optc_slave); - - uint32_t master_v_active = 0; - uint32_t master_h_total = 0; - uint32_t slave_h_total = 0; - uint64_t L, XY; - uint32_t X, Y, p = 10000; - uint32_t master_update_lock; - - /* disable slave OTG */ - REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0); - /* wait until disabled */ - REG_WAIT(OTG_CONTROL, - OTG_CURRENT_MASTER_EN_STATE, - 0, 10, 5000); - - REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &slave_h_total); - - /* assign slave OTG to be controlled by master update lock */ - REG_SET(OTG_GLOBAL_CONTROL0, 0, - OTG_MASTER_UPDATE_LOCK_SEL, optc_master->inst); - - /* accessing master OTG registers */ - optc1 = DCN10TG_FROM_TG(optc_master); - - /* saving update lock state, not sure if it's needed */ - REG_GET(OTG_MASTER_UPDATE_LOCK, - OTG_MASTER_UPDATE_LOCK, &master_update_lock); - /* unlocking master OTG */ - REG_SET(OTG_MASTER_UPDATE_LOCK, 0, - OTG_MASTER_UPDATE_LOCK, 0); - - REG_GET(OTG_V_BLANK_START_END, - OTG_V_BLANK_START, &master_v_active); - REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &master_h_total); - - /* calculate when to enable slave OTG */ - L = (uint64_t)p * slave_h_total * master_pixel_clock_100Hz; - L = div_u64(L, master_h_total); - L = div_u64(L, slave_pixel_clock_100Hz); - XY = div_u64(L, p); - Y = master_v_active - XY - 1; - X = div_u64(((XY + 1) * p - L) * master_h_total, p * master_clock_divider); - - /* - * set master OTG to unlock when V/H - * counters reach calculated values - */ - REG_UPDATE(OTG_GLOBAL_CONTROL1, - MASTER_UPDATE_LOCK_DB_EN, 1); - REG_UPDATE_2(OTG_GLOBAL_CONTROL1, - MASTER_UPDATE_LOCK_DB_X, - X, - MASTER_UPDATE_LOCK_DB_Y, - Y); - - /* lock master OTG */ - REG_SET(OTG_MASTER_UPDATE_LOCK, 0, - OTG_MASTER_UPDATE_LOCK, 1); - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, 1, 10); - - /* accessing slave OTG registers */ - optc1 = 
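/* Worked example for the release point computed above, using hypothetical
 * timings with master_clock_divider == 1: p == 10000, master_h_total ==
 * slave_h_total == 2200, master clock 1485000 (148.5 MHz in 100 Hz units),
 * slave clock 1350000. Then L == 10000 * 2200 * 1485000 / 2200 / 1350000 ==
 * 11000, i.e. one slave line costs 1.1 master lines. Splitting that into
 * whole lines and a pixel remainder gives XY == 1, Y == master_v_active - 2
 * and X == (2 * 10000 - 11000) * 2200 / 10000 == 1980. From line Y, pixel X
 * there are 2 * 2200 - 1980 == 2420 master pixels == 1.1 master lines left
 * to V_BLANK_START, exactly one slave line time, so the slave's first line
 * completes as the master enters vblank.
 */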
DCN10TG_FROM_TG(optc_slave); - - /* - * enable slave OTG, the OTG is locked with - * master's update lock, so it will not run - */ - REG_UPDATE(OTG_CONTROL, - OTG_MASTER_EN, 1); - - /* accessing master OTG registers */ - optc1 = DCN10TG_FROM_TG(optc_master); - - /* - * unlock master OTG. When master H/V counters reach - * DB_XY point, slave OTG will start - */ - REG_SET(OTG_MASTER_UPDATE_LOCK, 0, - OTG_MASTER_UPDATE_LOCK, 0); - - /* accessing slave OTG registers */ - optc1 = DCN10TG_FROM_TG(optc_slave); - - /* wait for slave OTG to start running */ - REG_WAIT(OTG_CONTROL, - OTG_CURRENT_MASTER_EN_STATE, - 1, 10, 5000); - - /* accessing master OTG registers */ - optc1 = DCN10TG_FROM_TG(optc_master); - - /* disable the XY point */ - REG_UPDATE(OTG_GLOBAL_CONTROL1, - MASTER_UPDATE_LOCK_DB_EN, 0); - REG_UPDATE_2(OTG_GLOBAL_CONTROL1, - MASTER_UPDATE_LOCK_DB_X, - 0, - MASTER_UPDATE_LOCK_DB_Y, - 0); - - /* restore master update lock */ - REG_SET(OTG_MASTER_UPDATE_LOCK, 0, - OTG_MASTER_UPDATE_LOCK, master_update_lock); - - /* accessing slave OTG registers */ - optc1 = DCN10TG_FROM_TG(optc_slave); - /* restore slave to be controlled by its own */ - REG_SET(OTG_GLOBAL_CONTROL0, 0, - OTG_MASTER_UPDATE_LOCK_SEL, optc_slave->inst); - -} - -void optc2_triplebuffer_lock(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_GLOBAL_CONTROL0, 0, - OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); - - REG_SET(OTG_VUPDATE_KEEPOUT, 0, - OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); - - REG_SET(OTG_MASTER_UPDATE_LOCK, 0, - OTG_MASTER_UPDATE_LOCK, 1); - - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); -} - -void optc2_triplebuffer_unlock(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_MASTER_UPDATE_LOCK, 0, - OTG_MASTER_UPDATE_LOCK, 0); - - REG_SET(OTG_VUPDATE_KEEPOUT, 0, - OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 0); - -} - -void optc2_lock_doublebuffer_enable(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t v_blank_start = 0; - uint32_t h_blank_start = 0; - - REG_UPDATE(OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, 1); - - REG_UPDATE_2(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1, - DIG_UPDATE_LOCATION, 20); - - REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START, &v_blank_start); - - REG_GET(OTG_H_BLANK_START_END, OTG_H_BLANK_START, &h_blank_start); - - REG_UPDATE_2(OTG_GLOBAL_CONTROL1, - MASTER_UPDATE_LOCK_DB_X, - (h_blank_start - 200 - 1) / optc1->opp_count, - MASTER_UPDATE_LOCK_DB_Y, - v_blank_start - 1); - - REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, - MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0, - MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100, - OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); -} - -void optc2_lock_doublebuffer_disable(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE_2(OTG_GLOBAL_CONTROL1, - MASTER_UPDATE_LOCK_DB_X, - 0, - MASTER_UPDATE_LOCK_DB_Y, - 0); - - REG_UPDATE_2(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 0, - DIG_UPDATE_LOCATION, 0); - - REG_UPDATE(OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, 0); -} - -void optc2_setup_manual_trigger(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - /* Set the min/max selectors unconditionally so that - * DMCUB fw may change OTG timings when necessary - * TODO: Remove the w/a after fixing the issue in DMCUB firmware - */ - REG_UPDATE_4(OTG_V_TOTAL_CONTROL, - OTG_V_TOTAL_MIN_SEL, 1, - OTG_V_TOTAL_MAX_SEL, 1, - 
OTG_FORCE_LOCK_ON_EVENT, 0, - OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ - - REG_SET_8(OTG_TRIGA_CNTL, 0, - OTG_TRIGA_SOURCE_SELECT, 21, - OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, - OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1, - OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0, - OTG_TRIGA_POLARITY_SELECT, 0, - OTG_TRIGA_FREQUENCY_SELECT, 0, - OTG_TRIGA_DELAY, 0, - OTG_TRIGA_CLEAR, 1); -} - -void optc2_program_manual_trigger(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_TRIGA_MANUAL_TRIG, 0, - OTG_TRIGA_MANUAL_TRIG, 1); -} - -bool optc2_configure_crc(struct timing_generator *optc, - const struct crc_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET_2(OTG_CRC_CNTL2, 0, - OTG_CRC_DSC_MODE, params->dsc_mode, - OTG_CRC_DATA_STREAM_COMBINE_MODE, params->odm_mode); - - return optc1_configure_crc(optc, params); -} - - -void optc2_get_last_used_drr_vtotal(struct timing_generator *optc, uint32_t *refresh_rate) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_GET(OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, refresh_rate); -} - -static struct timing_generator_funcs dcn20_tg_funcs = { - .validate_timing = optc1_validate_timing, - .program_timing = optc1_program_timing, - .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, - .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, - .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, - .program_global_sync = optc1_program_global_sync, - .enable_crtc = optc2_enable_crtc, - .disable_crtc = optc1_disable_crtc, - /* used by enable_timing_synchronization. Not needed for FPGA */ - .is_counter_moving = optc1_is_counter_moving, - .get_position = optc1_get_position, - .get_frame_count = optc1_get_vblank_counter, - .get_scanoutpos = optc1_get_crtc_scanoutpos, - .get_otg_active_size = optc1_get_otg_active_size, - .set_early_control = optc1_set_early_control, - /* used by enable_timing_synchronization. 
Not needed for FPGA */ - .wait_for_state = optc1_wait_for_state, - .set_blank = optc1_set_blank, - .is_blanked = optc1_is_blanked, - .set_blank_color = optc1_program_blank_color, - .enable_reset_trigger = optc1_enable_reset_trigger, - .enable_crtc_reset = optc1_enable_crtc_reset, - .did_triggered_reset_occur = optc1_did_triggered_reset_occur, - .triplebuffer_lock = optc2_triplebuffer_lock, - .triplebuffer_unlock = optc2_triplebuffer_unlock, - .disable_reset_trigger = optc1_disable_reset_trigger, - .lock = optc1_lock, - .unlock = optc1_unlock, - .lock_doublebuffer_enable = optc2_lock_doublebuffer_enable, - .lock_doublebuffer_disable = optc2_lock_doublebuffer_disable, - .enable_optc_clock = optc1_enable_optc_clock, - .set_drr = optc1_set_drr, - .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, - .set_vtotal_min_max = optc1_set_vtotal_min_max, - .set_static_screen_control = optc1_set_static_screen_control, - .program_stereo = optc1_program_stereo, - .is_stereo_left_eye = optc1_is_stereo_left_eye, - .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer, - .tg_init = optc1_tg_init, - .is_tg_enabled = optc1_is_tg_enabled, - .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, - .clear_optc_underflow = optc1_clear_optc_underflow, - .setup_global_swap_lock = NULL, - .get_crc = optc1_get_crc, - .configure_crc = optc2_configure_crc, - .set_dsc_config = optc2_set_dsc_config, - .get_dsc_status = optc2_get_dsc_status, - .set_dwb_source = optc2_set_dwb_source, - .set_odm_bypass = optc2_set_odm_bypass, - .set_odm_combine = optc2_set_odm_combine, - .get_optc_source = optc2_get_optc_source, - .set_gsl = optc2_set_gsl, - .set_gsl_source_select = optc2_set_gsl_source_select, - .set_vtg_params = optc1_set_vtg_params, - .program_manual_trigger = optc2_program_manual_trigger, - .setup_manual_trigger = optc2_setup_manual_trigger, - .get_hw_timing = optc1_get_hw_timing, - .align_vblanks = optc2_align_vblanks, -}; - -void dcn20_timing_generator_init(struct optc *optc1) -{ - optc1->base.funcs = &dcn20_tg_funcs; - - optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; - optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; - - optc1->min_h_blank = 32; - optc1->min_v_blank = 3; - optc1->min_v_blank_interlace = 5; - optc1->min_h_sync_width = 4; // Minimum HSYNC = 8 pixels was asked by HW in the first place for no actual reason. Oculus Rift S will not light up with 8 as its hsyncWidth is 6. Changing it to 4 to fix that issue. - optc1->min_v_sync_width = 1; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h deleted file mode 100644 index f7968b9ca1..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
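The arithmetic in optc2_align_vblanks() above is worth unpacking: L is the duration of one slave line measured in master lines, scaled by p = 10000 so the division stays in integer math; XY is its whole part, and X/Y place the master's double-buffer unlock point so the slave starts on a clean line boundary. A self-contained sketch (not DC code, hypothetical timings) reproduces the computation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical master/slave timings; values are illustrative only. */
	uint32_t master_pixel_clock_100hz = 1485000, slave_pixel_clock_100hz = 1485000;
	uint32_t master_h_total = 2200, slave_h_total = 2640;
	uint32_t master_v_active = 1125, master_clock_divider = 1;
	uint32_t p = 10000;

	/* One slave line expressed in master lines, scaled by p. */
	uint64_t L = (uint64_t)p * slave_h_total * master_pixel_clock_100hz;
	L /= master_h_total;
	L /= slave_pixel_clock_100hz;

	uint64_t XY = L / p;                       /* whole master lines */
	uint32_t Y = master_v_active - (uint32_t)XY - 1;
	uint32_t X = (uint32_t)(((XY + 1) * p - L) * master_h_total
				/ (p * master_clock_divider));

	/* Prints X=1760 Y=1123 for these inputs. */
	printf("DB unlock point: X=%u Y=%u\n", (unsigned int)X, (unsigned int)Y);
	return 0;
}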
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_OPTC_DCN20_H__ -#define __DC_OPTC_DCN20_H__ - -#include "../dcn10/dcn10_optc.h" - -#define TG_COMMON_REG_LIST_DCN2_0(inst) \ - TG_COMMON_REG_LIST_DCN(inst),\ - SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ - SRI(OTG_GSL_WINDOW_X, OTG, inst),\ - SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ - SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ - SRI(OTG_DSC_START_POSITION, OTG, inst),\ - SRI(OTG_CRC_CNTL2, OTG, inst),\ - SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ - SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ - SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ - SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ - SR(DWB_SOURCE_SELECT),\ - SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst), \ - SRI(OTG_DRR_CONTROL, OTG, inst) - -#define TG_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\ - TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_Y, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, DIG_UPDATE_LOCATION, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ - SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ - SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ - SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\ - SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\ - SF(OTG0_OTG_MANUAL_FLOW_CONTROL, MANUAL_FLOW_CONTROL, mask_sh), \ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) - -void dcn20_timing_generator_init(struct optc *optc); - -void optc2_get_last_used_drr_vtotal(struct timing_generator *optc, - uint32_t *refresh_rate); - -bool optc2_enable_crtc(struct 
timing_generator *optc); - -void optc2_set_gsl(struct timing_generator *optc, - const struct gsl_params *params); - -void optc2_set_gsl_source_select(struct timing_generator *optc, - int group_idx, - uint32_t gsl_ready_signal); - -void optc2_set_dsc_config(struct timing_generator *optc, - enum optc_dsc_mode dsc_mode, - uint32_t dsc_bytes_per_pixel, - uint32_t dsc_slice_width); - -void optc2_get_dsc_status(struct timing_generator *optc, - uint32_t *dsc_mode); - -void optc2_set_odm_bypass(struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing); - -void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, - struct dc_crtc_timing *timing); - -void optc2_get_optc_source(struct timing_generator *optc, - uint32_t *num_of_src_opp, - uint32_t *src_opp_id_0, - uint32_t *src_opp_id_1); - -void optc2_triplebuffer_lock(struct timing_generator *optc); -void optc2_triplebuffer_unlock(struct timing_generator *optc); -void optc2_lock_doublebuffer_disable(struct timing_generator *optc); -void optc2_lock_doublebuffer_enable(struct timing_generator *optc); -void optc2_setup_manual_trigger(struct timing_generator *optc); -void optc2_program_manual_trigger(struct timing_generator *optc); -bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); -bool optc2_configure_crc(struct timing_generator *optc, - const struct crc_params *params); -#endif /* __DC_OPTC_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c deleted file mode 100644 index e73e597548..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ /dev/null @@ -1,2793 +0,0 @@ -/* -* Copyright 2016 Advanced Micro Devices, Inc. - * Copyright 2019 Raptor Engineering, LLC - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
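The TG_COMMON_REG_LIST_DCN2_0 / SRI machinery in the header above is pure token pasting, which the register macros defined a little further down in this file (SR, SRI, SRII, ...) resolve against the generated offset headers. A minimal compilable sketch of the idiom, with invented offsets in place of the real DCN 2.0 ones:

#include <stdio.h>

#define mmOTG0_OTG_CONTROL          0x1b41  /* invented offset */
#define mmOTG0_OTG_CONTROL_BASE_IDX 2       /* invented segment index */

#define BASE(seg) (0x10000u * (seg))        /* stand-in for DCN_BASE__INST0_SEG## */

/* Same shape as the kernel's SRI(): paste block+id+reg into the
 * generated offset names, then add the segment base. */
#define SRI(reg_name, block, id) \
	(BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
	 mm ## block ## id ## _ ## reg_name)

int main(void)
{
	/* Expands to BASE(mmOTG0_OTG_CONTROL_BASE_IDX) + mmOTG0_OTG_CONTROL */
	printf("OTG_CONTROL = 0x%x\n", SRI(OTG_CONTROL, OTG, 0));
	return 0;
}

In the driver the same expansion lands in a designated initializer (.reg_name = ...), which is why each register list macro is invoked once per hardware instance to fill a registers struct.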
- * - * Authors: AMD - * - */ - -#include <linux/slab.h> - -#include "dm_services.h" -#include "dc.h" - -#include "dcn20_init.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn20/dcn20_resource.h" - -#include "dml/dcn20/dcn20_fpu.h" - -#include "dcn10/dcn10_hubp.h" -#include "dcn10/dcn10_ipp.h" -#include "dcn20_hubbub.h" -#include "dcn20_mpc.h" -#include "dcn20_hubp.h" -#include "irq/dcn20/irq_service_dcn20.h" -#include "dcn20_dpp.h" -#include "dcn20_optc.h" -#include "dcn20/dcn20_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn10/dcn10_resource.h" -#include "dcn20_opp.h" - -#include "dcn20_dsc.h" - -#include "dcn20_link_encoder.h" -#include "dcn20_stream_encoder.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "virtual/virtual_stream_encoder.h" -#include "dce110/dce110_resource.h" -#include "dml/display_mode_vba.h" -#include "dcn20_dccg.h" -#include "dcn20_vmid.h" -#include "dce/dce_panel_cntl.h" - -#include "navi10_ip_offset.h" - -#include "dcn/dcn_2_0_0_offset.h" -#include "dcn/dcn_2_0_0_sh_mask.h" -#include "dpcs/dpcs_2_0_0_offset.h" -#include "dpcs/dpcs_2_0_0_sh_mask.h" - -#include "nbio/nbio_2_3_offset.h" - -#include "dcn20/dcn20_dwb.h" -#include "dcn20/dcn20_mmhubbub.h" - -#include "mmhub/mmhub_2_0_0_offset.h" -#include "mmhub/mmhub_2_0_0_sh_mask.h" - -#include "reg_helper.h" -#include "dce/dce_abm.h" -#include "dce/dce_dmcu.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" -#include "vm_helper.h" -#include "link_enc_cfg.h" - -#include "amdgpu_socbb.h" - -#include "link.h" -#define DC_LOGGER_INIT(logger) - -#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL - #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f - #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f - #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f - #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f - #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f - #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f - #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 - #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f - #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 -#endif - - -enum dcn20_clk_src_array_id { - DCN20_CLK_SRC_PLL0, - DCN20_CLK_SRC_PLL1, - DCN20_CLK_SRC_PLL2, - DCN20_CLK_SRC_PLL3, - DCN20_CLK_SRC_PLL4, - DCN20_CLK_SRC_PLL5, - DCN20_CLK_SRC_TOTAL -}; - -/* begin ********************* - * macros to expand register list macro defined in HW object header file */ - -/* DCN */ -#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRI2_DWB(reg_name, block, id)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name -#define SF_DWB(reg_name, field_name, post_fix)\ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define SRIR(var_name, reg_name, block, id)\ - .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## 
_ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define DCCG_SRII(reg_name, block, id)\ - .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - mm ## reg_name ## _ ## block ## id - -/* NBIO */ -#define NBIO_BASE_INNER(seg) \ - NBIO_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -/* MMHUB */ -#define MMHUB_BASE_INNER(seg) \ - MMHUB_BASE__INST0_SEG ## seg - -#define MMHUB_BASE(seg) \ - MMHUB_BASE_INNER(seg) - -#define MMHUB_SR(reg_name)\ - .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \ - mmMM ## reg_name - -static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_3), - NBIO_SR(BIOS_SCRATCH_6) -}; - -#define clk_src_regs(index, pllid)\ -[index] = {\ - CS_COMMON_REG_LIST_DCN2_0(index, pllid),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, C), - clk_src_regs(3, D), - clk_src_regs(4, E), - clk_src_regs(5, F) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -static const struct dce_dmcu_registers dmcu_regs = { - DMCU_DCN10_REG_LIST() -}; - -static const struct dce_dmcu_shift dmcu_shift = { - DMCU_MASK_SH_LIST_DCN10(__SHIFT) -}; - -static const struct dce_dmcu_mask dmcu_mask = { - DMCU_MASK_SH_LIST_DCN10(_MASK) -}; - -static const struct dce_abm_registers abm_regs = { - ABM_DCN20_REG_LIST() -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN20(_MASK) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6), -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define stream_enc_regs(id)\ -[id] = {\ - SE_DCN2_REG_LIST(id)\ -} - -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), - stream_enc_regs(4), - stream_enc_regs(5), -}; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN20(_MASK) -}; - - -#define aux_regs(id)\ -[id] = {\ - DCN2_AUX_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_hpd_registers 
link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4), - hpd_regs(5) -}; - -#define link_regs(id, phyid)\ -[id] = {\ - LE_DCN10_REG_LIST(id), \ - UNIPHY_DCN2_REG_LIST(phyid), \ - DPCS_DCN2_REG_LIST(id), \ - SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ -} - -static const struct dcn10_link_enc_registers link_enc_regs[] = { - link_regs(0, A), - link_regs(1, B), - link_regs(2, C), - link_regs(3, D), - link_regs(4, E), - link_regs(5, F) -}; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\ - DPCS_DCN2_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\ - DPCS_DCN2_MASK_SH_LIST(_MASK) -}; - -static const struct dce_panel_cntl_registers panel_cntl_regs[] = { - { DCN_PANEL_CNTL_REG_LIST() } -}; - -static const struct dce_panel_cntl_shift panel_cntl_shift = { - DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_panel_cntl_mask panel_cntl_mask = { - DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) -}; - -#define ipp_regs(id)\ -[id] = {\ - IPP_REG_LIST_DCN20(id),\ -} - -static const struct dcn10_ipp_registers ipp_regs[] = { - ipp_regs(0), - ipp_regs(1), - ipp_regs(2), - ipp_regs(3), - ipp_regs(4), - ipp_regs(5), -}; - -static const struct dcn10_ipp_shift ipp_shift = { - IPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn10_ipp_mask ipp_mask = { - IPP_MASK_SH_LIST_DCN20(_MASK), -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_REG_LIST_DCN20(id),\ -} - -static const struct dcn20_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3), - opp_regs(4), - opp_regs(5), -}; - -static const struct dcn20_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn20_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN20(_MASK) -}; - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST0(id), \ - .AUXN_IMPCAL = 0, \ - .AUXP_IMPCAL = 0, \ - .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4), - aux_engine_regs(5) -}; - -#define tf_regs(id)\ -[id] = {\ - TF_REG_LIST_DCN20(id),\ - TF_REG_LIST_DCN20_COMMON_APPEND(id),\ -} - -static const struct dcn2_dpp_registers tf_regs[] = { - tf_regs(0), - tf_regs(1), - tf_regs(2), - tf_regs(3), - tf_regs(4), - tf_regs(5), -}; - -static const struct dcn2_dpp_shift tf_shift = { - TF_REG_LIST_SH_MASK_DCN20(__SHIFT), - TF_DEBUG_REG_LIST_SH_DCN20 -}; - -static const struct dcn2_dpp_mask tf_mask = { - TF_REG_LIST_SH_MASK_DCN20(_MASK), - TF_DEBUG_REG_LIST_MASK_DCN20 -}; - -#define dwbc_regs_dcn2(id)\ -[id] = {\ - DWBC_COMMON_REG_LIST_DCN2_0(id),\ - } - -static const struct dcn20_dwbc_registers dwbc20_regs[] = { - dwbc_regs_dcn2(0), -}; - -static const struct dcn20_dwbc_shift dwbc20_shift = { - DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dcn20_dwbc_mask dwbc20_mask = { - DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -#define mcif_wb_regs_dcn2(id)\ -[id] = {\ - MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\ - } - -static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = { - mcif_wb_regs_dcn2(0), -}; - -static const struct dcn20_mmhubbub_shift mcif_wb20_shift = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dcn20_mmhubbub_mask mcif_wb20_mask = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -static const struct 
dcn20_mpc_registers mpc_regs = { - MPC_REG_LIST_DCN2_0(0), - MPC_REG_LIST_DCN2_0(1), - MPC_REG_LIST_DCN2_0(2), - MPC_REG_LIST_DCN2_0(3), - MPC_REG_LIST_DCN2_0(4), - MPC_REG_LIST_DCN2_0(5), - MPC_OUT_MUX_REG_LIST_DCN2_0(0), - MPC_OUT_MUX_REG_LIST_DCN2_0(1), - MPC_OUT_MUX_REG_LIST_DCN2_0(2), - MPC_OUT_MUX_REG_LIST_DCN2_0(3), - MPC_OUT_MUX_REG_LIST_DCN2_0(4), - MPC_OUT_MUX_REG_LIST_DCN2_0(5), - MPC_DBG_REG_LIST_DCN2_0() -}; - -static const struct dcn20_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT), - MPC_DEBUG_REG_LIST_SH_DCN20 -}; - -static const struct dcn20_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK), - MPC_DEBUG_REG_LIST_MASK_DCN20 -}; - -#define tg_regs(id)\ -[id] = {TG_COMMON_REG_LIST_DCN2_0(id)} - - -static const struct dcn_optc_registers tg_regs[] = { - tg_regs(0), - tg_regs(1), - tg_regs(2), - tg_regs(3), - tg_regs(4), - tg_regs(5) -}; - -static const struct dcn_optc_shift tg_shift = { - TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dcn_optc_mask tg_mask = { - TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -#define hubp_regs(id)\ -[id] = {\ - HUBP_REG_LIST_DCN20(id)\ -} - -static const struct dcn_hubp2_registers hubp_regs[] = { - hubp_regs(0), - hubp_regs(1), - hubp_regs(2), - hubp_regs(3), - hubp_regs(4), - hubp_regs(5) -}; - -static const struct dcn_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn_hubp2_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN20(_MASK) -}; - -static const struct dcn_hubbub_registers hubbub_reg = { - HUBBUB_REG_LIST_DCN20(0) -}; - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN20(_MASK) -}; - -#define vmid_regs(id)\ -[id] = {\ - DCN20_VMID_REG_LIST(id)\ -} - -static const struct dcn_vmid_registers vmid_regs[] = { - vmid_regs(0), - vmid_regs(1), - vmid_regs(2), - vmid_regs(3), - vmid_regs(4), - vmid_regs(5), - vmid_regs(6), - vmid_regs(7), - vmid_regs(8), - vmid_regs(9), - vmid_regs(10), - vmid_regs(11), - vmid_regs(12), - vmid_regs(13), - vmid_regs(14), - vmid_regs(15) -}; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -static int map_transmitter_id_to_phy_instance( - enum transmitter transmitter) -{ - switch (transmitter) { - case TRANSMITTER_UNIPHY_A: - return 0; - case TRANSMITTER_UNIPHY_B: - return 1; - case TRANSMITTER_UNIPHY_C: - return 2; - case TRANSMITTER_UNIPHY_D: - return 3; - case TRANSMITTER_UNIPHY_E: - return 4; - case TRANSMITTER_UNIPHY_F: - return 5; - default: - ASSERT(0); - return 0; - } -} - -#define dsc_regsDCN20(id)\ -[id] = {\ - DSC_REG_LIST_DCN20(id)\ -} - -static const struct dcn20_dsc_registers dsc_regs[] = { - dsc_regsDCN20(0), - dsc_regsDCN20(1), - dsc_regsDCN20(2), - dsc_regsDCN20(3), - dsc_regsDCN20(4), - dsc_regsDCN20(5) -}; - -static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) -}; - -static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN20(_MASK) -}; - -static const struct dccg_registers dccg_regs = { - DCCG_REG_LIST_DCN2() -}; - -static const struct dccg_shift dccg_shift 
= { - DCCG_MASK_SH_LIST_DCN2(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN2(_MASK) -}; - -static const struct resource_caps res_cap_nv10 = { - .num_timing_generator = 6, - .num_opp = 6, - .num_video_plane = 6, - .num_audio = 7, - .num_stream_encoder = 6, - .num_pll = 6, - .num_dwb = 1, - .num_ddc = 6, - .num_vmid = 16, - .num_dsc = 6, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 1 - }, - - .max_downscale_factor = { - .argb8888 = 250, - .nv12 = 250, - .fp16 = 1 - }, - 16, - 16 -}; -static const struct resource_caps res_cap_nv14 = { - .num_timing_generator = 5, - .num_opp = 5, - .num_video_plane = 5, - .num_audio = 6, - .num_stream_encoder = 5, - .num_pll = 5, - .num_dwb = 1, - .num_ddc = 5, - .num_vmid = 16, - .num_dsc = 5, -}; - -static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = false, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, - .performance_trace = false, - .max_downscale_src_width = 5120, /* up to 5K */ - .disable_pplib_wm_range = false, - .scl_reset_length10 = true, - .sanity_checks = false, - .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_legacy_fast_update = true, - .using_dml2 = false, -}; - -void dcn20_dpp_destroy(struct dpp **dpp) -{ - kfree(TO_DCN20_DPP(*dpp)); - *dpp = NULL; -} - -struct dpp *dcn20_dpp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_dpp *dpp = - kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC); - - if (!dpp) - return NULL; - - if (dpp2_construct(dpp, ctx, inst, - &tf_regs[inst], &tf_shift, &tf_mask)) - return &dpp->base; - - BREAK_TO_DEBUGGER(); - kfree(dpp); - return NULL; -} - -struct input_pixel_processor *dcn20_ipp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn10_ipp *ipp = - kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC); - - if (!ipp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn20_ipp_construct(ipp, ctx, inst, - &ipp_regs[inst], &ipp_shift, &ipp_mask); - return &ipp->base; -} - - -struct output_pixel_processor *dcn20_opp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_opp *opp = - kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC); - - if (!opp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn20_opp_construct(opp, ctx, inst, - &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -struct dce_aux *dcn20_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5), - i2c_inst_regs(6), -}; - -static const struct dce_i2c_shift i2c_shifts = { - 
I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) -}; - -struct dce_i2c_hw *dcn20_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); - - if (!dce_i2c_hw) - return NULL; - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} -struct mpc *dcn20_mpc_create(struct dc_context *ctx) -{ - struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), - GFP_ATOMIC); - - if (!mpc20) - return NULL; - - dcn20_mpc_construct(mpc20, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - 6); - - return &mpc20->base; -} - -struct hubbub *dcn20_hubbub_create(struct dc_context *ctx) -{ - int i; - struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), - GFP_ATOMIC); - - if (!hubbub) - return NULL; - - hubbub2_construct(hubbub, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask); - - for (i = 0; i < res_cap_nv10.num_vmid; i++) { - struct dcn20_vmid *vmid = &hubbub->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - - return &hubbub->base; -} - -struct timing_generator *dcn20_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_ATOMIC); - - if (!tgn10) - return NULL; - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &tg_regs[instance]; - tgn10->tg_shift = &tg_shift; - tgn10->tg_mask = &tg_mask; - - dcn20_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -struct link_encoder *dcn20_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - int link_regs_id; - - if (!enc20) - return NULL; - - link_regs_id = - map_transmitter_id_to_phy_instance(enc_init_data->transmitter); - - dcn20_link_encoder_construct(enc20, - enc_init_data, - &link_enc_feature, - &link_enc_regs[link_regs_id], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc20->enc10.base; -} - -static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dce_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dce_panel_cntl_construct(panel_cntl, - init_data, - &panel_cntl_regs[init_data->inst], - &panel_cntl_shift, - &panel_cntl_mask); - - return &panel_cntl->base; -} - -static struct clock_source *dcn20_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC); - - if (!clk_src) - return NULL; - - if (dcn20_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = 
dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); -} - -static struct audio *dcn20_create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -struct stream_encoder *dcn20_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1 = - kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - - if (!enc1) - return NULL; - - if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) { - if (eng_id >= ENGINE_ID_DIGD) - eng_id++; - } - - dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCN2_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN2_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN2_MASK_SH_LIST(_MASK) -}; - -struct dce_hwseq *dcn20_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} - -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn20_create_audio, - .create_stream_encoder = dcn20_stream_encoder_create, - .create_hwseq = dcn20_hwseq_create, -}; - -static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu); - -void dcn20_clock_source_destroy(struct clock_source **clk_src) -{ - kfree(TO_DCE110_CLK_SRC(*clk_src)); - *clk_src = NULL; -} - - -struct display_stream_compressor *dcn20_dsc_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - return &dsc->base; -} - -void dcn20_dsc_destroy(struct display_stream_compressor **dsc) -{ - kfree(container_of(*dsc, struct dcn20_dsc, base)); - *dsc = NULL; -} - - -static void dcn20_resource_destruct(struct dcn20_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) { - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - if (pool->base.dscs[i] != NULL) - dcn20_dsc_destroy(&pool->base.dscs[i]); - } - - if (pool->base.mpc != NULL) { - kfree(TO_DCN20_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.dpps[i] != NULL) - dcn20_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } - } - - for (i = 0; i < 
pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dwb; i++) { - if (pool->base.dwbc[i] != NULL) { - kfree(TO_DCN20_DWBC(pool->base.dwbc[i])); - pool->base.dwbc[i] = NULL; - } - if (pool->base.mcif_wb[i] != NULL) { - kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i])); - pool->base.mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn20_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn20_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - - if (pool->base.abm != NULL) - dce_abm_destroy(&pool->base.abm); - - if (pool->base.dmcu != NULL) - dce_dmcu_destroy(&pool->base.dmcu); - - if (pool->base.dccg != NULL) - dcn_dccg_destroy(&pool->base.dccg); - - if (pool->base.pp_smu != NULL) - dcn20_pp_smu_destroy(&pool->base.pp_smu); - - if (pool->base.oem_device != NULL) { - struct dc *dc = pool->base.oem_device->ctx->dc; - - dc->link_srv->destroy_ddc_service(&pool->base.oem_device); - } -} - -struct hubp *dcn20_hubp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC); - - if (!hubp2) - return NULL; - - if (hubp2_construct(hubp2, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp2->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp2); - return NULL; -} - -static void get_pixel_clock_parameters( - struct pipe_ctx *pipe_ctx, - struct pixel_clk_params *pixel_clk_params) -{ - const struct dc_stream_state *stream = pipe_ctx->stream; - struct pipe_ctx *odm_pipe; - int opp_cnt = 1; - struct dc_link *link = stream->link; - struct link_encoder *link_enc = NULL; - struct dc *dc = pipe_ctx->stream->ctx->dc; - struct dce_hwseq *hws = dc->hwseq; - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - - pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; - - link_enc = link_enc_cfg_get_link_enc(link); - if (link_enc) - pixel_clk_params->encoder_object_id = link_enc->id; - - pixel_clk_params->signal_type = pipe_ctx->stream->signal; - pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; - /* TODO: un-hardcode*/ - /* TODO - DP2.0 HW: calculate requested_sym_clk for UHBR rates */ - pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * - LINK_RATE_REF_FREQ_IN_KHZ; - pixel_clk_params->flags.ENABLE_SS = 0; - pixel_clk_params->color_depth = - stream->timing.display_color_depth; - pixel_clk_params->flags.DISPLAY_BLANKED = 1; - pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; - - if 
(stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) - pixel_clk_params->color_depth = COLOR_DEPTH_888; - - if (opp_cnt == 4) - pixel_clk_params->requested_pix_clk_100hz /= 4; - else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2) - pixel_clk_params->requested_pix_clk_100hz /= 2; - else if (hws->funcs.is_dp_dig_pixel_rate_div_policy) { - if (hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx)) - pixel_clk_params->requested_pix_clk_100hz /= 2; - } - - if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) - pixel_clk_params->requested_pix_clk_100hz *= 2; - -} - -static void build_clamping_params(struct dc_stream_state *stream) -{ - stream->clamping.clamping_level = CLAMPING_FULL_RANGE; - stream->clamping.c_depth = stream->timing.display_color_depth; - stream->clamping.pixel_encoding = stream->timing.pixel_encoding; -} - -void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx) -{ - get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); - pipe_ctx->clock_source->funcs->get_pix_clk_dividers( - pipe_ctx->clock_source, - &pipe_ctx->stream_res.pix_clk_params, - &pipe_ctx->pll_settings); -} - -static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) -{ - - dcn20_build_pipe_pix_clk_params(pipe_ctx); - - pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; - - resource_build_bit_depth_reduction_params(pipe_ctx->stream, - &pipe_ctx->stream->bit_depth_params); - build_clamping_params(pipe_ctx->stream); - - return DC_OK; -} - -enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) -{ - enum dc_status status = DC_OK; - struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); - - if (!pipe_ctx) - return DC_ERROR_UNEXPECTED; - - - status = build_pipe_hw_param(pipe_ctx); - - return status; -} - - -void dcn20_acquire_dsc(const struct dc *dc, - struct resource_context *res_ctx, - struct display_stream_compressor **dsc, - int pipe_idx) -{ - int i; - const struct resource_pool *pool = dc->res_pool; - struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc; - - ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */ - *dsc = NULL; - - /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */ - if (pool->res_cap->num_dsc == pool->res_cap->num_opp) { - *dsc = pool->dscs[pipe_idx]; - res_ctx->is_dsc_acquired[pipe_idx] = true; - return; - } - - /* Return old DSC to avoid the need for re-programming */ - if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) { - *dsc = dsc_old; - res_ctx->is_dsc_acquired[dsc_old->inst] = true; - return ; - } - - /* Find first free DSC */ - for (i = 0; i < pool->res_cap->num_dsc; i++) - if (!res_ctx->is_dsc_acquired[i]) { - *dsc = pool->dscs[i]; - res_ctx->is_dsc_acquired[i] = true; - break; - } -} - -void dcn20_release_dsc(struct resource_context *res_ctx, - const struct resource_pool *pool, - struct display_stream_compressor **dsc) -{ - int i; - - for (i = 0; i < pool->res_cap->num_dsc; i++) - if (pool->dscs[i] == *dsc) { - res_ctx->is_dsc_acquired[i] = false; - *dsc = NULL; - break; - } -} - - - -enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, - struct dc_state *dc_ctx, - struct dc_stream_state *dc_stream) -{ - enum dc_status result = DC_OK; - int i; - - /* Get a DSC if required and available */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - 
struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->top_pipe) - continue; - - if (pipe_ctx->stream != dc_stream) - continue; - - if (pipe_ctx->stream_res.dsc) - continue; - - dcn20_acquire_dsc(dc, &dc_ctx->res_ctx, &pipe_ctx->stream_res.dsc, i); - - /* The number of DSCs can be less than the number of pipes */ - if (!pipe_ctx->stream_res.dsc) { - result = DC_NO_DSC_RESOURCE; - } - - break; - } - - return result; -} - - -static enum dc_status remove_dsc_from_stream_resource(struct dc *dc, - struct dc_state *new_ctx, - struct dc_stream_state *dc_stream) -{ - struct pipe_ctx *pipe_ctx = NULL; - int i; - - for (i = 0; i < MAX_PIPES; i++) { - if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) { - pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->stream_res.dsc) - dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc); - } - } - - if (!pipe_ctx) - return DC_ERROR_UNEXPECTED; - else - return DC_OK; -} - - -enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) -{ - enum dc_status result = DC_ERROR_UNEXPECTED; - - result = resource_map_pool_resources(dc, new_ctx, dc_stream); - - if (result == DC_OK) - result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); - - /* Get a DSC if required and available */ - if (result == DC_OK && dc_stream->timing.flags.DSC) - result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream); - - if (result == DC_OK) - result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream); - - return result; -} - - -enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) -{ - enum dc_status result = DC_OK; - - result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream); - - return result; -} - -/** - * dcn20_split_stream_for_odm - Check if stream can be split for ODM - * - * @dc: DC object with resource pool info required for pipe split - * @res_ctx: Persistent state of resources - * @prev_odm_pipe: Reference to the previous ODM pipe - * @next_odm_pipe: Reference to the next ODM pipe - * - * This function takes a logically active pipe and a logically free pipe and - * halves all the scaling parameters that need to be halved while populating - * the free pipe with the required resources and configuring the next/previous - * ODM pipe pointers. - * - * Return: - * Return true if split stream for ODM is possible, otherwise, return false. 
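The left/right halving the kernel-doc above describes can be modelled in isolation: HACTIVE is halved per pipe, the left pipe keeps the part of the recout before the midpoint, and the right pipe keeps the spill-over rebased to x = 0, mirroring the new_width arithmetic in the function body that follows. A toy model, not DC code, with made-up numbers:

#include <stdio.h>

struct rect { int x, width; };

int main(void)
{
	int h_active = 4096;                /* full-timing HACTIVE */
	struct rect recout = { 100, 3800 }; /* plane destination, illustrative */
	int half = h_active / 2;

	/* Left pipe: clipped at the ODM midpoint. */
	struct rect left = { recout.x, half - recout.x };
	/* Right pipe: whatever crosses the midpoint, rebased to 0. */
	struct rect right = { 0, recout.x + recout.width - half };

	/* 1948 + 1852 == 3800: nothing lost, nothing duplicated. */
	printf("left x=%d w=%d | right x=%d w=%d\n",
	       left.x, left.width, right.x, right.width);
	return 0;
}

The viewport adjustments in the real function follow the same idea, with the width deltas scaled through the horizontal ratio in fixed point; the "at least 16 pixels per side" checks reject splits that would leave either half with a sliver.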
- */ -bool dcn20_split_stream_for_odm( - const struct dc *dc, - struct resource_context *res_ctx, - struct pipe_ctx *prev_odm_pipe, - struct pipe_ctx *next_odm_pipe) -{ - int pipe_idx = next_odm_pipe->pipe_idx; - const struct resource_pool *pool = dc->res_pool; - - *next_odm_pipe = *prev_odm_pipe; - - next_odm_pipe->pipe_idx = pipe_idx; - next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx]; - next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx]; - next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx]; - next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx]; - next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx]; - next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst; - next_odm_pipe->stream_res.dsc = NULL; - if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) { - next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe; - next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe; - } - if (prev_odm_pipe->top_pipe && prev_odm_pipe->top_pipe->next_odm_pipe) { - prev_odm_pipe->top_pipe->next_odm_pipe->bottom_pipe = next_odm_pipe; - next_odm_pipe->top_pipe = prev_odm_pipe->top_pipe->next_odm_pipe; - } - if (prev_odm_pipe->bottom_pipe && prev_odm_pipe->bottom_pipe->next_odm_pipe) { - prev_odm_pipe->bottom_pipe->next_odm_pipe->top_pipe = next_odm_pipe; - next_odm_pipe->bottom_pipe = prev_odm_pipe->bottom_pipe->next_odm_pipe; - } - prev_odm_pipe->next_odm_pipe = next_odm_pipe; - next_odm_pipe->prev_odm_pipe = prev_odm_pipe; - - if (prev_odm_pipe->plane_state) { - struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data; - int new_width; - - /* HACTIVE halved for odm combine */ - sd->h_active /= 2; - /* Calculate new vp and recout for left pipe */ - /* Need at least 16 pixels width per side */ - if (sd->recout.x + 16 >= sd->h_active) - return false; - new_width = sd->h_active - sd->recout.x; - sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int( - sd->ratios.horz, sd->recout.width - new_width)); - sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int( - sd->ratios.horz_c, sd->recout.width - new_width)); - sd->recout.width = new_width; - - /* Calculate new vp and recout for right pipe */ - sd = &next_odm_pipe->plane_res.scl_data; - /* HACTIVE halved for odm combine */ - sd->h_active /= 2; - /* Need at least 16 pixels width per side */ - if (new_width <= 16) - return false; - new_width = sd->recout.width + sd->recout.x - sd->h_active; - sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int( - sd->ratios.horz, sd->recout.width - new_width)); - sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int( - sd->ratios.horz_c, sd->recout.width - new_width)); - sd->recout.width = new_width; - sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int( - sd->ratios.horz, sd->h_active - sd->recout.x)); - sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int( - sd->ratios.horz_c, sd->h_active - sd->recout.x)); - sd->recout.x = 0; - } - if (!next_odm_pipe->top_pipe) - next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; - else - next_odm_pipe->stream_res.opp = next_odm_pipe->top_pipe->stream_res.opp; - if (next_odm_pipe->stream->timing.flags.DSC == 1 && !next_odm_pipe->top_pipe) { - dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx); - ASSERT(next_odm_pipe->stream_res.dsc); - if (next_odm_pipe->stream_res.dsc == NULL) - return false; - } - - return true; -} - -void dcn20_split_stream_for_mpc( - struct resource_context *res_ctx, - 
const struct resource_pool *pool, - struct pipe_ctx *primary_pipe, - struct pipe_ctx *secondary_pipe) -{ - int pipe_idx = secondary_pipe->pipe_idx; - struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe; - - *secondary_pipe = *primary_pipe; - secondary_pipe->bottom_pipe = sec_bot_pipe; - - secondary_pipe->pipe_idx = pipe_idx; - secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx]; - secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx]; - secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx]; - secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx]; - secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx]; - secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst; - secondary_pipe->stream_res.dsc = NULL; - if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) { - ASSERT(!secondary_pipe->bottom_pipe); - secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe; - secondary_pipe->bottom_pipe->top_pipe = secondary_pipe; - } - primary_pipe->bottom_pipe = secondary_pipe; - secondary_pipe->top_pipe = primary_pipe; - - ASSERT(primary_pipe->plane_state); -} - -unsigned int dcn20_calc_max_scaled_time( - unsigned int time_per_pixel, - enum mmhubbub_wbif_mode mode, - unsigned int urgent_watermark) -{ - unsigned int time_per_byte = 0; - unsigned int total_y_free_entry = 0x200; /* two memory piece for luma */ - unsigned int total_c_free_entry = 0x140; /* two memory piece for chroma */ - unsigned int small_free_entry, max_free_entry; - unsigned int buf_lh_capability; - unsigned int max_scaled_time; - - if (mode == PACKED_444) /* packed mode */ - time_per_byte = time_per_pixel/4; - else if (mode == PLANAR_420_8BPC) - time_per_byte = time_per_pixel; - else if (mode == PLANAR_420_10BPC) /* p010 */ - time_per_byte = time_per_pixel * 819/1024; - - if (time_per_byte == 0) - time_per_byte = 1; - - small_free_entry = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry; - max_free_entry = (mode == PACKED_444) ? 
total_y_free_entry + total_c_free_entry : small_free_entry; - buf_lh_capability = max_free_entry*time_per_byte*32/16; /* there is 4bit fraction */ - max_scaled_time = buf_lh_capability - urgent_watermark; - return max_scaled_time; -} - -void dcn20_set_mcif_arb_params( - struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt) -{ - enum mmhubbub_wbif_mode wbif_mode; - struct mcif_arb_params *wb_arb_params; - int i, j, dwb_pipe; - - /* Writeback MCIF_WB arbitration parameters */ - dwb_pipe = 0; - for (i = 0; i < dc->res_pool->pipe_count; i++) { - - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - - for (j = 0; j < MAX_DWB_PIPES; j++) { - if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled == false) - continue; - - //wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params; - wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe]; - - if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) { - if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC) - wbif_mode = PLANAR_420_8BPC; - else - wbif_mode = PLANAR_420_10BPC; - } else - wbif_mode = PACKED_444; - - DC_FP_START(); - dcn20_fpu_set_wb_arb_params(wb_arb_params, context, pipes, pipe_cnt, i); - DC_FP_END(); - - wb_arb_params->slice_lines = 32; - wb_arb_params->arbitration_slice = 2; - wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel, - wbif_mode, - wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */ - - dwb_pipe++; - - if (dwb_pipe >= MAX_DWB_PIPES) - return; - } - if (dwb_pipe >= MAX_DWB_PIPES) - return; - } -} - -bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) -{ - int i; - - /* Validate DSC config, dsc count validation is already done */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; - struct dc_stream_state *stream = pipe_ctx->stream; - struct dsc_config dsc_cfg; - struct pipe_ctx *odm_pipe; - int opp_cnt = 1; - - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) - opp_cnt++; - - /* Only need to validate top pipe */ - if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC) - continue; - - dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left - + stream->timing.h_border_right) / opp_cnt; - dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top - + stream->timing.v_border_bottom; - dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; - dsc_cfg.color_depth = stream->timing.display_color_depth; - dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; - dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; - dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; - - if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg)) - return false; - } - return true; -} - -struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, - struct resource_context *res_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *primary_pipe) -{ - struct pipe_ctx *secondary_pipe = NULL; - - if (dc && primary_pipe) { - int j; - int preferred_pipe_idx = 0; - - /* first check the prev dc state: - * if this primary pipe has a bottom pipe in prev. 
state - * and if the bottom pipe is still available (which it should be), - * pick that pipe as secondary - * Same logic applies for ODM pipes - */ - if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) { - preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx; - if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { - secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; - secondary_pipe->pipe_idx = preferred_pipe_idx; - } - } - if (secondary_pipe == NULL && - dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) { - preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx; - if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { - secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; - secondary_pipe->pipe_idx = preferred_pipe_idx; - } - } - - /* - * if this primary pipe does not have a bottom pipe in prev. state - * start backward and find a pipe that did not used to be a bottom pipe in - * prev. dc state. This way we make sure we keep the same assignment as - * last state and will not have to reprogram every pipe - */ - if (secondary_pipe == NULL) { - for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) { - if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL - && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) { - preferred_pipe_idx = j; - - if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { - secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; - secondary_pipe->pipe_idx = preferred_pipe_idx; - break; - } - } - } - } - /* - * We should never hit this assert unless assignments are shuffled around - * if this happens we will prob. hit a vsync tdr - */ - ASSERT(secondary_pipe); - /* - * search backwards for the second pipe to keep pipe - * assignment more consistent - */ - if (secondary_pipe == NULL) { - for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) { - preferred_pipe_idx = j; - - if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { - secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; - secondary_pipe->pipe_idx = preferred_pipe_idx; - break; - } - } - } - } - - return secondary_pipe; -} - -void dcn20_merge_pipes_for_validate( - struct dc *dc, - struct dc_state *context) -{ - int i; - - /* merge previously split odm pipes since mode support needs to make the decision */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *odm_pipe = pipe->next_odm_pipe; - - if (pipe->prev_odm_pipe) - continue; - - pipe->next_odm_pipe = NULL; - while (odm_pipe) { - struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe; - - odm_pipe->plane_state = NULL; - odm_pipe->stream = NULL; - odm_pipe->top_pipe = NULL; - odm_pipe->bottom_pipe = NULL; - odm_pipe->prev_odm_pipe = NULL; - odm_pipe->next_odm_pipe = NULL; - if (odm_pipe->stream_res.dsc) - dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); - /* Clear plane_res and stream_res */ - memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res)); - memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res)); - odm_pipe = next_odm_pipe; - } - if (pipe->plane_state) - resource_build_scaling_params(pipe); - } - - /* merge previously mpc split pipes since mode support needs to make the decision */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; 
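
The buffer-capacity arithmetic in dcn20_calc_max_scaled_time() above can be sanity-checked in isolation. Below is a minimal standalone sketch, assuming PLANAR_420_8BPC and made-up time_per_pixel / urgent-watermark values; the constants (0x200/0x140 free entries, 32 bytes per entry, 4-bit fixed-point fraction) are taken from the function itself:

#include <stdio.h>

/* Simplified mirror of the DCN2.0 MCIF_WB capacity math, for illustration
 * only. For PLANAR_420_8BPC, time_per_byte == time_per_pixel, and the
 * smaller of the luma (0x200) and chroma (0x140) entry pools limits a
 * planar format.
 */
static unsigned int calc_max_scaled_time_420_8bpc(unsigned int time_per_pixel,
						  unsigned int urgent_watermark)
{
	unsigned int time_per_byte = time_per_pixel;	/* PLANAR_420_8BPC */
	unsigned int small_free_entry = 0x140;		/* min(0x200, 0x140) */
	/* 32 bytes per entry; time_per_pixel carries a 4-bit fraction (/16) */
	unsigned int buf_lh_capability = small_free_entry * time_per_byte * 32 / 16;

	return buf_lh_capability - urgent_watermark;
}

int main(void)
{
	/* time_per_pixel = 160, i.e. 10.0 in 4-bit fixed point */
	printf("max scaled time: %u\n", calc_max_scaled_time_420_8bpc(160, 4000));
	return 0;
}
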
- - if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) - continue; - - pipe->bottom_pipe = hsplit_pipe->bottom_pipe; - if (hsplit_pipe->bottom_pipe) - hsplit_pipe->bottom_pipe->top_pipe = pipe; - hsplit_pipe->plane_state = NULL; - hsplit_pipe->stream = NULL; - hsplit_pipe->top_pipe = NULL; - hsplit_pipe->bottom_pipe = NULL; - - /* Clear plane_res and stream_res */ - memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res)); - memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res)); - if (pipe->plane_state) - resource_build_scaling_params(pipe); - } -} - -int dcn20_validate_apply_pipe_split_flags( - struct dc *dc, - struct dc_state *context, - int vlevel, - int *split, - bool *merge) -{ - int i, pipe_idx, vlevel_split; - int plane_count = 0; - bool force_split = false; - bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID; - struct vba_vars_st *v = &context->bw_ctx.dml.vba; - int max_mpc_comb = v->maxMpcComb; - - if (context->stream_count > 1) { - if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) - avoid_split = true; - } else if (dc->debug.force_single_disp_pipe_split) - force_split = true; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - /** - * Workaround for avoiding pipe-split in cases where we'd split - * planes that are too small, resulting in splits that aren't - * valid for the scaler. - */ - if (pipe->plane_state && - (pipe->plane_state->dst_rect.width <= 16 || - pipe->plane_state->dst_rect.height <= 16 || - pipe->plane_state->src_rect.width <= 16 || - pipe->plane_state->src_rect.height <= 16)) - avoid_split = true; - - /* TODO: fix dc bugs and remove this split threshold thing */ - if (pipe->stream && !pipe->prev_odm_pipe && - (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) - ++plane_count; - } - if (plane_count > dc->res_pool->pipe_count / 2) - avoid_split = true; - - /* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - struct dc_crtc_timing timing; - - if (!pipe->stream) - continue; - else { - timing = pipe->stream->timing; - if (timing.h_border_left + timing.h_border_right - + timing.v_border_top + timing.v_border_bottom > 0) { - avoid_split = true; - break; - } - } - } - - /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */ - if (avoid_split) { - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - - for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++) - if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 && - v->ModeSupport[vlevel][0]) - break; - /* Impossible to not split this pipe */ - if (vlevel > context->bw_ctx.dml.soc.num_states) - vlevel = vlevel_split; - else - max_mpc_comb = 0; - pipe_idx++; - } - v->maxMpcComb = max_mpc_comb; - } - - /* Split loop sets which pipe should be split based on dml outputs and dc flags */ - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - int pipe_plane = v->pipe_plane[pipe_idx]; - bool split4mpc = context->stream_count == 1 && plane_count == 1 - && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4; - - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - - if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4) - 
split[i] = 4; - else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2) - split[i] = 2; - - if ((pipe->stream->view_format == - VIEW_3D_FORMAT_SIDE_BY_SIDE || - pipe->stream->view_format == - VIEW_3D_FORMAT_TOP_AND_BOTTOM) && - (pipe->stream->timing.timing_3d_format == - TIMING_3D_FORMAT_TOP_AND_BOTTOM || - pipe->stream->timing.timing_3d_format == - TIMING_3D_FORMAT_SIDE_BY_SIDE)) - split[i] = 2; - if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { - split[i] = 2; - v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1; - } - if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) { - split[i] = 4; - v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1; - } - /*420 format workaround*/ - if (pipe->stream->timing.h_addressable > 7680 && - pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { - split[i] = 4; - } - v->ODMCombineEnabled[pipe_plane] = - v->ODMCombineEnablePerState[vlevel][pipe_plane]; - - if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) { - if (resource_get_mpc_slice_count(pipe) == 2) { - /*If need split for mpc but 2 way split already*/ - if (split[i] == 4) - split[i] = 2; /* 2 -> 4 MPC */ - else if (split[i] == 2) - split[i] = 0; /* 2 -> 2 MPC */ - else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) - merge[i] = true; /* 2 -> 1 MPC */ - } else if (resource_get_mpc_slice_count(pipe) == 4) { - /*If need split for mpc but 4 way split already*/ - if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe) - || !pipe->bottom_pipe)) { - merge[i] = true; /* 4 -> 2 MPC */ - } else if (split[i] == 0 && pipe->top_pipe && - pipe->top_pipe->plane_state == pipe->plane_state) - merge[i] = true; /* 4 -> 1 MPC */ - split[i] = 0; - } else if (resource_get_odm_slice_count(pipe) > 1) { - /* ODM -> MPC transition */ - if (pipe->prev_odm_pipe) { - split[i] = 0; - merge[i] = true; - } - } - } else { - if (resource_get_odm_slice_count(pipe) == 2) { - /*If need split for odm but 2 way split already*/ - if (split[i] == 4) - split[i] = 2; /* 2 -> 4 ODM */ - else if (split[i] == 2) - split[i] = 0; /* 2 -> 2 ODM */ - else if (pipe->prev_odm_pipe) { - ASSERT(0); /* NOT expected yet */ - merge[i] = true; /* exit ODM */ - } - } else if (resource_get_odm_slice_count(pipe) == 4) { - /*If need split for odm but 4 way split already*/ - if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe) - || !pipe->next_odm_pipe)) { - merge[i] = true; /* 4 -> 2 ODM */ - } else if (split[i] == 0 && pipe->prev_odm_pipe) { - ASSERT(0); /* NOT expected yet */ - merge[i] = true; /* exit ODM */ - } - split[i] = 0; - } else if (resource_get_mpc_slice_count(pipe) > 1) { - /* MPC -> ODM transition */ - ASSERT(0); /* NOT expected yet */ - if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { - split[i] = 0; - merge[i] = true; - } - } - } - - /* Adjust dppclk when split is forced, do not bother with dispclk */ - if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) { - DC_FP_START(); - dcn20_fpu_adjust_dppclk(v, vlevel, max_mpc_comb, pipe_idx, false); - DC_FP_END(); - } - pipe_idx++; - } - - return vlevel; -} - -bool dcn20_fast_validate_bw( - struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *pipe_cnt_out, - int *pipe_split_from, - int *vlevel_out, - bool fast_validate) -{ - bool out = false; - int split[MAX_PIPES] = { 0 }; - int pipe_cnt, i, pipe_idx, vlevel; - - 
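
The MPC branch of the split/merge bookkeeping in dcn20_validate_apply_pipe_split_flags() above boils down to a small state transition: the way-count DML asks for is compared with how many MPC slices the pipe already has, and split[i]/merge[i] are rewritten to express only the incremental work. A reduced sketch of that branch (illustrative only; the real code additionally inspects top_pipe/bottom_pipe to decide which sub-pipe merges):

#include <stdbool.h>
#include <stdio.h>

/* Reduced model of the MPC-split adjustment: 'split' is the way-count DML
 * wants, 'cur_slices' is what the pipe already has; the result expresses
 * only the remaining action.
 */
static void adjust_mpc_split(int cur_slices, int *split, bool *merge)
{
	if (cur_slices == 2) {
		if (*split == 4)
			*split = 2;	/* 2 -> 4: split each half once more */
		else if (*split == 2)
			*split = 0;	/* 2 -> 2: nothing to do */
		else
			*merge = true;	/* 2 -> 1: merge back */
	} else if (cur_slices == 4) {
		if (*split == 2)
			*merge = true;	/* 4 -> 2 */
		else if (*split == 0)
			*merge = true;	/* 4 -> 1 */
		*split = 0;		/* never split further from 4-way */
	}
}

int main(void)
{
	int split = 4;
	bool merge = false;

	adjust_mpc_split(2, &split, &merge);
	printf("split=%d merge=%d\n", split, merge);	/* split=2 merge=0 */
	return 0;
}
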
ASSERT(pipes); - if (!pipes) - return false; - - dcn20_merge_pipes_for_validate(dc, context); - - DC_FP_START(); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); - DC_FP_END(); - - *pipe_cnt_out = pipe_cnt; - - if (!pipe_cnt) { - out = true; - goto validate_out; - } - - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - if (vlevel > context->bw_ctx.dml.soc.num_states) - goto validate_fail; - - vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL); - - /*initialize pipe_just_split_from to invalid idx*/ - for (i = 0; i < MAX_PIPES; i++) - pipe_split_from[i] = -1; - - for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; - - if (!pipe->stream || pipe_split_from[i] >= 0) - continue; - - pipe_idx++; - - if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { - hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); - ASSERT(hsplit_pipe); - if (!dcn20_split_stream_for_odm( - dc, &context->res_ctx, - pipe, hsplit_pipe)) - goto validate_fail; - pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; - dcn20_build_mapped_resource(dc, context, pipe->stream); - } - - if (!pipe->plane_state) - continue; - /* Skip 2nd half of already split pipe */ - if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state) - continue; - - /* We do not support mpo + odm at the moment */ - if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state - && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) - goto validate_fail; - - if (split[i] == 2) { - if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) { - /* pipe not split previously needs split */ - hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); - ASSERT(hsplit_pipe); - if (!hsplit_pipe) { - DC_FP_START(); - dcn20_fpu_adjust_dppclk(&context->bw_ctx.dml.vba, vlevel, context->bw_ctx.dml.vba.maxMpcComb, pipe_idx, true); - DC_FP_END(); - continue; - } - if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { - if (!dcn20_split_stream_for_odm( - dc, &context->res_ctx, - pipe, hsplit_pipe)) - goto validate_fail; - dcn20_build_mapped_resource(dc, context, pipe->stream); - } else { - dcn20_split_stream_for_mpc( - &context->res_ctx, dc->res_pool, - pipe, hsplit_pipe); - resource_build_scaling_params(pipe); - resource_build_scaling_params(hsplit_pipe); - } - pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; - } - } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { - /* merge should already have been done */ - ASSERT(0); - } - } - /* Actual dsc count per stream dsc validation*/ - if (!dcn20_validate_dsc(dc, context)) { - context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = - DML_FAIL_DSC_VALIDATION_FAILURE; - goto validate_fail; - } - - *vlevel_out = vlevel; - - out = true; - goto validate_out; - -validate_fail: - out = false; - -validate_out: - return out; -} - -bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) -{ - bool voltage_supported; - display_e2e_pipe_params_st *pipes; - - pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL); - if (!pipes) - return false; - - DC_FP_START(); - voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes); - DC_FP_END(); - - kfree(pipes); - 
return voltage_supported; -} - -struct pipe_ctx *dcn20_acquire_free_pipe_for_layer( - const struct dc_state *cur_ctx, - struct dc_state *new_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *opp_head) -{ - struct resource_context *res_ctx = &new_ctx->res_ctx; - struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(res_ctx, opp_head->stream); - struct pipe_ctx *sec_dpp_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, otg_master); - - ASSERT(otg_master); - - if (!sec_dpp_pipe) - return NULL; - - sec_dpp_pipe->stream = opp_head->stream; - sec_dpp_pipe->stream_res.tg = opp_head->stream_res.tg; - sec_dpp_pipe->stream_res.opp = opp_head->stream_res.opp; - - sec_dpp_pipe->plane_res.hubp = pool->hubps[sec_dpp_pipe->pipe_idx]; - sec_dpp_pipe->plane_res.ipp = pool->ipps[sec_dpp_pipe->pipe_idx]; - sec_dpp_pipe->plane_res.dpp = pool->dpps[sec_dpp_pipe->pipe_idx]; - sec_dpp_pipe->plane_res.mpcc_inst = pool->dpps[sec_dpp_pipe->pipe_idx]->inst; - - return sec_dpp_pipe; -} - -bool dcn20_get_dcc_compression_cap(const struct dc *dc, - const struct dc_dcc_surface_param *input, - struct dc_surface_dcc_cap *output) -{ - return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( - dc->res_pool->hubbub, - input, - output); -} - -static void dcn20_destroy_resource_pool(struct resource_pool **pool) -{ - struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool); - - dcn20_resource_destruct(dcn20_pool); - kfree(dcn20_pool); - *pool = NULL; -} - - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap -}; - - -enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state) -{ - enum surface_pixel_format surf_pix_format = plane_state->format; - unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format); - - plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_S; - if (bpp == 64) - plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_D; - - return DC_OK; -} - -void dcn20_release_pipe(struct dc_state *context, - struct pipe_ctx *pipe, - const struct resource_pool *pool) -{ - if (resource_is_pipe_type(pipe, OPP_HEAD) && pipe->stream_res.dsc) - dcn20_release_dsc(&context->res_ctx, pool, &pipe->stream_res.dsc); - memset(pipe, 0, sizeof(*pipe)); -} - -static const struct resource_funcs dcn20_res_pool_funcs = { - .destroy = dcn20_destroy_resource_pool, - .link_enc_create = dcn20_link_encoder_create, - .panel_cntl_create = dcn20_panel_cntl_create, - .validate_bandwidth = dcn20_validate_bandwidth, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, - .release_pipe = dcn20_release_pipe, - .add_stream_to_ctx = dcn20_add_stream_to_ctx, - .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, - .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, - .set_mcif_arb_params = dcn20_set_mcif_arb_params, - .populate_dml_pipes = dcn20_populate_dml_pipes_from_context, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link -}; - -bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc), - GFP_KERNEL); - - if (!dwbc20) { - dm_error("DC: failed to create dwbc20!\n"); - return false; - } - dcn20_dwbc_construct(dwbc20, ctx, - 
&dwbc20_regs[i], - &dwbc20_shift, - &dwbc20_mask, - i); - pool->dwbc[i] = &dwbc20->base; - } - return true; -} - -bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - ASSERT(pipe_count > 0); - - for (i = 0; i < pipe_count; i++) { - struct dcn20_mmhubbub *mcif_wb20 = kzalloc(sizeof(struct dcn20_mmhubbub), - GFP_KERNEL); - - if (!mcif_wb20) { - dm_error("DC: failed to create mcif_wb20!\n"); - return false; - } - - dcn20_mmhubbub_construct(mcif_wb20, ctx, - &mcif_wb20_regs[i], - &mcif_wb20_shift, - &mcif_wb20_mask, - i); - - pool->mcif_wb[i] = &mcif_wb20->base; - } - return true; -} - -static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) -{ - struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC); - - if (!pp_smu) - return pp_smu; - - dm_pp_get_funcs(ctx, pp_smu); - - if (pp_smu->ctx.ver != PP_SMU_VER_NV) - pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs)); - - return pp_smu; -} - -static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) -{ - if (pp_smu && *pp_smu) { - kfree(*pp_smu); - *pp_smu = NULL; - } -} - -static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( - uint32_t hw_internal_rev) -{ - if (ASICREV_IS_NAVI14_M(hw_internal_rev)) - return &dcn2_0_nv14_soc; - - if (ASICREV_IS_NAVI12_P(hw_internal_rev)) - return &dcn2_0_nv12_soc; - - return &dcn2_0_soc; -} - -static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params( - uint32_t hw_internal_rev) -{ - /* NV14 */ - if (ASICREV_IS_NAVI14_M(hw_internal_rev)) - return &dcn2_0_nv14_ip; - - /* NV12 and NV10 */ - return &dcn2_0_ip; -} - -static enum dml_project get_dml_project_version(uint32_t hw_internal_rev) -{ - return DML_PROJECT_NAVI10v2; -} - -static bool init_soc_bounding_box(struct dc *dc, - struct dcn20_resource_pool *pool) -{ - struct _vcs_dpi_soc_bounding_box_st *loaded_bb = - get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev); - struct _vcs_dpi_ip_params_st *loaded_ip = - get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev); - - DC_LOGGER_INIT(dc->ctx->logger); - - if (pool->base.pp_smu) { - struct pp_smu_nv_clock_table max_clocks = {0}; - unsigned int uclk_states[8] = {0}; - unsigned int num_states = 0; - enum pp_smu_status status; - bool clock_limits_available = false; - bool uclk_states_available = false; - - if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) { - status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) - (&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states); - - uclk_states_available = (status == PP_SMU_RESULT_OK); - } - - if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) { - status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) - (&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks); - /* SMU cannot set DCF clock to anything equal to or higher than SOC clock - */ - if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz) - max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000; - clock_limits_available = (status == PP_SMU_RESULT_OK); - } - - if (clock_limits_available && uclk_states_available && num_states) { - DC_FP_START(); - dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); - DC_FP_END(); - } else if (clock_limits_available) { - DC_FP_START(); - dcn20_cap_soc_clocks(loaded_bb, max_clocks); - DC_FP_END(); - } - } - - loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; - loaded_ip->max_num_dpp = pool->base.pipe_count; - DC_FP_START(); - dcn20_patch_bounding_box(dc, 
loaded_bb); - DC_FP_END(); - return true; -} - -static bool dcn20_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dcn20_resource_pool *pool) -{ - int i; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data init_data; - struct ddc_service_init_data ddc_init_data = {0}; - struct _vcs_dpi_soc_bounding_box_st *loaded_bb = - get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev); - struct _vcs_dpi_ip_params_st *loaded_ip = - get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev); - enum dml_project dml_project_version = - get_dml_project_version(ctx->asic_id.hw_internal_rev); - - ctx->dc_bios->regs = &bios_regs; - pool->base.funcs = &dcn20_res_pool_funcs; - - if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) { - pool->base.res_cap = &res_cap_nv14; - pool->base.pipe_count = 5; - pool->base.mpcc_count = 5; - } else { - pool->base.res_cap = &res_cap_nv10; - pool->base.pipe_count = 6; - pool->base.mpcc_count = 6; - } - /************************************************* - * Resource + asic cap harcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - - dc->caps.max_downscale_ratio = 200; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/ - dc->caps.max_cursor_size = 256; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dmdata_alloc_size = 2048; - - dc->caps.max_slave_planes = 1; - dc->caps.max_slave_yuv_planes = 1; - dc->caps.max_slave_rgb_planes = 1; - dc->caps.post_blend_color_processing = true; - dc->caps.force_dp_tps4_for_cp2520 = true; - dc->caps.extended_aux_timeout_support = true; - - /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 0; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 1; - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.dgam_rom_caps.pq = 0; - dc->caps.color.dpp.dgam_rom_caps.hlg = 0; - dc->caps.color.dpp.post_csc = 0; - dc->caps.color.dpp.gamma_corr = 0; - dc->caps.color.dpp.dgam_rom_for_yuv = 1; - - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; - // no OGAM ROM on DCN2, only MPC ROM - dc->caps.color.dpp.ogam_rom_caps.srgb = 0; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 0; - - dc->caps.color.mpc.gamut_remap = 0; - dc->caps.color.mpc.num_3dluts = 0; - dc->caps.color.mpc.shared_3d_lut = 0; - dc->caps.color.mpc.ogam_ram = 1; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; - dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 1; - - dc->caps.dp_hdmi21_pcon_support = true; - - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) - dc->debug = debug_defaults_drv; - - //dcn2.0x - dc->work_arounds.dedcn20_305_wa = true; - - // Init the vm_helper - if (dc->vm_helper) - vm_helper_init(dc->vm_helper, 16); - - /************************************************* - * Create resources * - *************************************************/ - - pool->base.clock_sources[DCN20_CLK_SRC_PLL0] = - dcn20_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - 
pool->base.clock_sources[DCN20_CLK_SRC_PLL1] = - dcn20_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCN20_CLK_SRC_PLL2] = - dcn20_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCN20_CLK_SRC_PLL3] = - dcn20_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - pool->base.clock_sources[DCN20_CLK_SRC_PLL4] = - dcn20_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - pool->base.clock_sources[DCN20_CLK_SRC_PLL5] = - dcn20_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL5, - &clk_src_regs[5], false); - pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL; - /* todo: not reuse phy_pll registers */ - pool->base.dp_clock_source = - dcn20_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->base.dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - pool->base.dmcu = dcn20_dmcu_create(ctx, - &dmcu_regs, - &dmcu_shift, - &dmcu_mask); - if (pool->base.dmcu == NULL) { - dm_error("DC: failed to create dmcu!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - pool->base.abm = dce_abm_create(ctx, - &abm_regs, - &abm_shift, - &abm_mask); - if (pool->base.abm == NULL) { - dm_error("DC: failed to create abm!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - pool->base.pp_smu = dcn20_pp_smu_create(ctx); - - - if (!init_soc_bounding_box(dc, pool)) { - dm_error("DC: failed to initialize soc bounding box!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - dml_init_instance(&dc->dml, loaded_bb, loaded_ip, dml_project_version); - - if (!dc->debug.disable_pplib_wm_range) { - struct pp_smu_wm_range_sets ranges = {0}; - int i = 0; - - ranges.num_reader_wm_sets = 0; - - if (loaded_bb->num_states == 1) { - ranges.reader_wm_sets[0].wm_inst = i; - ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - - ranges.num_reader_wm_sets = 1; - } else if (loaded_bb->num_states > 1) { - for (i = 0; i < 4 && i < loaded_bb->num_states; i++) { - ranges.reader_wm_sets[i].wm_inst = i; - ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - DC_FP_START(); - dcn20_fpu_set_wm_ranges(i, &ranges, loaded_bb); - DC_FP_END(); - - ranges.num_reader_wm_sets = i + 1; - } - - ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - } - - ranges.num_writer_wm_sets = 1; - - ranges.writer_wm_sets[0].wm_inst = 0; - ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - 
ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - - /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ - if (pool->base.pp_smu->nv_funcs.set_wm_ranges) - pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges); - } - - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dcn20_create(&init_data); - if (!pool->base.irqs) - goto create_fail; - - /* mem input -> ipp -> dpp -> opp -> TG */ - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.hubps[i] = dcn20_hubp_create(ctx, i); - if (pool->base.hubps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create memory input!\n"); - goto create_fail; - } - - pool->base.ipps[i] = dcn20_ipp_create(ctx, i); - if (pool->base.ipps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create input pixel processor!\n"); - goto create_fail; - } - - pool->base.dpps[i] = dcn20_dpp_create(ctx, i); - if (pool->base.dpps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create dpps!\n"); - goto create_fail; - } - } - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dcn20_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto create_fail; - } - pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create hw i2c!!\n"); - goto create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - pool->base.opps[i] = dcn20_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.timing_generators[i] = dcn20_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - - pool->base.timing_generator_count = i; - - pool->base.mpc = dcn20_mpc_create(ctx); - if (pool->base.mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - pool->base.hubbub = dcn20_hubbub_create(ctx); - if (pool->base.hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - pool->base.dscs[i] = dcn20_dsc_create(ctx, i); - if (pool->base.dscs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create display stream compressor %d!\n", i); - goto create_fail; - } - } - - if (!dcn20_dwbc_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dwbc!\n"); - goto create_fail; - } - if (!dcn20_mmhubbub_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mcif_wb!\n"); - goto create_fail; - } - - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto create_fail; - - dcn20_hw_sequencer_construct(dc); - - // IF NV12, set PG function pointer to NULL. 
It's not that - // PG isn't supported for NV12, it's that we don't want to - // program the registers because that will cause more power - // to be consumed. We could have created dcn20_init_hw to get - // the same effect by checking ASIC rev, but there was a - // request at some point to not check ASIC rev on hw sequencer. - if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) { - dc->hwseq->funcs.enable_power_gating_plane = NULL; - dc->debug.disable_dpp_power_gate = true; - dc->debug.disable_hubp_power_gate = true; - } - - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { - ddc_init_data.ctx = dc->ctx; - ddc_init_data.link = NULL; - ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; - ddc_init_data.id.enum_id = 0; - ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); - } else { - pool->base.oem_device = NULL; - } - - return true; - -create_fail: - - dcn20_resource_destruct(pool); - - return false; -} - -struct resource_pool *dcn20_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc) -{ - struct dcn20_resource_pool *pool = - kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC); - - if (!pool) - return NULL; - - if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool)) - return &pool->base; - - BREAK_TO_DEBUGGER(); - kfree(pool); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h deleted file mode 100644 index 4cee3fa11a..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ /dev/null @@ -1,171 +0,0 @@ -/* -* Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
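
dcn20_create_resource_pool() above follows the standard DC constructor idiom: zero-allocate the wrapper, run the construct routine, and back out completely on failure so callers only ever see a fully built pool or NULL (the construct routine tears down whatever it built via its create_fail path). A generic userspace sketch of the same idiom, with hypothetical my_pool/my_construct names and plain calloc standing in for kzalloc(GFP_ATOMIC):

#include <stdbool.h>
#include <stdlib.h>

struct my_pool {
	int pipe_count;		/* stands in for the hw sub-objects */
};

/* On failure, the constructor must free anything it built itself. */
static bool my_construct(struct my_pool *pool)
{
	pool->pipe_count = 6;
	return true;
}

static struct my_pool *my_create_pool(void)
{
	struct my_pool *pool = calloc(1, sizeof(*pool));

	if (!pool)
		return NULL;
	if (my_construct(pool))
		return pool;
	free(pool);	/* constructor failed: nothing half-built escapes */
	return NULL;
}

int main(void)
{
	struct my_pool *pool = my_create_pool();

	if (pool)
		free(pool);
	return 0;
}
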
- * - * Authors: AMD - * - */ - -#ifndef __DC_RESOURCE_DCN20_H__ -#define __DC_RESOURCE_DCN20_H__ - -#include "core_types.h" -#include "dml/dcn20/dcn20_fpu.h" - -#define TO_DCN20_RES_POOL(pool)\ - container_of(pool, struct dcn20_resource_pool, base) - -struct dc; -struct resource_pool; -struct _vcs_dpi_display_pipe_params_st; - -extern struct _vcs_dpi_ip_params_st dcn2_0_ip; -extern struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip; -extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc; -extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc; -extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc; - -struct dcn20_resource_pool { - struct resource_pool base; -}; -struct resource_pool *dcn20_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); - -struct link_encoder *dcn20_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data); - -unsigned int dcn20_calc_max_scaled_time( - unsigned int time_per_pixel, - enum mmhubbub_wbif_mode mode, - unsigned int urgent_watermark); - -struct pipe_ctx *dcn20_acquire_free_pipe_for_layer( - const struct dc_state *cur_ctx, - struct dc_state *new_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *opp_head_pipe); -void dcn20_release_pipe(struct dc_state *context, - struct pipe_ctx *pipe, - const struct resource_pool *pool); -struct stream_encoder *dcn20_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx); - -struct dce_hwseq *dcn20_hwseq_create( - struct dc_context *ctx); - -bool dcn20_get_dcc_compression_cap(const struct dc *dc, - const struct dc_dcc_surface_param *input, - struct dc_surface_dcc_cap *output); - -void dcn20_dpp_destroy(struct dpp **dpp); - -struct dpp *dcn20_dpp_create( - struct dc_context *ctx, - uint32_t inst); - -struct input_pixel_processor *dcn20_ipp_create( - struct dc_context *ctx, uint32_t inst); - -struct output_pixel_processor *dcn20_opp_create( - struct dc_context *ctx, uint32_t inst); - -struct dce_aux *dcn20_aux_engine_create( - struct dc_context *ctx, uint32_t inst); - -struct dce_i2c_hw *dcn20_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst); - -void dcn20_clock_source_destroy(struct clock_source **clk_src); - -struct display_stream_compressor *dcn20_dsc_create( - struct dc_context *ctx, uint32_t inst); -void dcn20_dsc_destroy(struct display_stream_compressor **dsc); - -struct hubp *dcn20_hubp_create( - struct dc_context *ctx, - uint32_t inst); -struct timing_generator *dcn20_timing_generator_create( - struct dc_context *ctx, - uint32_t instance); -struct mpc *dcn20_mpc_create(struct dc_context *ctx); -struct hubbub *dcn20_hubbub_create(struct dc_context *ctx); - -bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool); -bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool); - -void dcn20_set_mcif_arb_params( - struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt); -bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); -void dcn20_merge_pipes_for_validate( - struct dc *dc, - struct dc_state *context); -int dcn20_validate_apply_pipe_split_flags( - struct dc *dc, - struct dc_state *context, - int vlevel, - int *split, - bool *merge); -void dcn20_release_dsc(struct resource_context *res_ctx, - const struct resource_pool *pool, - struct display_stream_compressor **dsc); -bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx); -void dcn20_split_stream_for_mpc( - struct resource_context 
*res_ctx, - const struct resource_pool *pool, - struct pipe_ctx *primary_pipe, - struct pipe_ctx *secondary_pipe); -bool dcn20_split_stream_for_odm( - const struct dc *dc, - struct resource_context *res_ctx, - struct pipe_ctx *prev_odm_pipe, - struct pipe_ctx *next_odm_pipe); -void dcn20_acquire_dsc(const struct dc *dc, - struct resource_context *res_ctx, - struct display_stream_compressor **dsc, - int pipe_idx); -struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, - struct resource_context *res_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *primary_pipe); -bool dcn20_fast_validate_bw( - struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *pipe_cnt_out, - int *pipe_split_from, - int *vlevel_out, - bool fast_validate); - -enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); -enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); -enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream); -enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); -enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state); -void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx); - -#endif /* __DC_RESOURCE_DCN20_H__ */ - diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile index 3a41a97b07..2b0b4f32e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile @@ -1,9 +1,8 @@ # SPDX-License-Identifier: MIT # # Makefile for DCN. -DCN201 = dcn201_init.o dcn201_resource.o \ - dcn201_hubbub.o\ - dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_optc.o dcn201_dpp.o \ +DCN201 = dcn201_hubbub.o\ + dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \ dcn201_dccg.o dcn201_link_encoder.o AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c deleted file mode 100644 index a13bf6c938..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dce110/dce110_hwseq.h" -#include "dcn10/dcn10_hwseq.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn201/dcn201_hwseq.h" -#include "dcn201_init.h" - -static const struct hw_sequencer_funcs dcn201_funcs = { - .program_gamut_remap = dcn10_program_gamut_remap, - .init_hw = dcn201_init_hw, - .power_down_on_boot = NULL, - .apply_ctx_to_hw = dce110_apply_ctx_to_hw, - .apply_ctx_for_surface = NULL, - .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, - .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, - .post_unlock_program_front_end = dcn10_post_unlock_program_front_end, - .update_plane_addr = dcn201_update_plane_addr, - .update_dchub = dcn10_update_dchub, - .update_pending_status = dcn10_update_pending_status, - .program_output_csc = dcn20_program_output_csc, - .enable_accelerated_mode = dce110_enable_accelerated_mode, - .enable_timing_synchronization = dcn10_enable_timing_synchronization, - .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, - .update_info_frame = dce110_update_info_frame, - .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, - .enable_stream = dce110_enable_stream, - .disable_stream = dce110_disable_stream, - .unblank_stream = dcn201_unblank_stream, - .blank_stream = dce110_blank_stream, - .enable_audio_stream = dce110_enable_audio_stream, - .disable_audio_stream = dce110_disable_audio_stream, - .disable_plane = dcn10_disable_plane, - .pipe_control_lock = dcn201_pipe_control_lock, - .interdependent_update_lock = dcn10_lock_all_pipes, - .cursor_lock = dcn10_cursor_lock, - .prepare_bandwidth = dcn20_prepare_bandwidth, - .optimize_bandwidth = dcn20_optimize_bandwidth, - .update_bandwidth = dcn20_update_bandwidth, - .set_drr = dcn10_set_drr, - .get_position = dcn10_get_position, - .set_static_screen_control = dcn10_set_static_screen_control, - .setup_stereo = dcn10_setup_stereo, - .set_avmute = dce110_set_avmute, - .log_hw_state = dcn10_log_hw_state, - .get_hw_state = dcn10_get_hw_state, - .clear_status_bits = dcn10_clear_status_bits, - .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, - .edp_power_control = dce110_edp_power_control, - .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, - .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .set_clock = dcn10_set_clock, - .get_clock = dcn10_get_clock, - .program_triplebuffer = dcn20_program_triple_buffer, - .dmdata_status_done = dcn20_dmdata_status_done, - .set_dmdata_attributes = dcn201_set_dmdata_attributes, - .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, - .calc_vupdate_position = dcn10_calc_vupdate_position, - .set_cursor_position = dcn10_set_cursor_position, - .set_cursor_attribute = dcn201_set_cursor_attribute, - .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .set_backlight_level = dce110_set_backlight_level, - .set_abm_immediate_disable = dce110_set_abm_immediate_disable, - .set_pipe = dce110_set_pipe, - .enable_lvds_link_output = dce110_enable_lvds_link_output, - .enable_tmds_link_output = dce110_enable_tmds_link_output, - .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dce110_disable_link_output, - .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, - .update_visual_confirm_color = dcn10_update_visual_confirm_color, -}; - -static const struct hwseq_private_funcs dcn201_private_funcs = { - .init_pipes = NULL, - .update_plane_addr = 
dcn201_update_plane_addr, - .plane_atomic_disconnect = dcn201_plane_atomic_disconnect, - .program_pipe = dcn10_program_pipe, - .update_mpcc = dcn201_update_mpcc, - .set_input_transfer_func = dcn20_set_input_transfer_func, - .set_output_transfer_func = dcn20_set_output_transfer_func, - .power_down = dce110_power_down, - .enable_display_power_gating = dcn10_dummy_display_power_gating, - .blank_pixel_data = dcn20_blank_pixel_data, - .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, - .enable_stream_timing = dcn20_enable_stream_timing, - .edp_backlight_control = dce110_edp_backlight_control, - .disable_stream_gating = NULL, - .enable_stream_gating = NULL, - .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, - .did_underflow_occur = dcn10_did_underflow_occur, - .init_blank = dcn201_init_blank, - .disable_vga = dcn10_disable_vga, - .bios_golden_init = dcn10_bios_golden_init, - .plane_atomic_disable = dcn10_plane_atomic_disable, - .plane_atomic_power_down = dcn10_plane_atomic_power_down, - .enable_power_gating_plane = dcn10_enable_power_gating_plane, - .dpp_pg_control = dcn10_dpp_pg_control, - .hubp_pg_control = dcn10_hubp_pg_control, - .dsc_pg_control = NULL, - .set_hdr_multiplier = dcn10_set_hdr_multiplier, - .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, - .wait_for_blank_complete = dcn20_wait_for_blank_complete, - .dccg_init = dcn20_dccg_init, - .set_blend_lut = dcn20_set_blend_lut, - .set_shaper_3dlut = dcn20_set_shaper_3dlut, -}; - -void dcn201_hw_sequencer_construct(struct dc *dc) -{ - dc->hwss = dcn201_funcs; - dc->hwseq->funcs = dcn201_private_funcs; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h deleted file mode 100644 index 1168887b03..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DC_DCN201_INIT_H__ -#define __DC_DCN201_INIT_H__ - -struct dc; - -void dcn201_hw_sequencer_construct(struct dc *dc); - -#endif /* __DC_DCN201_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c index 8e77db46a4..6a71ba3dfc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c @@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = { .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, .dpg_is_blanked = opp2_dpg_is_blanked, + .dpg_is_pending = opp2_dpg_is_pending, .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, .opp_destroy = opp1_destroy, .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c deleted file mode 100644 index 70fcbec03f..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "reg_helper.h" -#include "dcn201_optc.h" -#include "dcn10/dcn10_optc.h" -#include "dc.h" - -#define REG(reg)\ - optc1->tg_regs->reg - -#define CTX \ - optc1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - optc1->tg_shift->field_name, optc1->tg_mask->field_name - -/*TEMP: Need to figure out inheritance model here.*/ -bool optc201_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) -{ - return optc1_is_two_pixels_per_containter(timing); -} - -static void optc201_triplebuffer_lock(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_GLOBAL_CONTROL0, 0, - OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); - REG_SET(OTG_VUPDATE_KEEPOUT, 0, - OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); - REG_SET(OTG_MASTER_UPDATE_LOCK, 0, - OTG_MASTER_UPDATE_LOCK, 1); - - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); -} - -static void optc201_triplebuffer_unlock(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET(OTG_MASTER_UPDATE_LOCK, 0, - OTG_MASTER_UPDATE_LOCK, 0); - REG_SET(OTG_VUPDATE_KEEPOUT, 0, - OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 0); - -} - -static bool optc201_validate_timing( - struct timing_generator *optc, - const struct dc_crtc_timing *timing) -{ - uint32_t v_blank; - uint32_t h_blank; - uint32_t min_v_blank; - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - ASSERT(timing != NULL); - - v_blank = (timing->v_total - timing->v_addressable - - timing->v_border_top - timing->v_border_bottom); - - h_blank = (timing->h_total - timing->h_addressable - - timing->h_border_right - - timing->h_border_left); - - if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE && - timing->timing_3d_format != TIMING_3D_FORMAT_HW_FRAME_PACKING && - timing->timing_3d_format != TIMING_3D_FORMAT_TOP_AND_BOTTOM && - timing->timing_3d_format != TIMING_3D_FORMAT_SIDE_BY_SIDE && - timing->timing_3d_format != TIMING_3D_FORMAT_FRAME_ALTERNATE && - timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA) - return false; - - /* Check maximum number of pixels supported by Timing Generator - * (Currently will never fail, in order to fail needs display which - * needs more than 8192 horizontal and - * more than 8192 vertical total pixels) - */ - if (timing->h_total > optc1->max_h_total || - timing->v_total > optc1->max_v_total) - return false; - - if (h_blank < optc1->min_h_blank) - return false; - - if (timing->h_sync_width < optc1->min_h_sync_width || - timing->v_sync_width < optc1->min_v_sync_width) - return false; - - min_v_blank = timing->flags.INTERLACE?optc1->min_v_blank_interlace:optc1->min_v_blank; - - if (v_blank < min_v_blank) - return false; - - return true; - -} - -static void optc201_get_optc_source(struct timing_generator *optc, - uint32_t *num_of_src_opp, - uint32_t *src_opp_id_0, - uint32_t *src_opp_id_1) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_GET(OPTC_DATA_SOURCE_SELECT, - OPTC_SEG0_SRC_SEL, src_opp_id_0); - - *num_of_src_opp = 1; -} - -static struct timing_generator_funcs dcn201_tg_funcs = { - .validate_timing = optc201_validate_timing, - .program_timing = optc1_program_timing, - .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, - .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, - .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, - .program_global_sync = optc1_program_global_sync, - .enable_crtc = optc2_enable_crtc, - .disable_crtc = optc1_disable_crtc, - /* used by 
enable_timing_synchronization. Not need for FPGA */ - .is_counter_moving = optc1_is_counter_moving, - .get_position = optc1_get_position, - .get_frame_count = optc1_get_vblank_counter, - .get_scanoutpos = optc1_get_crtc_scanoutpos, - .get_otg_active_size = optc1_get_otg_active_size, - .set_early_control = optc1_set_early_control, - /* used by enable_timing_synchronization. Not need for FPGA */ - .wait_for_state = optc1_wait_for_state, - .set_blank = optc1_set_blank, - .is_blanked = optc1_is_blanked, - .set_blank_color = optc1_program_blank_color, - .did_triggered_reset_occur = optc1_did_triggered_reset_occur, - .enable_reset_trigger = optc1_enable_reset_trigger, - .enable_crtc_reset = optc1_enable_crtc_reset, - .disable_reset_trigger = optc1_disable_reset_trigger, - .triplebuffer_lock = optc201_triplebuffer_lock, - .triplebuffer_unlock = optc201_triplebuffer_unlock, - .lock = optc1_lock, - .unlock = optc1_unlock, - .enable_optc_clock = optc1_enable_optc_clock, - .set_drr = optc1_set_drr, - .get_last_used_drr_vtotal = NULL, - .set_vtotal_min_max = optc1_set_vtotal_min_max, - .set_static_screen_control = optc1_set_static_screen_control, - .program_stereo = optc1_program_stereo, - .is_stereo_left_eye = optc1_is_stereo_left_eye, - .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer, - .tg_init = optc1_tg_init, - .is_tg_enabled = optc1_is_tg_enabled, - .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, - .clear_optc_underflow = optc1_clear_optc_underflow, - .get_crc = optc1_get_crc, - .configure_crc = optc2_configure_crc, - .set_dsc_config = optc2_set_dsc_config, - .set_dwb_source = NULL, - .get_optc_source = optc201_get_optc_source, - .set_vtg_params = optc1_set_vtg_params, - .program_manual_trigger = optc2_program_manual_trigger, - .setup_manual_trigger = optc2_setup_manual_trigger, - .get_hw_timing = optc1_get_hw_timing, -}; - -void dcn201_timing_generator_init(struct optc *optc1) -{ - optc1->base.funcs = &dcn201_tg_funcs; - - optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; - optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; - - optc1->min_h_blank = 32; - optc1->min_v_blank = 3; - optc1->min_v_blank_interlace = 5; - optc1->min_h_sync_width = 8; - optc1->min_v_sync_width = 1; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h deleted file mode 100644 index e9545b7351..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
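
optc201_validate_timing() above derives the horizontal and vertical blanking intervals from the timing totals and rejects anything below the per-ASIC minimums. The arithmetic is easy to check standalone; this sketch hard-codes the limits that dcn201_timing_generator_init() programs (min_h_blank = 32, min_v_blank = 3) and a made-up 1920x1080 timing:

#include <stdbool.h>
#include <stdio.h>

struct crtc_timing {
	unsigned int h_total, h_addressable, h_border_left, h_border_right;
	unsigned int v_total, v_addressable, v_border_top, v_border_bottom;
};

/* Mirrors the blank-interval checks; the minimums match what
 * dcn201_timing_generator_init() programs for non-interlaced timings.
 */
static bool blank_ok(const struct crtc_timing *t)
{
	unsigned int h_blank = t->h_total - t->h_addressable -
			       t->h_border_left - t->h_border_right;
	unsigned int v_blank = t->v_total - t->v_addressable -
			       t->v_border_top - t->v_border_bottom;

	return h_blank >= 32 && v_blank >= 3;
}

int main(void)
{
	/* 1920x1080 with 2200x1125 totals: h_blank = 280, v_blank = 45 */
	struct crtc_timing t = { 2200, 1920, 0, 0, 1125, 1080, 0, 0 };

	printf("timing %s\n", blank_ok(&t) ? "ok" : "rejected");
	return 0;
}
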
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_OPTC_DCN201_H__ -#define __DC_OPTC_DCN201_H__ - -#include "dcn20/dcn20_optc.h" - -#define TG_COMMON_REG_LIST_DCN201(inst) \ - TG_COMMON_REG_LIST_DCN(inst),\ - SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ - SRI(OTG_GSL_WINDOW_X, OTG, inst),\ - SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ - SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ - SRI(OTG_DSC_START_POSITION, OTG, inst),\ - SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ - SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ - SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ - SR(DWB_SOURCE_SELECT) - -#define TG_COMMON_MASK_SH_LIST_DCN201(mask_sh)\ - TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_Y, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ - SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ - SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ - SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\ - SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\ - SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh) - -void dcn201_timing_generator_init(struct optc *optc); - -bool optc201_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c deleted file mode 100644 index bca22d8676..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ /dev/null @@ -1,1308 +0,0 @@ -/* -* Copyright 2016 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "dc.h" - -#include "dcn201_init.h" -#include "dml/dcn20/dcn20_fpu.h" -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn201_resource.h" - -#include "dcn20/dcn20_resource.h" - -#include "dcn10/dcn10_hubp.h" -#include "dcn10/dcn10_ipp.h" -#include "dcn201_mpc.h" -#include "dcn201_hubp.h" -#include "irq/dcn201/irq_service_dcn201.h" -#include "dcn201/dcn201_dpp.h" -#include "dcn201/dcn201_hubbub.h" -#include "dcn201_dccg.h" -#include "dcn201_optc.h" -#include "dcn201/dcn201_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn201_opp.h" -#include "dcn201/dcn201_link_encoder.h" -#include "dcn20/dcn20_stream_encoder.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "virtual/virtual_stream_encoder.h" -#include "dce110/dce110_resource.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" -#include "dcn201_hubbub.h" -#include "dcn10/dcn10_resource.h" - -#include "cyan_skillfish_ip_offset.h" - -#include "dcn/dcn_2_0_3_offset.h" -#include "dcn/dcn_2_0_3_sh_mask.h" -#include "dpcs/dpcs_2_0_3_offset.h" -#include "dpcs/dpcs_2_0_3_sh_mask.h" - -#include "mmhub/mmhub_2_0_0_offset.h" -#include "mmhub/mmhub_2_0_0_sh_mask.h" -#include "nbio/nbio_7_4_offset.h" - -#include "reg_helper.h" - -#define MIN_DISP_CLK_KHZ 100000 -#define MIN_DPP_CLK_KHZ 100000 - -static struct _vcs_dpi_ip_params_st dcn201_ip = { - .gpuvm_enable = 0, - .hostvm_enable = 0, - .gpuvm_max_page_table_levels = 4, - .hostvm_max_page_table_levels = 4, - .hostvm_cached_page_table_levels = 0, - .pte_group_size_bytes = 2048, - .rob_buffer_size_kbytes = 168, - .det_buffer_size_kbytes = 164, - .dpte_buffer_size_in_pte_reqs_luma = 84, - .pde_proc_buffer_size_64k_reqs = 48, - .dpp_output_buffer_pixels = 2560, - .opp_output_buffer_lines = 1, - .pixel_chunk_size_kbytes = 8, - .pte_chunk_size_kbytes = 2, - .meta_chunk_size_kbytes = 2, - .writeback_chunk_size_kbytes = 2, - .line_buffer_size_bits = 789504, - .is_line_buffer_bpp_fixed = 0, - .line_buffer_fixed_bpp = 0, - .dcc_supported = true, - .max_line_buffer_lines = 12, - .writeback_luma_buffer_size_kbytes = 12, - .writeback_chroma_buffer_size_kbytes = 8, - .writeback_chroma_line_buffer_width_pixels = 4, - .writeback_max_hscl_ratio = 1, - .writeback_max_vscl_ratio = 1, - .writeback_min_hscl_ratio = 1, - .writeback_min_vscl_ratio = 1, - .writeback_max_hscl_taps = 12, - 
.writeback_max_vscl_taps = 12, - .writeback_line_buffer_luma_buffer_size = 0, - .writeback_line_buffer_chroma_buffer_size = 9600, - .cursor_buffer_size = 8, - .cursor_chunk_size = 2, - .max_num_otg = 2, - .max_num_dpp = 4, - .max_num_wb = 0, - .max_dchub_pscl_bw_pix_per_clk = 4, - .max_pscl_lb_bw_pix_per_clk = 2, - .max_lb_vscl_bw_pix_per_clk = 4, - .max_vscl_hscl_bw_pix_per_clk = 4, - .max_hscl_ratio = 8, - .max_vscl_ratio = 8, - .hscl_mults = 4, - .vscl_mults = 4, - .max_hscl_taps = 8, - .max_vscl_taps = 8, - .dispclk_ramp_margin_percent = 1, - .underscan_factor = 1.10, - .min_vblank_lines = 30, - .dppclk_delay_subtotal = 77, - .dppclk_delay_scl_lb_only = 16, - .dppclk_delay_scl = 50, - .dppclk_delay_cnvc_formatter = 8, - .dppclk_delay_cnvc_cursor = 6, - .dispclk_delay_subtotal = 87, - .dcfclk_cstate_latency = 10, - .max_inter_dcn_tile_repeaters = 8, - .number_of_cursors = 1, -}; - -static struct _vcs_dpi_soc_bounding_box_st dcn201_soc = { - .clock_limits = { - { - .state = 0, - .dscclk_mhz = 400.0, - .dcfclk_mhz = 1000.0, - .fabricclk_mhz = 200.0, - .dispclk_mhz = 300.0, - .dppclk_mhz = 300.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 1254.0, - .dram_speed_mts = 2000.0, - }, - { - .state = 1, - .dscclk_mhz = 400.0, - .dcfclk_mhz = 1000.0, - .fabricclk_mhz = 250.0, - .dispclk_mhz = 1200.0, - .dppclk_mhz = 1200.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 1254.0, - .dram_speed_mts = 3600.0, - }, - { - .state = 2, - .dscclk_mhz = 400.0, - .dcfclk_mhz = 1000.0, - .fabricclk_mhz = 750.0, - .dispclk_mhz = 1200.0, - .dppclk_mhz = 1200.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 1254.0, - .dram_speed_mts = 6800.0, - }, - { - .state = 3, - .dscclk_mhz = 400.0, - .dcfclk_mhz = 1000.0, - .fabricclk_mhz = 250.0, - .dispclk_mhz = 1200.0, - .dppclk_mhz = 1200.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 1254.0, - .dram_speed_mts = 14000.0, - }, - { - .state = 4, - .dscclk_mhz = 400.0, - .dcfclk_mhz = 1000.0, - .fabricclk_mhz = 750.0, - .dispclk_mhz = 1200.0, - .dppclk_mhz = 1200.0, - .phyclk_mhz = 810.0, - .socclk_mhz = 1254.0, - .dram_speed_mts = 14000.0, - } - }, - .num_states = 4, - .sr_exit_time_us = 9.0, - .sr_enter_plus_exit_time_us = 11.0, - .urgent_latency_us = 4.0, - .urgent_latency_pixel_data_only_us = 4.0, - .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, - .urgent_latency_vm_data_only_us = 4.0, - .urgent_out_of_order_return_per_channel_pixel_only_bytes = 256, - .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 256, - .urgent_out_of_order_return_per_channel_vm_only_bytes = 256, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 80.0, - .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 80.0, - .max_avg_sdp_bw_use_normal_percent = 80.0, - .max_avg_dram_bw_use_normal_percent = 69.0, - .writeback_latency_us = 12.0, - .ideal_dram_bw_after_urgent_percent = 80.0, - .max_request_size_bytes = 256, - .dram_channel_width_bytes = 2, - .fabric_datapath_to_dcn_data_return_bytes = 64, - .dcn_downspread_percent = 0.3, - .downspread_percent = 0.3, - .dram_page_open_time_ns = 50.0, - .dram_rw_turnaround_time_ns = 17.5, - .dram_return_buffer_per_channel_bytes = 8192, - .round_trip_ping_latency_dcfclk_cycles = 128, - .urgent_out_of_order_return_per_channel_bytes = 256, - .channel_interleave_bytes = 256, - .num_banks = 8, - .num_chans = 16, - .vmm_page_size_bytes = 4096, - .dram_clock_change_latency_us = 250.0, - .writeback_dram_clock_change_latency_us = 23.0, - .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3000, - .use_urgent_burst_bw 
= 0, -}; - -enum dcn20_clk_src_array_id { - DCN20_CLK_SRC_PLL0, - DCN20_CLK_SRC_PLL1, - DCN20_CLK_SRC_TOTAL_DCN201 -}; - -/* begin ********************* - * macros to expand register list macro defined in HW object header file */ - -/* DCN */ - -#undef BASE_INNER -#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRIR(var_name, reg_name, block, id)\ - .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRI_IX(reg_name, block, id)\ - .reg_name = ix ## block ## id ## _ ## reg_name - -#define DCCG_SRII(reg_name, block, id)\ - .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - mm ## reg_name ## _ ## block ## id - -/* NBIO */ -#define NBIO_BASE_INNER(seg) \ - NBIO_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -/* MMHUB */ -#define MMHUB_BASE_INNER(seg) \ - MMHUB_BASE__INST0_SEG ## seg - -#define MMHUB_BASE(seg) \ - MMHUB_BASE_INNER(seg) - -#define MMHUB_SR(reg_name)\ - .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \ - mmMM ## reg_name - -static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_3), - NBIO_SR(BIOS_SCRATCH_6) -}; - -#define clk_src_regs(index, pllid)\ -[index] = {\ - CS_COMMON_REG_LIST_DCN201(index, pllid),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define stream_enc_regs(id)\ -[id] = {\ - SE_DCN2_REG_LIST(id)\ -} - -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1) -}; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN20(_MASK) -}; - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -#define 
aux_regs(id)\ -[id] = {\ - DCN2_AUX_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), -}; - -#define link_regs(id, phyid)\ -[id] = {\ - LE_DCN_COMMON_REG_LIST(id), \ - UNIPHY_DCN2_REG_LIST(phyid) \ -} - -static const struct dcn10_link_enc_registers link_enc_regs[] = { - link_regs(0, A), - link_regs(1, B), -}; - -#define LINK_ENCODER_MASK_SH_LIST_DCN201(mask_sh)\ - LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh) - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN201(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN201(_MASK) -}; - -#define ipp_regs(id)\ -[id] = {\ - IPP_REG_LIST_DCN201(id),\ -} - -static const struct dcn10_ipp_registers ipp_regs[] = { - ipp_regs(0), - ipp_regs(1), - ipp_regs(2), - ipp_regs(3), -}; - -static const struct dcn10_ipp_shift ipp_shift = { - IPP_MASK_SH_LIST_DCN201(__SHIFT) -}; - -static const struct dcn10_ipp_mask ipp_mask = { - IPP_MASK_SH_LIST_DCN201(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_REG_LIST_DCN201(id),\ -} - -static const struct dcn201_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), -}; - -static const struct dcn201_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN201(__SHIFT) -}; - -static const struct dcn201_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN201(_MASK) -}; - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST0(id), \ - .AUX_RESET_MASK = 0 \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1) -}; - -#define tf_regs(id)\ -[id] = {\ - TF_REG_LIST_DCN201(id),\ -} - -static const struct dcn201_dpp_registers tf_regs[] = { - tf_regs(0), - tf_regs(1), - tf_regs(2), - tf_regs(3), -}; - -static const struct dcn201_dpp_shift tf_shift = { - TF_REG_LIST_SH_MASK_DCN201(__SHIFT) -}; - -static const struct dcn201_dpp_mask tf_mask = { - TF_REG_LIST_SH_MASK_DCN201(_MASK) -}; - -static const struct dcn201_mpc_registers mpc_regs = { - MPC_REG_LIST_DCN201(0), - MPC_REG_LIST_DCN201(1), - MPC_REG_LIST_DCN201(2), - MPC_REG_LIST_DCN201(3), - MPC_REG_LIST_DCN201(4), - MPC_OUT_MUX_REG_LIST_DCN201(0), - MPC_OUT_MUX_REG_LIST_DCN201(1), -}; - -static const struct dcn201_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN201(__SHIFT) -}; - -static const struct dcn201_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN201(_MASK) -}; - -#define tg_regs_dcn201(id)\ -[id] = {TG_COMMON_REG_LIST_DCN201(id)} - -static const struct dcn_optc_registers tg_regs[] = { - tg_regs_dcn201(0), - tg_regs_dcn201(1) -}; - -static const struct dcn_optc_shift tg_shift = { - TG_COMMON_MASK_SH_LIST_DCN201(__SHIFT) -}; - -static const struct dcn_optc_mask tg_mask = { - TG_COMMON_MASK_SH_LIST_DCN201(_MASK) -}; - -#define hubp_regsDCN201(id)\ -[id] = {\ - HUBP_REG_LIST_DCN201(id)\ -} - -static const struct dcn201_hubp_registers hubp_regs[] = { - hubp_regsDCN201(0), - hubp_regsDCN201(1), - hubp_regsDCN201(2), - hubp_regsDCN201(3) -}; - -static const struct dcn201_hubp_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN201(__SHIFT) -}; - -static const struct dcn201_hubp_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN201(_MASK) -}; - -static const struct dcn_hubbub_registers hubbub_reg = { - HUBBUB_REG_LIST_DCN201(0) -}; - -static const struct dcn_hubbub_shift hubbub_shift = { - 
HUBBUB_MASK_SH_LIST_DCN201(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN201(_MASK) -}; - - -static const struct dccg_registers dccg_regs = { - DCCG_COMMON_REG_LIST_DCN_BASE() -}; - -static const struct dccg_shift dccg_shift = { - DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(_MASK) -}; - -static const struct resource_caps res_cap_dnc201 = { - .num_timing_generator = 2, - .num_opp = 2, - .num_video_plane = 4, - .num_audio = 2, - .num_stream_encoder = 2, - .num_pll = 2, - .num_ddc = 2, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = false, - .fp16 = true, - .p010 = false, - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 1 - }, - - .max_downscale_factor = { - .argb8888 = 250, - .nv12 = 250, - .fp16 = 250 - }, - 64, - 64 -}; - -static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, - .performance_trace = false, - .az_endpoint_mute_only = true, - .max_downscale_src_width = 3840, - .disable_pplib_wm_range = true, - .scl_reset_length10 = true, - .sanity_checks = false, - .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_tri_buf = false, - .enable_legacy_fast_update = true, - .using_dml2 = false, -}; - -static void dcn201_dpp_destroy(struct dpp **dpp) -{ - kfree(TO_DCN201_DPP(*dpp)); - *dpp = NULL; -} - -static struct dpp *dcn201_dpp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn201_dpp *dpp = - kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC); - - if (!dpp) - return NULL; - - if (dpp201_construct(dpp, ctx, inst, - &tf_regs[inst], &tf_shift, &tf_mask)) - return &dpp->base; - - kfree(dpp); - return NULL; -} - -static struct input_pixel_processor *dcn201_ipp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn10_ipp *ipp = - kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC); - - if (!ipp) { - return NULL; - } - - dcn20_ipp_construct(ipp, ctx, inst, - &ipp_regs[inst], &ipp_shift, &ipp_mask); - return &ipp->base; -} - - -static struct output_pixel_processor *dcn201_opp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn201_opp *opp = - kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC); - - if (!opp) { - return NULL; - } - - dcn201_opp_construct(opp, ctx, inst, - &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) -}; - -static const struct dce_i2c_mask 
i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) -}; - -static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); - - if (!dce_i2c_hw) - return NULL; - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} - -static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc) -{ - struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), - GFP_ATOMIC); - - if (!mpc201) - return NULL; - - dcn201_mpc_construct(mpc201, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - num_mpcc); - - return &mpc201->base; -} - -static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx) -{ - struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), - GFP_ATOMIC); - - if (!hubbub) - return NULL; - - hubbub201_construct(hubbub, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask); - - return &hubbub->base; -} - -static struct timing_generator *dcn201_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_ATOMIC); - - if (!tgn10) - return NULL; - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &tg_regs[instance]; - tgn10->tg_shift = &tg_shift; - tgn10->tg_mask = &tg_mask; - - dcn201_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -static struct link_encoder *dcn201_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC); - struct dcn10_link_encoder *enc10 = &enc20->enc10; - - if (!enc20) - return NULL; - - dcn201_link_encoder_construct(enc20, - enc_init_data, - &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc10->base; -} - -static struct clock_source *dcn201_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC); - - if (!clk_src) - return NULL; - - if (dce112_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - kfree(clk_src); - return NULL; -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), - - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); -} - -static struct audio *dcn201_create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct stream_encoder *dcn201_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1 = - 
kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC); - - if (!enc1) - return NULL; - - dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCN201_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN201_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN201_MASK_SH_LIST(_MASK) -}; - -static struct dce_hwseq *dcn201_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} - -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn201_create_audio, - .create_stream_encoder = dcn201_stream_encoder_create, - .create_hwseq = dcn201_hwseq_create, -}; - -static void dcn201_clock_source_destroy(struct clock_source **clk_src) -{ - kfree(TO_DCE110_CLK_SRC(*clk_src)); - *clk_src = NULL; -} - -static void dcn201_resource_destruct(struct dcn201_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) { - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = NULL; - } - } - - - if (pool->base.mpc != NULL) { - kfree(TO_DCN201_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } - - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.dpps[i] != NULL) - dcn201_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN10_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn201_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn201_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - if (pool->base.dccg != NULL) - dcn_dccg_destroy(&pool->base.dccg); -} - -static struct hubp *dcn201_hubp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn201_hubp *hubp201 = - kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC); - - if (!hubp201) - return NULL; - - if (dcn201_hubp_construct(hubp201, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp201->base; - - kfree(hubp201); - return NULL; -} - -static struct pipe_ctx *dcn201_acquire_free_pipe_for_layer( - const struct dc_state *cur_ctx, - struct dc_state *new_ctx, - const struct 
resource_pool *pool, - const struct pipe_ctx *opp_head_pipe) -{ - struct resource_context *res_ctx = &new_ctx->res_ctx; - struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream); - struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe); - - if (!head_pipe) - ASSERT(0); - - if (!idle_pipe) - return NULL; - - idle_pipe->stream = head_pipe->stream; - idle_pipe->stream_res.tg = head_pipe->stream_res.tg; - idle_pipe->stream_res.opp = head_pipe->stream_res.opp; - - idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; - - return idle_pipe; -} - -static bool dcn201_get_dcc_compression_cap(const struct dc *dc, - const struct dc_dcc_surface_param *input, - struct dc_surface_dcc_cap *output) -{ - return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( - dc->res_pool->hubbub, - input, - output); -} - -static void dcn201_populate_dml_writeback_from_context(struct dc *dc, - struct resource_context *res_ctx, - display_e2e_pipe_params_st *pipes) -{ - DC_FP_START(); - dcn201_populate_dml_writeback_from_context_fpu(dc, res_ctx, pipes); - DC_FP_END(); -} - -static void dcn201_destroy_resource_pool(struct resource_pool **pool) -{ - struct dcn201_resource_pool *dcn201_pool = TO_DCN201_RES_POOL(*pool); - - dcn201_resource_destruct(dcn201_pool); - kfree(dcn201_pool); - *pool = NULL; -} - -static void dcn201_link_init(struct dc_link *link) -{ - if (link->ctx->dc_bios->integrated_info) - link->dp_ss_off = !link->ctx->dc_bios->integrated_info->dp_ss_control; -} - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn201_get_dcc_compression_cap, -}; - -static struct resource_funcs dcn201_res_pool_funcs = { - .link_init = dcn201_link_init, - .destroy = dcn201_destroy_resource_pool, - .link_enc_create = dcn201_link_encoder_create, - .panel_cntl_create = NULL, - .validate_bandwidth = dcn20_validate_bandwidth, - .populate_dml_pipes = dcn20_populate_dml_pipes_from_context, - .add_stream_to_ctx = dcn20_add_stream_to_ctx, - .add_dsc_to_stream_resource = NULL, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn201_acquire_free_pipe_for_layer, - .release_pipe = dcn20_release_pipe, - .populate_dml_writeback_from_context = dcn201_populate_dml_writeback_from_context, - .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, - .set_mcif_arb_params = dcn20_set_mcif_arb_params, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link -}; - -static bool dcn201_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dcn201_resource_pool *pool) -{ - int i; - struct dc_context *ctx = dc->ctx; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap_dnc201; - pool->base.funcs = &dcn201_res_pool_funcs; - - /************************************************* - * Resource + asic cap hardcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - - pool->base.pipe_count = 4; - pool->base.mpcc_count = 5; - dc->caps.max_downscale_ratio = 200; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/ - dc->caps.max_cursor_size = 256; - dc->caps.min_horizontal_blanking_period = 80; - 
dc->caps.dmdata_alloc_size = 2048; - - dc->caps.max_slave_planes = 1; - dc->caps.max_slave_yuv_planes = 1; - dc->caps.max_slave_rgb_planes = 1; - dc->caps.post_blend_color_processing = true; - dc->caps.force_dp_tps4_for_cp2520 = true; - dc->caps.extended_aux_timeout_support = true; - - /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 0; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 1; - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.dgam_rom_caps.pq = 0; - dc->caps.color.dpp.dgam_rom_caps.hlg = 0; - dc->caps.color.dpp.post_csc = 0; - dc->caps.color.dpp.gamma_corr = 0; - dc->caps.color.dpp.dgam_rom_for_yuv = 1; - - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; - // no OGAM ROM on DCN2 - dc->caps.color.dpp.ogam_rom_caps.srgb = 0; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 0; - - dc->caps.color.mpc.gamut_remap = 0; - dc->caps.color.mpc.num_3dluts = 0; - dc->caps.color.mpc.shared_3d_lut = 0; - dc->caps.color.mpc.ogam_ram = 1; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; - dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 1; - - dc->debug = debug_defaults_drv; - - /*a0 only, remove later*/ - dc->work_arounds.no_connect_phy_config = true; - dc->work_arounds.dedcn20_305_wa = true; - /************************************************* - * Create resources * - *************************************************/ - - pool->base.clock_sources[DCN20_CLK_SRC_PLL0] = - dcn201_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCN20_CLK_SRC_PLL1] = - dcn201_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - - pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN201; - - /* todo: not reuse phy_pll registers */ - pool->base.dp_clock_source = - dcn201_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - goto create_fail; - } - } - - pool->base.dccg = dccg201_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->base.dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - goto create_fail; - } - - dcn201_ip.max_num_otg = pool->base.res_cap->num_timing_generator; - dcn201_ip.max_num_dpp = pool->base.pipe_count; - dml_init_instance(&dc->dml, &dcn201_soc, &dcn201_ip, DML_PROJECT_DCN201); - { - struct irq_service_init_data init_data; - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dcn201_create(&init_data); - if (!pool->base.irqs) - goto create_fail; - } - - /* mem input -> ipp -> dpp -> opp -> TG */ - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.hubps[i] = dcn201_hubp_create(ctx, i); - if (pool->base.hubps[i] == NULL) { - dm_error( - "DC: failed to create memory input!\n"); - goto create_fail; - } - - pool->base.ipps[i] = dcn201_ipp_create(ctx, i); - if (pool->base.ipps[i] == NULL) { - dm_error( - "DC: failed to create input pixel 
processor!\n"); - goto create_fail; - } - - pool->base.dpps[i] = dcn201_dpp_create(ctx, i); - if (pool->base.dpps[i] == NULL) { - dm_error( - "DC: failed to create dpps!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - pool->base.opps[i] = dcn201_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dcn201_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - dm_error( - "DC:failed to create aux engine!!\n"); - goto create_fail; - } - pool->base.hw_i2cs[i] = dcn201_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - dm_error( - "DC:failed to create hw i2c!!\n"); - goto create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.timing_generators[i] = dcn201_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[i] == NULL) { - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - - pool->base.timing_generator_count = i; - - pool->base.mpc = dcn201_mpc_create(ctx, pool->base.mpcc_count); - if (pool->base.mpc == NULL) { - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - pool->base.hubbub = dcn201_hubbub_create(ctx); - if (pool->base.hubbub == NULL) { - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto create_fail; - - dcn201_hw_sequencer_construct(dc); - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - return true; - -create_fail: - - dcn201_resource_destruct(pool); - - return false; -} - -struct resource_pool *dcn201_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc) -{ - struct dcn201_resource_pool *pool = - kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC); - - if (!pool) - return NULL; - - if (dcn201_resource_construct(init_data->num_virtual_links, dc, pool)) - return &pool->base; - - kfree(pool); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h deleted file mode 100644 index e0467d17d4..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h +++ /dev/null @@ -1,50 +0,0 @@ -/* -* Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_RESOURCE_DCN201_H__ -#define __DC_RESOURCE_DCN201_H__ - -#include "core_types.h" - -#define RRDPCS_PHY_DP_TX_PSTATE_POWER_UP 0x00000000 -#define RRDPCS_PHY_DP_TX_PSTATE_HOLD 0x00000001 -#define RRDPCS_PHY_DP_TX_PSTATE_HOLD_OFF 0x00000002 -#define RRDPCS_PHY_DP_TX_PSTATE_POWER_DOWN 0x00000003 - -#define TO_DCN201_RES_POOL(pool)\ - container_of(pool, struct dcn201_resource_pool, base) - -struct dc; -struct resource_pool; -struct _vcs_dpi_display_pipe_params_st; - -struct dcn201_resource_pool { - struct resource_pool base; -}; -struct resource_pool *dcn201_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); - -#endif /* __DC_RESOURCE_DCN201_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index ce1be0afae..ca92f5c8e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -2,7 +2,7 @@ # # Makefile for DCN21. -DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \ +DCN21 = dcn21_hubp.o dcn21_hubbub.o \ dcn21_link_encoder.o dcn21_dccg.o AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c deleted file mode 100644 index 18249c6b6d..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Copyright 2016-2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dce110/dce110_hwseq.h" -#include "dcn10/dcn10_hwseq.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn21/dcn21_hwseq.h" - -#include "dcn21_init.h" - -static const struct hw_sequencer_funcs dcn21_funcs = { - .program_gamut_remap = dcn10_program_gamut_remap, - .init_hw = dcn10_init_hw, - .power_down_on_boot = dcn10_power_down_on_boot, - .apply_ctx_to_hw = dce110_apply_ctx_to_hw, - .apply_ctx_for_surface = NULL, - .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, - .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, - .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, - .update_plane_addr = dcn20_update_plane_addr, - .update_dchub = dcn10_update_dchub, - .update_pending_status = dcn10_update_pending_status, - .program_output_csc = dcn20_program_output_csc, - .enable_accelerated_mode = dce110_enable_accelerated_mode, - .enable_timing_synchronization = dcn10_enable_timing_synchronization, - .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, - .update_info_frame = dce110_update_info_frame, - .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, - .enable_stream = dcn20_enable_stream, - .disable_stream = dce110_disable_stream, - .unblank_stream = dcn20_unblank_stream, - .blank_stream = dce110_blank_stream, - .enable_audio_stream = dce110_enable_audio_stream, - .disable_audio_stream = dce110_disable_audio_stream, - .disable_plane = dcn20_disable_plane, - .pipe_control_lock = dcn20_pipe_control_lock, - .interdependent_update_lock = dcn10_lock_all_pipes, - .cursor_lock = dcn10_cursor_lock, - .prepare_bandwidth = dcn20_prepare_bandwidth, - .optimize_bandwidth = dcn20_optimize_bandwidth, - .update_bandwidth = dcn20_update_bandwidth, - .set_drr = dcn10_set_drr, - .get_position = dcn10_get_position, - .set_static_screen_control = dcn10_set_static_screen_control, - .setup_stereo = dcn10_setup_stereo, - .set_avmute = dce110_set_avmute, - .log_hw_state = dcn10_log_hw_state, - .get_hw_state = dcn10_get_hw_state, - .clear_status_bits = dcn10_clear_status_bits, - .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, - .edp_power_control = dce110_edp_power_control, - .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, - .set_cursor_position = dcn10_set_cursor_position, - .set_cursor_attribute = dcn10_set_cursor_attribute, - .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .set_clock = dcn10_set_clock, - .get_clock = dcn10_get_clock, - .program_triplebuffer = dcn20_program_triple_buffer, - .enable_writeback = dcn20_enable_writeback, - .disable_writeback = dcn20_disable_writeback, - .dmdata_status_done = dcn20_dmdata_status_done, - .program_dmdata_engine = dcn20_program_dmdata_engine, - .set_dmdata_attributes = dcn20_set_dmdata_attributes, - .init_sys_ctx = dcn21_init_sys_ctx, - .init_vm_ctx = dcn20_init_vm_ctx, - .set_flip_control_gsl = dcn20_set_flip_control_gsl, - .optimize_pwr_state = dcn21_optimize_pwr_state, - .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, - .calc_vupdate_position = dcn10_calc_vupdate_position, - .power_down = dce110_power_down, - .set_backlight_level = dcn21_set_backlight_level, - .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, - .set_pipe = dcn21_set_pipe, - .enable_lvds_link_output = dce110_enable_lvds_link_output, - 
.enable_tmds_link_output = dce110_enable_tmds_link_output, - .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dce110_disable_link_output, - .is_abm_supported = dcn21_is_abm_supported, - .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, - .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn10_update_visual_confirm_color, -}; - -static const struct hwseq_private_funcs dcn21_private_funcs = { - .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, - .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, - .update_mpcc = dcn20_update_mpcc, - .set_input_transfer_func = dcn20_set_input_transfer_func, - .set_output_transfer_func = dcn20_set_output_transfer_func, - .power_down = dce110_power_down, - .enable_display_power_gating = dcn10_dummy_display_power_gating, - .blank_pixel_data = dcn20_blank_pixel_data, - .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, - .enable_stream_timing = dcn20_enable_stream_timing, - .edp_backlight_control = dce110_edp_backlight_control, - .disable_stream_gating = dcn20_disable_stream_gating, - .enable_stream_gating = dcn20_enable_stream_gating, - .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, - .did_underflow_occur = dcn10_did_underflow_occur, - .init_blank = dcn20_init_blank, - .disable_vga = dcn20_disable_vga, - .bios_golden_init = dcn10_bios_golden_init, - .plane_atomic_disable = dcn20_plane_atomic_disable, - .plane_atomic_power_down = dcn10_plane_atomic_power_down, - .enable_power_gating_plane = dcn20_enable_power_gating_plane, - .dpp_pg_control = dcn20_dpp_pg_control, - .hubp_pg_control = dcn20_hubp_pg_control, - .update_odm = dcn20_update_odm, - .dsc_pg_control = dcn20_dsc_pg_control, - .set_hdr_multiplier = dcn10_set_hdr_multiplier, - .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, - .s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa, - .wait_for_blank_complete = dcn20_wait_for_blank_complete, - .dccg_init = dcn20_dccg_init, - .set_blend_lut = dcn20_set_blend_lut, - .set_shaper_3dlut = dcn20_set_shaper_3dlut, - .PLAT_58856_wa = dcn21_PLAT_58856_wa, -}; - -void dcn21_hw_sequencer_construct(struct dc *dc) -{ - dc->hwss = dcn21_funcs; - dc->hwseq->funcs = dcn21_private_funcs; - -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h deleted file mode 100644 index 3ed2429264..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_DCN21_INIT_H__ -#define __DC_DCN21_INIT_H__ - -struct dc; - -void dcn21_hw_sequencer_construct(struct dc *dc); - -#endif /* __DC_DCN21_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c deleted file mode 100644 index 42277b2805..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ /dev/null @@ -1,1745 +0,0 @@ -/* -* Copyright 2018 Advanced Micro Devices, Inc. - * Copyright 2019 Raptor Engineering, LLC - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE.
- * - * Authors: AMD - * - */ - -#include <linux/slab.h> - -#include "dm_services.h" -#include "dc.h" - -#include "dcn21_init.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn20/dcn20_resource.h" -#include "dcn21/dcn21_resource.h" - -#include "dml/dcn20/dcn20_fpu.h" - -#include "clk_mgr.h" -#include "dcn10/dcn10_hubp.h" -#include "dcn10/dcn10_ipp.h" -#include "dcn20/dcn20_hubbub.h" -#include "dcn20/dcn20_mpc.h" -#include "dcn20/dcn20_hubp.h" -#include "dcn21_hubp.h" -#include "irq/dcn21/irq_service_dcn21.h" -#include "dcn20/dcn20_dpp.h" -#include "dcn20/dcn20_optc.h" -#include "dcn21/dcn21_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn20/dcn20_opp.h" -#include "dcn20/dcn20_dsc.h" -#include "dcn21/dcn21_link_encoder.h" -#include "dcn20/dcn20_stream_encoder.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "virtual/virtual_stream_encoder.h" -#include "dml/display_mode_vba.h" -#include "dcn20/dcn20_dccg.h" -#include "dcn21/dcn21_dccg.h" -#include "dcn21_hubbub.h" -#include "dcn10/dcn10_resource.h" -#include "dce/dce_panel_cntl.h" - -#include "dcn20/dcn20_dwb.h" -#include "dcn20/dcn20_mmhubbub.h" -#include "dpcs/dpcs_2_1_0_offset.h" -#include "dpcs/dpcs_2_1_0_sh_mask.h" - -#include "renoir_ip_offset.h" -#include "dcn/dcn_2_1_0_offset.h" -#include "dcn/dcn_2_1_0_sh_mask.h" - -#include "nbio/nbio_7_0_offset.h" - -#include "mmhub/mmhub_2_0_0_offset.h" -#include "mmhub/mmhub_2_0_0_sh_mask.h" - -#include "reg_helper.h" -#include "dce/dce_abm.h" -#include "dce/dce_dmcu.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" -#include "dcn21_resource.h" -#include "vm_helper.h" -#include "dcn20/dcn20_vmid.h" -#include "dce/dmub_psr.h" -#include "dce/dmub_abm.h" - -/* begin ********************* - * macros to expand register list macro defined in HW object header file */ - -/* DCN */ -#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRIR(var_name, reg_name, block, id)\ - .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define DCCG_SRII(reg_name, block, id)\ - .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - mm ## reg_name ## _ ## block ## id - -/* NBIO */ -#define NBIO_BASE_INNER(seg) \ - NBIF0_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -/* MMHUB */ -#define MMHUB_BASE_INNER(seg) \ - MMHUB_BASE__INST0_SEG ## seg - -#define MMHUB_BASE(seg) \ - MMHUB_BASE_INNER(seg) - -#define MMHUB_SR(reg_name)\ - .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \ - mmMM ## reg_name - -#define clk_src_regs(index, pllid)\ -[index] = {\ - CS_COMMON_REG_LIST_DCN2_1(index, pllid),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, 
C), - clk_src_regs(3, D), - clk_src_regs(4, E), -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_3), - NBIO_SR(BIOS_SCRATCH_6) -}; - -static const struct dce_dmcu_registers dmcu_regs = { - DMCU_DCN20_REG_LIST() -}; - -static const struct dce_dmcu_shift dmcu_shift = { - DMCU_MASK_SH_LIST_DCN10(__SHIFT) -}; - -static const struct dce_dmcu_mask dmcu_mask = { - DMCU_MASK_SH_LIST_DCN10(_MASK) -}; - -static const struct dce_abm_registers abm_regs = { - ABM_DCN20_REG_LIST() -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN20(_MASK) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -static const struct dccg_registers dccg_regs = { - DCCG_COMMON_REG_LIST_DCN_BASE() -}; - -static const struct dccg_shift dccg_shift = { - DCCG_MASK_SH_LIST_DCN2_1(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN2_1(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_REG_LIST_DCN20(id),\ -} - -static const struct dcn20_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3), - opp_regs(4), - opp_regs(5), -}; - -static const struct dcn20_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn20_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN20(_MASK) -}; - -#define tg_regs(id)\ -[id] = {TG_COMMON_REG_LIST_DCN2_0(id)} - -static const struct dcn_optc_registers tg_regs[] = { - tg_regs(0), - tg_regs(1), - tg_regs(2), - tg_regs(3) -}; - -static const struct dcn_optc_shift tg_shift = { - TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dcn_optc_mask tg_mask = { - TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -static const struct dcn20_mpc_registers mpc_regs = { - MPC_REG_LIST_DCN2_0(0), - MPC_REG_LIST_DCN2_0(1), - MPC_REG_LIST_DCN2_0(2), - MPC_REG_LIST_DCN2_0(3), - MPC_REG_LIST_DCN2_0(4), - MPC_REG_LIST_DCN2_0(5), - MPC_OUT_MUX_REG_LIST_DCN2_0(0), - MPC_OUT_MUX_REG_LIST_DCN2_0(1), - MPC_OUT_MUX_REG_LIST_DCN2_0(2), - MPC_OUT_MUX_REG_LIST_DCN2_0(3), - MPC_DBG_REG_LIST_DCN2_0() -}; - -static const struct dcn20_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT), - MPC_DEBUG_REG_LIST_SH_DCN20 -}; - -static const struct dcn20_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK), - MPC_DEBUG_REG_LIST_MASK_DCN20 -}; - -#define hubp_regs(id)\ -[id] = {\ - HUBP_REG_LIST_DCN21(id)\ -} - -static const struct dcn_hubp2_registers hubp_regs[] = { - hubp_regs(0), - hubp_regs(1), - hubp_regs(2), - hubp_regs(3) -}; - -static const struct dcn_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN21(__SHIFT) -}; - -static const struct 
dcn_hubp2_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN21(_MASK) -}; - -static const struct dcn_hubbub_registers hubbub_reg = { - HUBBUB_REG_LIST_DCN21() -}; - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN21(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN21(_MASK) -}; - - -#define vmid_regs(id)\ -[id] = {\ - DCN20_VMID_REG_LIST(id)\ -} - -static const struct dcn_vmid_registers vmid_regs[] = { - vmid_regs(0), - vmid_regs(1), - vmid_regs(2), - vmid_regs(3), - vmid_regs(4), - vmid_regs(5), - vmid_regs(6), - vmid_regs(7), - vmid_regs(8), - vmid_regs(9), - vmid_regs(10), - vmid_regs(11), - vmid_regs(12), - vmid_regs(13), - vmid_regs(14), - vmid_regs(15) -}; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -#define dsc_regsDCN20(id)\ -[id] = {\ - DSC_REG_LIST_DCN20(id)\ -} - -static const struct dcn20_dsc_registers dsc_regs[] = { - dsc_regsDCN20(0), - dsc_regsDCN20(1), - dsc_regsDCN20(2), - dsc_regsDCN20(3), - dsc_regsDCN20(4), - dsc_regsDCN20(5) -}; - -static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) -}; - -static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN20(_MASK) -}; - -#define ipp_regs(id)\ -[id] = {\ - IPP_REG_LIST_DCN20(id),\ -} - -static const struct dcn10_ipp_registers ipp_regs[] = { - ipp_regs(0), - ipp_regs(1), - ipp_regs(2), - ipp_regs(3), -}; - -static const struct dcn10_ipp_shift ipp_shift = { - IPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn10_ipp_mask ipp_mask = { - IPP_MASK_SH_LIST_DCN20(_MASK), -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_REG_LIST_DCN20(id),\ -} - - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST0(id), \ - .AUXN_IMPCAL = 0, \ - .AUXP_IMPCAL = 0, \ - .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4), -}; - -#define tf_regs(id)\ -[id] = {\ - TF_REG_LIST_DCN20(id),\ - TF_REG_LIST_DCN20_COMMON_APPEND(id),\ -} - -static const struct dcn2_dpp_registers tf_regs[] = { - tf_regs(0), - tf_regs(1), - tf_regs(2), - tf_regs(3), -}; - -static const struct dcn2_dpp_shift tf_shift = { - TF_REG_LIST_SH_MASK_DCN20(__SHIFT), - TF_DEBUG_REG_LIST_SH_DCN20 -}; - -static const struct dcn2_dpp_mask tf_mask = { - TF_REG_LIST_SH_MASK_DCN20(_MASK), - TF_DEBUG_REG_LIST_MASK_DCN20 -}; - -#define stream_enc_regs(id)\ -[id] = {\ - SE_DCN2_REG_LIST(id)\ -} - -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), - stream_enc_regs(4), -}; - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN20(_MASK) -}; - -static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu); - -static struct input_pixel_processor *dcn21_ipp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn10_ipp *ipp = - kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); - - if (!ipp) { - 
BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn20_ipp_construct(ipp, ctx, inst, - &ipp_regs[inst], &ipp_shift, &ipp_mask); - return &ipp->base; -} - -static struct dpp *dcn21_dpp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_dpp *dpp = - kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL); - - if (!dpp) - return NULL; - - if (dpp2_construct(dpp, ctx, inst, - &tf_regs[inst], &tf_shift, &tf_mask)) - return &dpp->base; - - BREAK_TO_DEBUGGER(); - kfree(dpp); - return NULL; -} - -static struct dce_aux *dcn21_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} - -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5), -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) -}; - -static struct dce_i2c_hw *dcn21_i2c_hw_create(struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} - -static const struct resource_caps res_cap_rn = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 4, // 4 audio endpoints. 
4 audio streams - .num_stream_encoder = 5, - .num_pll = 5, // maybe 3 because the last two used for USB-c - .num_dwb = 1, - .num_ddc = 5, - .num_vmid = 16, - .num_dsc = 3, -}; - -#ifdef DIAGS_BUILD -static const struct resource_caps res_cap_rn_FPGA_4pipe = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 7, - .num_stream_encoder = 4, - .num_pll = 4, - .num_dwb = 1, - .num_ddc = 4, - .num_dsc = 0, -}; - -static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = { - .num_timing_generator = 2, - .num_opp = 2, - .num_video_plane = 2, - .num_audio = 7, - .num_stream_encoder = 2, - .num_pll = 4, - .num_dwb = 1, - .num_ddc = 4, - .num_dsc = 2, -}; -#endif - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 16000 - }, - - .max_downscale_factor = { - .argb8888 = 250, - .nv12 = 250, - .fp16 = 250 - }, - 64, - 64 -}; - -static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = false, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = true, - .min_disp_clk_khz = 100000, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, - .performance_trace = false, - .max_downscale_src_width = 4096, - .disable_pplib_wm_range = false, - .scl_reset_length10 = true, - .sanity_checks = true, - .disable_48mhz_pwrdwn = false, - .usbc_combo_phy_reset_wa = true, - .dmub_command_table = true, - .use_max_lb = true, - .enable_legacy_fast_update = true, - .using_dml2 = false, -}; - -static const struct dc_panel_config panel_config_defaults = { - .psr = { - .disable_psr = false, - .disallow_psrsu = false, - .disallow_replay = false, - }, - .ilr = { - .optimize_edp_link_rate = true, - }, -}; - -enum dcn20_clk_src_array_id { - DCN20_CLK_SRC_PLL0, - DCN20_CLK_SRC_PLL1, - DCN20_CLK_SRC_PLL2, - DCN20_CLK_SRC_PLL3, - DCN20_CLK_SRC_PLL4, - DCN20_CLK_SRC_TOTAL_DCN21 -}; - -static void dcn21_resource_destruct(struct dcn21_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) { - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - if (pool->base.dscs[i] != NULL) - dcn20_dsc_destroy(&pool->base.dscs[i]); - } - - if (pool->base.mpc != NULL) { - kfree(TO_DCN20_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.dpps[i] != NULL) - dcn20_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - 
kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dwb; i++) { - if (pool->base.dwbc[i] != NULL) { - kfree(TO_DCN20_DWBC(pool->base.dwbc[i])); - pool->base.dwbc[i] = NULL; - } - if (pool->base.mcif_wb[i] != NULL) { - kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i])); - pool->base.mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn20_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn20_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - if (pool->base.abm != NULL) { - if (pool->base.abm->ctx->dc->config.disable_dmcu) - dmub_abm_destroy(&pool->base.abm); - else - dce_abm_destroy(&pool->base.abm); - } - - if (pool->base.dmcu != NULL) - dce_dmcu_destroy(&pool->base.dmcu); - - if (pool->base.psr != NULL) - dmub_psr_destroy(&pool->base.psr); - - if (pool->base.dccg != NULL) - dcn_dccg_destroy(&pool->base.dccg); - - if (pool->base.pp_smu != NULL) - dcn21_pp_smu_destroy(&pool->base.pp_smu); -} - -bool dcn21_fast_validate_bw(struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *pipe_cnt_out, - int *pipe_split_from, - int *vlevel_out, - bool fast_validate) -{ - bool out = false; - int split[MAX_PIPES] = { 0 }; - int pipe_cnt, i, pipe_idx, vlevel; - - ASSERT(pipes); - if (!pipes) - return false; - - dcn20_merge_pipes_for_validate(dc, context); - - DC_FP_START(); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); - DC_FP_END(); - - *pipe_cnt_out = pipe_cnt; - - if (!pipe_cnt) { - out = true; - goto validate_out; - } - /* - * DML favors voltage over p-state, but we're more interested in - * supporting p-state over voltage. We can't support p-state in - * prefetch mode > 0 so try capping the prefetch mode to start. - */ - context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = - dm_allow_self_refresh_and_mclk_switch; - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - if (vlevel > context->bw_ctx.dml.soc.num_states) { - /* - * If mode is unsupported or there's still no p-state support then - * fall back to favoring voltage. - * - * We don't actually support prefetch mode 2, so require that we - * at least support prefetch mode 1. 
- */ - context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = - dm_allow_self_refresh; - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - if (vlevel > context->bw_ctx.dml.soc.num_states) - goto validate_fail; - } - - vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL); - - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; - struct vba_vars_st *vba = &context->bw_ctx.dml.vba; - - if (!pipe->stream) - continue; - - /* We only support full-screen MPO with ODM */ - if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled - && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_state->clip_rect, - &pipe->stream->src, - sizeof(struct rect)) != 0) { - ASSERT(mpo_pipe->plane_state != pipe->plane_state); - goto validate_fail; - } - pipe_idx++; - } - - /* initialize pipe_split_from to invalid idx */ - for (i = 0; i < MAX_PIPES; i++) - pipe_split_from[i] = -1; - - for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; - - if (!pipe->stream || pipe_split_from[i] >= 0) - continue; - - pipe_idx++; - - if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { - hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); - ASSERT(hsplit_pipe); - if (!dcn20_split_stream_for_odm( - dc, &context->res_ctx, - pipe, hsplit_pipe)) - goto validate_fail; - pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; - dcn20_build_mapped_resource(dc, context, pipe->stream); - } - - if (!pipe->plane_state) - continue; - /* Skip 2nd half of already split pipe */ - if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state) - continue; - - if (split[i] == 2) { - if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) { - /* pipe not previously split needs a split */ - hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); - ASSERT(hsplit_pipe); - if (!hsplit_pipe) { - DC_FP_START(); - dcn20_fpu_adjust_dppclk(&context->bw_ctx.dml.vba, vlevel, context->bw_ctx.dml.vba.maxMpcComb, pipe_idx, true); - DC_FP_END(); - continue; - } - if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { - if (!dcn20_split_stream_for_odm( - dc, &context->res_ctx, - pipe, hsplit_pipe)) - goto validate_fail; - dcn20_build_mapped_resource(dc, context, pipe->stream); - } else { - dcn20_split_stream_for_mpc( - &context->res_ctx, dc->res_pool, - pipe, hsplit_pipe); - resource_build_scaling_params(pipe); - resource_build_scaling_params(hsplit_pipe); - } - pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; - } - } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { - /* merge should already have been done */ - ASSERT(0); - } - } - /* Validate actual DSC count per stream */ - if (!dcn20_validate_dsc(dc, context)) { - context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = - DML_FAIL_DSC_VALIDATION_FAILURE; - goto validate_fail; - } - - *vlevel_out = vlevel; - - out = true; - goto validate_out; - -validate_fail: - out = false; - -validate_out: - return out; -} - -/* - * Some of the functions further below use the FPU, so we need to wrap this - * with DC_FP_START()/DC_FP_END().
Use the same approach as for - * dcn20_validate_bandwidth in dcn20_resource.c. - */ -static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) -{ - bool voltage_supported; - display_e2e_pipe_params_st *pipes; - - pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL); - if (!pipes) - return false; - - DC_FP_START(); - voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes); - DC_FP_END(); - - kfree(pipes); - return voltage_supported; -} - -static void dcn21_destroy_resource_pool(struct resource_pool **pool) -{ - struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool); - - dcn21_resource_destruct(dcn21_pool); - kfree(dcn21_pool); - *pool = NULL; -} - -static struct clock_source *dcn21_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn20_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static struct hubp *dcn21_hubp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn21_hubp *hubp21 = - kzalloc(sizeof(struct dcn21_hubp), GFP_KERNEL); - - if (!hubp21) - return NULL; - - if (hubp21_construct(hubp21, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp21->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp21); - return NULL; -} - -static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx) -{ - int i; - - struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), - GFP_KERNEL); - - if (!hubbub) - return NULL; - - hubbub21_construct(hubbub, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask); - - for (i = 0; i < res_cap_rn.num_vmid; i++) { - struct dcn20_vmid *vmid = &hubbub->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - hubbub->num_vmid = res_cap_rn.num_vmid; - - return &hubbub->base; -} - -static struct output_pixel_processor *dcn21_opp_create(struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_opp *opp = - kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); - - if (!opp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn20_opp_construct(opp, ctx, inst, - &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -static struct timing_generator *dcn21_timing_generator_create(struct dc_context *ctx, - uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &tg_regs[instance]; - tgn10->tg_shift = &tg_shift; - tgn10->tg_mask = &tg_mask; - - dcn20_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static struct mpc *dcn21_mpc_create(struct dc_context *ctx) -{ - struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), - GFP_KERNEL); - - if (!mpc20) - return NULL; - - dcn20_mpc_construct(mpc20, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - 6); - - return &mpc20->base; -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); - -} 
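All of the dcn21_*_create() helpers above follow one allocate/construct/free-on-failure shape, so the pool constructor only ever stores a fully built object or NULL. A minimal sketch of that pattern (hypothetical foo_* names, not code from this patch):

static struct foo *foo_create(struct dc_context *ctx, uint32_t inst)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	/* Construct against the per-instance register/shift/mask tables;
	 * assume it returns true on success, as dpp2_construct() does.
	 */
	if (foo_construct(f, ctx, inst, &foo_regs[inst], &foo_shift, &foo_mask))
		return f;

	BREAK_TO_DEBUGGER();	/* halt in a debug build, then clean up */
	kfree(f);
	return NULL;
}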
- - -static struct display_stream_compressor *dcn21_dsc_create(struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - return &dsc->base; -} - -static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx) -{ - struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); - - if (!pp_smu) - return pp_smu; - - dm_pp_get_funcs(ctx, pp_smu); - - if (pp_smu->ctx.ver != PP_SMU_VER_RN) - pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs)); - - - return pp_smu; -} - -static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu) -{ - if (pp_smu && *pp_smu) { - kfree(*pp_smu); - *pp_smu = NULL; - } -} - -static struct audio *dcn21_create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap -}; - -static struct stream_encoder *dcn21_stream_encoder_create(enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1 = - kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - - if (!enc1) - return NULL; - - dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCN21_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN21_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN21_MASK_SH_LIST(_MASK) -}; - -static struct dce_hwseq *dcn21_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - hws->wa.DEGVIDCN21 = true; - hws->wa.disallow_self_refresh_during_multi_plane_transition = true; - } - return hws; -} - -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn21_create_audio, - .create_stream_encoder = dcn21_stream_encoder_create, - .create_hwseq = dcn21_hwseq_create, -}; - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - - -#define link_regs(id, phyid)\ -[id] = {\ - LE_DCN2_REG_LIST(id), \ - UNIPHY_DCN2_REG_LIST(phyid), \ - DPCS_DCN21_REG_LIST(id), \ - SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ -} - -static const struct dcn10_link_enc_registers link_enc_regs[] = { - link_regs(0, A), - link_regs(1, B), - link_regs(2, C), - link_regs(3, D), - link_regs(4, E), -}; - -static const struct dce_panel_cntl_registers panel_cntl_regs[] = { - { DCN_PANEL_CNTL_REG_LIST() } -}; - -static const struct dce_panel_cntl_shift panel_cntl_shift = { - DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_panel_cntl_mask panel_cntl_mask = { - DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) -}; - -#define aux_regs(id)\ -[id] = {\ - DCN2_AUX_REG_LIST(id)\ -} - -static const struct 
dcn10_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4) -}; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\ - DPCS_DCN21_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\ - DPCS_DCN21_MASK_SH_LIST(_MASK) -}; - -static int map_transmitter_id_to_phy_instance( - enum transmitter transmitter) -{ - switch (transmitter) { - case TRANSMITTER_UNIPHY_A: - return 0; - break; - case TRANSMITTER_UNIPHY_B: - return 1; - break; - case TRANSMITTER_UNIPHY_C: - return 2; - break; - case TRANSMITTER_UNIPHY_D: - return 3; - break; - case TRANSMITTER_UNIPHY_E: - return 4; - break; - default: - ASSERT(0); - return 0; - } -} - -static struct link_encoder *dcn21_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn21_link_encoder *enc21 = - kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL); - int link_regs_id; - - if (!enc21) - return NULL; - - link_regs_id = - map_transmitter_id_to_phy_instance(enc_init_data->transmitter); - - dcn21_link_encoder_construct(enc21, - enc_init_data, - &link_enc_feature, - &link_enc_regs[link_regs_id], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc21->enc10.base; -} - -static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dce_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dce_panel_cntl_construct(panel_cntl, - init_data, - &panel_cntl_regs[init_data->inst], - &panel_cntl_shift, - &panel_cntl_mask); - - return &panel_cntl->base; -} - -static void dcn21_get_panel_config_defaults(struct dc_panel_config *panel_config) -{ - *panel_config = panel_config_defaults; -} - -#define CTX ctx - -#define REG(reg_name) \ - (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) - -static uint32_t read_pipe_fuses(struct dc_context *ctx) -{ - uint32_t value = REG_READ(CC_DC_PIPE_DIS); - /* RV1 support max 4 pipes */ - value = value & 0xf; - return value; -} - -static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state) -{ - if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) { - plane_state->dcc.enable = 1; - /* align to our worst case block width */ - plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024; - } - - return dcn20_patch_unknown_plane_state(plane_state); -} - -static const struct resource_funcs dcn21_res_pool_funcs = { - .destroy = dcn21_destroy_resource_pool, - .link_enc_create = dcn21_link_encoder_create, - .panel_cntl_create = dcn21_panel_cntl_create, - .validate_bandwidth = dcn21_validate_bandwidth, - .populate_dml_pipes = dcn21_populate_dml_pipes_from_context, - .add_stream_to_ctx = dcn20_add_stream_to_ctx, - .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, - .release_pipe = dcn20_release_pipe, - .populate_dml_writeback_from_context = 
dcn20_populate_dml_writeback_from_context, - .patch_unknown_plane_state = dcn21_patch_unknown_plane_state, - .set_mcif_arb_params = dcn20_set_mcif_arb_params, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, - .update_bw_bounding_box = dcn21_update_bw_bounding_box, - .get_panel_config_defaults = dcn21_get_panel_config_defaults, -}; - -static bool dcn21_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dcn21_resource_pool *pool) -{ - int i, j; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data init_data; - uint32_t pipe_fuses = read_pipe_fuses(ctx); - uint32_t num_pipes; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap_rn; -#ifdef DIAGS_BUILD - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - //pool->base.res_cap = &res_cap_nv10_FPGA_2pipe_dsc; - pool->base.res_cap = &res_cap_rn_FPGA_4pipe; -#endif - - pool->base.funcs = &dcn21_res_pool_funcs; - - /************************************************* - * Resource + asic cap hardcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - - /* max pipe num for ASIC before checking pipe fuses */ - pool->base.pipe_count = pool->base.res_cap->num_timing_generator; - - dc->caps.max_downscale_ratio = 200; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 5; /* 1.4 w/a applied by default */ - dc->caps.max_cursor_size = 256; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dmdata_alloc_size = 2048; - - dc->caps.max_slave_planes = 1; - dc->caps.max_slave_yuv_planes = 1; - dc->caps.max_slave_rgb_planes = 1; - dc->caps.post_blend_color_processing = true; - dc->caps.force_dp_tps4_for_cp2520 = true; - dc->caps.extended_aux_timeout_support = true; - dc->caps.dmcub_support = true; - dc->caps.is_apu = true; - - /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 0; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 1; - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.dgam_rom_caps.pq = 0; - dc->caps.color.dpp.dgam_rom_caps.hlg = 0; - dc->caps.color.dpp.post_csc = 0; - dc->caps.color.dpp.gamma_corr = 0; - dc->caps.color.dpp.dgam_rom_for_yuv = 1; - - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; - // no OGAM ROM on DCN2 - dc->caps.color.dpp.ogam_rom_caps.srgb = 0; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 0; - - dc->caps.color.mpc.gamut_remap = 0; - dc->caps.color.mpc.num_3dluts = 0; - dc->caps.color.mpc.shared_3d_lut = 0; - dc->caps.color.mpc.ogam_ram = 1; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; - dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 1; - - dc->caps.dp_hdmi21_pcon_support = true; - - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) - dc->debug = debug_defaults_drv; - - // Init the vm_helper - if (dc->vm_helper) - vm_helper_init(dc->vm_helper, 16); - - /************************************************* - * Create resources * - *************************************************/ - - pool->base.clock_sources[DCN20_CLK_SRC_PLL0]
= - dcn21_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCN20_CLK_SRC_PLL1] = - dcn21_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCN20_CLK_SRC_PLL2] = - dcn21_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCN20_CLK_SRC_PLL3] = - dcn21_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - pool->base.clock_sources[DCN20_CLK_SRC_PLL4] = - dcn21_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - - pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21; - - /* todo: not reuse phy_pll registers */ - pool->base.dp_clock_source = - dcn21_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - pool->base.dccg = dccg21_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->base.dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - if (!dc->config.disable_dmcu) { - pool->base.dmcu = dcn21_dmcu_create(ctx, - &dmcu_regs, - &dmcu_shift, - &dmcu_mask); - if (pool->base.dmcu == NULL) { - dm_error("DC: failed to create dmcu!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - dc->debug.dmub_command_table = false; - } - - if (dc->config.disable_dmcu) { - pool->base.psr = dmub_psr_create(ctx); - - if (pool->base.psr == NULL) { - dm_error("DC: failed to create psr obj!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - if (dc->config.disable_dmcu) - pool->base.abm = dmub_abm_create(ctx, - &abm_regs, - &abm_shift, - &abm_mask); - else - pool->base.abm = dce_abm_create(ctx, - &abm_regs, - &abm_shift, - &abm_mask); - - pool->base.pp_smu = dcn21_pp_smu_create(ctx); - - num_pipes = dcn2_1_ip.max_num_dpp; - - for (i = 0; i < dcn2_1_ip.max_num_dpp; i++) - if (pipe_fuses & 1 << i) - num_pipes--; - dcn2_1_ip.max_num_dpp = num_pipes; - dcn2_1_ip.max_num_otg = num_pipes; - - dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); - - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dcn21_create(&init_data); - if (!pool->base.irqs) - goto create_fail; - - j = 0; - /* mem input -> ipp -> dpp -> opp -> TG */ - for (i = 0; i < pool->base.pipe_count; i++) { - /* if pipe is disabled, skip instance of HW pipe, - * i.e, skip ASIC register instance - */ - if ((pipe_fuses & (1 << i)) != 0) - continue; - - pool->base.hubps[j] = dcn21_hubp_create(ctx, i); - if (pool->base.hubps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create memory input!\n"); - goto create_fail; - } - - pool->base.ipps[j] = dcn21_ipp_create(ctx, i); - if (pool->base.ipps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create input pixel processor!\n"); - goto create_fail; - } - - pool->base.dpps[j] = dcn21_dpp_create(ctx, i); - if (pool->base.dpps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create dpps!\n"); - goto create_fail; - } - - pool->base.opps[j] = dcn21_opp_create(ctx, i); - if (pool->base.opps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - - 
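- 		/* Note on the indexing in this loop: i walks the physical HW pipe
- 		 * instances (fused-off pipes were skipped above), while j packs the
- 		 * surviving pipes contiguously into the pool arrays, so j <= i and
- 		 * pipe_count/timing_generator_count/mpcc_count are later set to the
- 		 * final j.
- 		 */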
pool->base.timing_generators[j] = dcn21_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - j++; - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dcn21_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto create_fail; - } - pool->base.hw_i2cs[i] = dcn21_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create hw i2c!!\n"); - goto create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - pool->base.timing_generator_count = j; - pool->base.pipe_count = j; - pool->base.mpcc_count = j; - - pool->base.mpc = dcn21_mpc_create(ctx); - if (pool->base.mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - pool->base.hubbub = dcn21_hubbub_create(ctx); - if (pool->base.hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - pool->base.dscs[i] = dcn21_dsc_create(ctx, i); - if (pool->base.dscs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create display stream compressor %d!\n", i); - goto create_fail; - } - } - - if (!dcn20_dwbc_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dwbc!\n"); - goto create_fail; - } - if (!dcn20_mmhubbub_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mcif_wb!\n"); - goto create_fail; - } - - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto create_fail; - - dcn21_hw_sequencer_construct(dc); - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - return true; - -create_fail: - - dcn21_resource_destruct(pool); - - return false; -} - -struct resource_pool *dcn21_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc) -{ - struct dcn21_resource_pool *pool = - kzalloc(sizeof(struct dcn21_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dcn21_resource_construct(init_data->num_virtual_links, dc, pool)) - return &pool->base; - - BREAK_TO_DEBUGGER(); - kfree(pool); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h deleted file mode 100644 index f7ecc002c2..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef _DCN21_RESOURCE_H_ -#define _DCN21_RESOURCE_H_ - -#include "core_types.h" - -#define TO_DCN21_RES_POOL(pool)\ - container_of(pool, struct dcn21_resource_pool, base) - -struct dc; -struct resource_pool; -struct _vcs_dpi_display_pipe_params_st; - -extern struct _vcs_dpi_ip_params_st dcn2_1_ip; -extern struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc; - -struct dcn21_resource_pool { - struct resource_pool base; -}; -struct resource_pool *dcn21_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); -bool dcn21_fast_validate_bw( - struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *pipe_cnt_out, - int *pipe_split_from, - int *vlevel_out, - bool fast_validate); - -#endif /* _DCN21_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile index af4d2065d2..b5b2aa3b37 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile @@ -23,12 +23,9 @@ # # -DCN30 := \ - dcn30_init.o \ - dcn30_hubbub.o \ +DCN30 := dcn30_hubbub.o \ dcn30_hubp.o \ dcn30_dpp.o \ - dcn30_optc.o \ dcn30_dccg.o \ dcn30_mpc.o dcn30_vpg.o \ dcn30_afmt.o \ @@ -38,7 +35,6 @@ DCN30 := \ dcn30_dwb_cm.o \ dcn30_cm_common.o \ dcn30_mmhubbub.o \ - dcn30_resource.o \ dcn30_dio_link_encoder.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c index e43f77c11c..5f97a868ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c @@ -56,16 +56,13 @@ static void dpp3_enable_cm_block( static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base) { - enum dc_lut_mode mode; + enum dc_lut_mode mode = LUT_BYPASS; uint32_t state_mode; uint32_t lut_mode; struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode); - if (state_mode == 0) - mode = LUT_BYPASS; - if (state_mode == 2) {//Programmable RAM LUT REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode); if (lut_mode == 0) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c index 0d98918bf0..1b9d9495f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c @@ -130,6 +130,28 @@ bool dwb3_disable(struct dwbc *dwbc) return true; } +void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable) +{ + struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); + unsigned int pre_locked; + + REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked); + + /* Lock DWB registers */ + if (pre_locked == 0) + REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1); + + /* Enable or disable frame capture */ + REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, enable); + + /* Unlock DWB registers */ + if (pre_locked == 0) + REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0); + + DC_LOG_DWB("%s dwb3_fc_disabled at inst = %d",
__func__, dwbc->inst); +} + + bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params) { struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc); @@ -226,6 +248,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = { .disable = dwb3_disable, .update = dwb3_update, .is_enabled = dwb3_is_enabled, + .set_fc_enable = dwb3_set_fc_enable, .set_stereo = dwb3_set_stereo, .set_new_content = dwb3_set_new_content, .dwb_program_output_csc = NULL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h index a5d1b81e76..332634b76a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h @@ -877,6 +877,8 @@ bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params); bool dwb3_is_enabled(struct dwbc *dwbc); +void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable); + void dwb3_set_stereo(struct dwbc *dwbc, struct dwb_stereo_params *stereo_params); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c index 701c7d8bc0..03a50c32fc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c @@ -243,6 +243,9 @@ static bool dwb3_program_ogam_lut( return false; } + if (params->hw_points_num == 0) + return false; + REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2); current_mode = dwb3_get_ogam_current(dwbc30); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c deleted file mode 100644 index 9894caedff..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright 2016-2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dce110/dce110_hwseq.h" -#include "dcn10/dcn10_hwseq.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn21/dcn21_hwseq.h" -#include "dcn30/dcn30_hwseq.h" - -#include "dcn30_init.h" - -static const struct hw_sequencer_funcs dcn30_funcs = { - .program_gamut_remap = dcn30_program_gamut_remap, - .init_hw = dcn30_init_hw, - .apply_ctx_to_hw = dce110_apply_ctx_to_hw, - .apply_ctx_for_surface = NULL, - .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, - .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, - .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, - .update_plane_addr = dcn20_update_plane_addr, - .update_dchub = dcn10_update_dchub, - .update_pending_status = dcn10_update_pending_status, - .program_output_csc = dcn20_program_output_csc, - .enable_accelerated_mode = dce110_enable_accelerated_mode, - .enable_timing_synchronization = dcn10_enable_timing_synchronization, - .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, - .update_info_frame = dcn30_update_info_frame, - .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, - .enable_stream = dcn20_enable_stream, - .disable_stream = dce110_disable_stream, - .unblank_stream = dcn20_unblank_stream, - .blank_stream = dce110_blank_stream, - .enable_audio_stream = dce110_enable_audio_stream, - .disable_audio_stream = dce110_disable_audio_stream, - .disable_plane = dcn20_disable_plane, - .disable_pixel_data = dcn20_disable_pixel_data, - .pipe_control_lock = dcn20_pipe_control_lock, - .interdependent_update_lock = dcn10_lock_all_pipes, - .cursor_lock = dcn10_cursor_lock, - .prepare_bandwidth = dcn30_prepare_bandwidth, - .optimize_bandwidth = dcn20_optimize_bandwidth, - .update_bandwidth = dcn20_update_bandwidth, - .set_drr = dcn10_set_drr, - .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, - .setup_stereo = dcn10_setup_stereo, - .set_avmute = dcn30_set_avmute, - .log_hw_state = dcn10_log_hw_state, - .get_hw_state = dcn10_get_hw_state, - .clear_status_bits = dcn10_clear_status_bits, - .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, - .edp_power_control = dce110_edp_power_control, - .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, - .edp_wait_for_T12 = dce110_edp_wait_for_T12, - .set_cursor_position = dcn10_set_cursor_position, - .set_cursor_attribute = dcn10_set_cursor_attribute, - .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .set_clock = dcn10_set_clock, - .get_clock = dcn10_get_clock, - .program_triplebuffer = dcn20_program_triple_buffer, - .enable_writeback = dcn30_enable_writeback, - .disable_writeback = dcn30_disable_writeback, - .update_writeback = dcn30_update_writeback, - .mmhubbub_warmup = dcn30_mmhubbub_warmup, - .dmdata_status_done = dcn20_dmdata_status_done, - .program_dmdata_engine = dcn30_program_dmdata_engine, - .set_dmdata_attributes = dcn20_set_dmdata_attributes, - .init_sys_ctx = dcn20_init_sys_ctx, - .init_vm_ctx = dcn20_init_vm_ctx, - .set_flip_control_gsl = dcn20_set_flip_control_gsl, - .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, - .calc_vupdate_position = dcn10_calc_vupdate_position, - .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations, - .does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall, - .set_backlight_level = dcn21_set_backlight_level, - 
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable, - .hardware_release = dcn30_hardware_release, - .set_pipe = dcn21_set_pipe, - .enable_lvds_link_output = dce110_enable_lvds_link_output, - .enable_tmds_link_output = dce110_enable_tmds_link_output, - .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dce110_disable_link_output, - .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, - .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .update_visual_confirm_color = dcn10_update_visual_confirm_color, - .is_abm_supported = dcn21_is_abm_supported -}; - -static const struct hwseq_private_funcs dcn30_private_funcs = { - .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, - .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, - .update_mpcc = dcn20_update_mpcc, - .set_input_transfer_func = dcn30_set_input_transfer_func, - .set_output_transfer_func = dcn30_set_output_transfer_func, - .power_down = dce110_power_down, - .enable_display_power_gating = dcn10_dummy_display_power_gating, - .blank_pixel_data = dcn20_blank_pixel_data, - .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, - .enable_stream_timing = dcn20_enable_stream_timing, - .edp_backlight_control = dce110_edp_backlight_control, - .disable_stream_gating = dcn20_disable_stream_gating, - .enable_stream_gating = dcn20_enable_stream_gating, - .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, - .did_underflow_occur = dcn10_did_underflow_occur, - .init_blank = dcn20_init_blank, - .disable_vga = dcn20_disable_vga, - .bios_golden_init = dcn10_bios_golden_init, - .plane_atomic_disable = dcn20_plane_atomic_disable, - .plane_atomic_power_down = dcn10_plane_atomic_power_down, - .enable_power_gating_plane = dcn20_enable_power_gating_plane, - .dpp_pg_control = dcn20_dpp_pg_control, - .hubp_pg_control = dcn20_hubp_pg_control, - .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, - .update_odm = dcn20_update_odm, - .dsc_pg_control = dcn20_dsc_pg_control, - .set_hdr_multiplier = dcn10_set_hdr_multiplier, - .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, - .wait_for_blank_complete = dcn20_wait_for_blank_complete, - .dccg_init = dcn20_dccg_init, - .set_blend_lut = dcn30_set_blend_lut, - .set_shaper_3dlut = dcn20_set_shaper_3dlut, -}; - -void dcn30_hw_sequencer_construct(struct dc *dc) -{ - dc->hwss = dcn30_funcs; - dc->hwseq->funcs = dcn30_private_funcs; - -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h deleted file mode 100644 index c280ff90bf..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_DCN30_INIT_H__ -#define __DC_DCN30_INIT_H__ - -struct dc; - -void dcn30_hw_sequencer_construct(struct dc *dc); - -#endif /* __DC_DCN30_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c deleted file mode 100644 index b97bdb868a..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ /dev/null @@ -1,393 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "reg_helper.h" -#include "dcn30_optc.h" -#include "dc.h" -#include "dcn_calc_math.h" -#include "dc_dmub_srv.h" - -#include "dml/dcn30/dcn30_fpu.h" -#include "dc_trace.h" - -#define REG(reg)\ - optc1->tg_regs->reg - -#define CTX \ - optc1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - optc1->tg_shift->field_name, optc1->tg_mask->field_name - -void optc3_triplebuffer_lock(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE(OTG_GLOBAL_CONTROL2, - OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); - - REG_SET(OTG_VUPDATE_KEEPOUT, 0, - OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); - - REG_SET(OTG_MASTER_UPDATE_LOCK, 0, - OTG_MASTER_UPDATE_LOCK, 1); - - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); - - TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); -} - -void optc3_lock_doublebuffer_enable(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t v_blank_start = 0; - uint32_t v_blank_end = 0; - uint32_t h_blank_start = 0; - uint32_t h_blank_end = 0; - - REG_GET_2(OTG_V_BLANK_START_END, - OTG_V_BLANK_START, &v_blank_start, - OTG_V_BLANK_END, &v_blank_end); - REG_GET_2(OTG_H_BLANK_START_END, - OTG_H_BLANK_START, &h_blank_start, - OTG_H_BLANK_END, &h_blank_end); - - REG_UPDATE_2(OTG_GLOBAL_CONTROL1, - MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start - 1, - MASTER_UPDATE_LOCK_DB_END_Y, v_blank_start); - REG_UPDATE_2(OTG_GLOBAL_CONTROL4, - DIG_UPDATE_POSITION_X, h_blank_start - 180 - 1, - DIG_UPDATE_POSITION_Y, v_blank_start - 1); - // there is a DIG_UPDATE_VCOUNT_MODE and it is 0. - - REG_UPDATE_3(OTG_GLOBAL_CONTROL0, - MASTER_UPDATE_LOCK_DB_START_X, h_blank_start - 200 - 1, - MASTER_UPDATE_LOCK_DB_END_X, h_blank_start - 180, - MASTER_UPDATE_LOCK_DB_EN, 1); - REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1); - - REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, - MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0, - MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100, - OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); - - TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); -} - -void optc3_lock_doublebuffer_disable(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE_2(OTG_GLOBAL_CONTROL0, - MASTER_UPDATE_LOCK_DB_START_X, 0, - MASTER_UPDATE_LOCK_DB_END_X, 0); - REG_UPDATE_2(OTG_GLOBAL_CONTROL1, - MASTER_UPDATE_LOCK_DB_START_Y, 0, - MASTER_UPDATE_LOCK_DB_END_Y, 0); - - REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 0); - REG_UPDATE(OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, 0); - - TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); -} - -void optc3_lock(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE(OTG_GLOBAL_CONTROL2, - OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); - REG_SET(OTG_MASTER_UPDATE_LOCK, 0, - OTG_MASTER_UPDATE_LOCK, 1); - - REG_WAIT(OTG_MASTER_UPDATE_LOCK, - UPDATE_LOCK_STATUS, 1, - 1, 10); - - TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); -} - -void optc3_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE(OTG_CONTROL, OTG_OUT_MUX, dest); -} - -void optc3_program_blank_color(struct timing_generator *optc, - const struct tg_color *blank_color) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET_3(OTG_BLANK_DATA_COLOR, 0, - OTG_BLANK_DATA_COLOR_BLUE_CB, blank_color->color_b_cb, - OTG_BLANK_DATA_COLOR_GREEN_Y, blank_color->color_g_y, - 
OTG_BLANK_DATA_COLOR_RED_CR, blank_color->color_r_cr); - - REG_SET_3(OTG_BLANK_DATA_COLOR_EXT, 0, - OTG_BLANK_DATA_COLOR_BLUE_CB_EXT, blank_color->color_b_cb >> 10, - OTG_BLANK_DATA_COLOR_GREEN_Y_EXT, blank_color->color_g_y >> 10, - OTG_BLANK_DATA_COLOR_RED_CR_EXT, blank_color->color_r_cr >> 10); -} - -void optc3_set_drr_trigger_window(struct timing_generator *optc, - uint32_t window_start, uint32_t window_end) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET_2(OTG_DRR_TRIGGER_WINDOW, 0, - OTG_DRR_TRIGGER_WINDOW_START_X, window_start, - OTG_DRR_TRIGGER_WINDOW_END_X, window_end); -} - -void optc3_set_vtotal_change_limit(struct timing_generator *optc, - uint32_t limit) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - - REG_SET(OTG_DRR_V_TOTAL_CHANGE, 0, - OTG_DRR_V_TOTAL_CHANGE_LIMIT, limit); -} - - -/* Set DSC-related configuration. - * dsc_mode: 0 disables DSC, other values enable DSC in specified format - * dsc_bytes_per_pixel: Bytes per pixel in u3.28 format - * dsc_slice_width: Slice width in pixels - */ -void optc3_set_dsc_config(struct timing_generator *optc, - enum optc_dsc_mode dsc_mode, - uint32_t dsc_bytes_per_pixel, - uint32_t dsc_slice_width) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - optc2_set_dsc_config(optc, dsc_mode, dsc_bytes_per_pixel, dsc_slice_width); - REG_UPDATE(OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, 0); -} - -void optc3_set_odm_bypass(struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - enum h_timing_div_mode h_div = H_TIMING_NO_DIV; - - REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 0, - OPTC_SEG0_SRC_SEL, optc->inst, - OPTC_SEG1_SRC_SEL, 0xf, - OPTC_SEG2_SRC_SEL, 0xf, - OPTC_SEG3_SRC_SEL, 0xf - ); - - h_div = optc1_is_two_pixels_per_containter(dc_crtc_timing); - REG_UPDATE(OTG_H_TIMING_CNTL, - OTG_H_TIMING_DIV_MODE, h_div); - - REG_SET(OPTC_MEMORY_CONFIG, 0, - OPTC_MEM_SEL, 0); - optc1->opp_count = 1; -} - -void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, - struct dc_crtc_timing *timing) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right) - / opp_cnt; - uint32_t memory_mask = 0; - - /* TODO: In pseudocode but does not affect maximus, delete comment if we don't need on asic - * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1); - * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start - * REG_SET_2(OTG_GLOBAL_CONTROL1, 0, - * MASTER_UPDATE_LOCK_DB_X, 160, - * MASTER_UPDATE_LOCK_DB_Y, 240); - */ - - ASSERT(opp_cnt == 2 || opp_cnt == 4); - - /* 2 pieces of memory required for up to 5120-wide displays, 4 for up to 8192, - * however, for ODM combine we can simplify by always using 4. - */ - if (opp_cnt == 2) { - /* To make sure there's no memory overlap, each instance "reserves" 2 - * memories and they are uniquely combined here. - */ - memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); - } else if (opp_cnt == 4) { - /* To make sure there's no memory overlap, each instance "reserves" 1 - * memory and they are uniquely combined here.
- */ - memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2) | 0x1 << (opp_id[2] * 2) | 0x1 << (opp_id[3] * 2); - } - - if (REG(OPTC_MEMORY_CONFIG)) - REG_SET(OPTC_MEMORY_CONFIG, 0, - OPTC_MEM_SEL, memory_mask); - - if (opp_cnt == 2) { - REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 1, - OPTC_SEG0_SRC_SEL, opp_id[0], - OPTC_SEG1_SRC_SEL, opp_id[1]); - } else if (opp_cnt == 4) { - REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 3, - OPTC_SEG0_SRC_SEL, opp_id[0], - OPTC_SEG1_SRC_SEL, opp_id[1], - OPTC_SEG2_SRC_SEL, opp_id[2], - OPTC_SEG3_SRC_SEL, opp_id[3]); - } - - REG_UPDATE(OPTC_WIDTH_CONTROL, - OPTC_SEGMENT_WIDTH, mpcc_hactive); - - REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1); - optc1->opp_count = opp_cnt; -} - -/** - * optc3_set_timing_double_buffer() - DRR double buffering control - * - * Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN, - * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers. - * - * @optc: timing_generator instance. - * @enable: Enable DRR double buffering control if true, disable otherwise. - * - * Options: any time, start of frame, dp start of frame (range timing) - */ -static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t mode = enable ? 2 : 0; - - REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL, - OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode); -} - -void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, 0, 2, 100000); /* 1 vupdate at 5 Hz */ - -} - -void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) -{ - struct dc *dc = optc->ctx->dc; - - if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams) - dc_dmub_srv_drr_update_cmd(dc, optc->inst, vtotal_min, vtotal_max); - else - optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max); -} - -void optc3_tg_init(struct timing_generator *optc) -{ - optc3_set_timing_double_buffer(optc, true); - optc1_clear_optc_underflow(optc); -} - -static struct timing_generator_funcs dcn30_tg_funcs = { - .validate_timing = optc1_validate_timing, - .program_timing = optc1_program_timing, - .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, - .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, - .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, - .program_global_sync = optc1_program_global_sync, - .enable_crtc = optc2_enable_crtc, - .disable_crtc = optc1_disable_crtc, - /* used by enable_timing_synchronization. Not needed for FPGA */ - .is_counter_moving = optc1_is_counter_moving, - .get_position = optc1_get_position, - .get_frame_count = optc1_get_vblank_counter, - .get_scanoutpos = optc1_get_crtc_scanoutpos, - .get_otg_active_size = optc1_get_otg_active_size, - .set_early_control = optc1_set_early_control, - /* used by enable_timing_synchronization. 
Not needed for FPGA */ - .wait_for_state = optc1_wait_for_state, - .set_blank_color = optc3_program_blank_color, - .did_triggered_reset_occur = optc1_did_triggered_reset_occur, - .triplebuffer_lock = optc3_triplebuffer_lock, - .triplebuffer_unlock = optc2_triplebuffer_unlock, - .enable_reset_trigger = optc1_enable_reset_trigger, - .enable_crtc_reset = optc1_enable_crtc_reset, - .disable_reset_trigger = optc1_disable_reset_trigger, - .lock = optc3_lock, - .unlock = optc1_unlock, - .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, - .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, - .enable_optc_clock = optc1_enable_optc_clock, - .set_drr = optc1_set_drr, - .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, - .set_vtotal_min_max = optc3_set_vtotal_min_max, - .set_static_screen_control = optc1_set_static_screen_control, - .program_stereo = optc1_program_stereo, - .is_stereo_left_eye = optc1_is_stereo_left_eye, - .tg_init = optc3_tg_init, - .is_tg_enabled = optc1_is_tg_enabled, - .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, - .clear_optc_underflow = optc1_clear_optc_underflow, - .setup_global_swap_lock = NULL, - .get_crc = optc1_get_crc, - .configure_crc = optc2_configure_crc, - .set_dsc_config = optc3_set_dsc_config, - .get_dsc_status = optc2_get_dsc_status, - .set_dwb_source = NULL, - .set_odm_bypass = optc3_set_odm_bypass, - .set_odm_combine = optc3_set_odm_combine, - .get_optc_source = optc2_get_optc_source, - .set_out_mux = optc3_set_out_mux, - .set_drr_trigger_window = optc3_set_drr_trigger_window, - .set_vtotal_change_limit = optc3_set_vtotal_change_limit, - .set_gsl = optc2_set_gsl, - .set_gsl_source_select = optc2_set_gsl_source_select, - .set_vtg_params = optc1_set_vtg_params, - .program_manual_trigger = optc2_program_manual_trigger, - .setup_manual_trigger = optc2_setup_manual_trigger, - .get_hw_timing = optc1_get_hw_timing, - .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, -}; - -void dcn30_timing_generator_init(struct optc *optc1) -{ - optc1->base.funcs = &dcn30_tg_funcs; - - optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; - optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; - - optc1->min_h_blank = 32; - optc1->min_v_blank = 3; - optc1->min_v_blank_interlace = 5; - optc1->min_h_sync_width = 4; - optc1->min_v_sync_width = 1; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h deleted file mode 100644 index d3a056c12b..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h +++ /dev/null @@ -1,359 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_OPTC_DCN30_H__ -#define __DC_OPTC_DCN30_H__ - -#include "dcn20/dcn20_optc.h" - -#define V_TOTAL_REGS_DCN30_SRI(inst) - -#define OPTC_COMMON_REG_LIST_DCN3_BASE(inst) \ - SRI(OTG_VSTARTUP_PARAM, OTG, inst),\ - SRI(OTG_VUPDATE_PARAM, OTG, inst),\ - SRI(OTG_VREADY_PARAM, OTG, inst),\ - SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL4, OTG, inst),\ - SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\ - SRI(OTG_H_TOTAL, OTG, inst),\ - SRI(OTG_H_BLANK_START_END, OTG, inst),\ - SRI(OTG_H_SYNC_A, OTG, inst),\ - SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\ - SRI(OTG_H_TIMING_CNTL, OTG, inst),\ - SRI(OTG_V_TOTAL, OTG, inst),\ - SRI(OTG_V_BLANK_START_END, OTG, inst),\ - SRI(OTG_V_SYNC_A, OTG, inst),\ - SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\ - SRI(OTG_CONTROL, OTG, inst),\ - SRI(OTG_STEREO_CONTROL, OTG, inst),\ - SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\ - SRI(OTG_STEREO_STATUS, OTG, inst),\ - SRI(OTG_V_TOTAL_MAX, OTG, inst),\ - SRI(OTG_V_TOTAL_MIN, OTG, inst),\ - SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\ - V_TOTAL_REGS_DCN30_SRI(inst)\ - SRI(OTG_TRIGA_CNTL, OTG, inst),\ - SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ - SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ - SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\ - SRI(OTG_STATUS, OTG, inst),\ - SRI(OTG_STATUS_POSITION, OTG, inst),\ - SRI(OTG_NOM_VERT_POSITION, OTG, inst),\ - SRI(OTG_BLANK_DATA_COLOR, OTG, inst),\ - SRI(OTG_BLANK_DATA_COLOR_EXT, OTG, inst),\ - SRI(OTG_M_CONST_DTO0, OTG, inst),\ - SRI(OTG_M_CONST_DTO1, OTG, inst),\ - SRI(OTG_CLOCK_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ - SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ - SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\ - SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\ - SRI(CONTROL, VTG, inst),\ - SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\ - SRI(OTG_GSL_CONTROL, OTG, inst),\ - SRI(OTG_CRC_CNTL, OTG, inst),\ - SRI(OTG_CRC_CNTL2, OTG, inst),\ - SRI(OTG_CRC0_DATA_RG, OTG, inst),\ - SRI(OTG_CRC0_DATA_B, OTG, inst),\ - SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\ - SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\ - SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\ - SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\ - SR(GSL_SOURCE_SELECT),\ - SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\ - SRI(OTG_DRR_CONTROL, OTG, inst) - - -#define OPTC_COMMON_REG_LIST_DCN3_0(inst) \ - OPTC_COMMON_REG_LIST_DCN3_BASE(inst),\ - SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ - SRI(OTG_GSL_WINDOW_X, OTG, inst),\ - SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ - SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ - SRI(OTG_DSC_START_POSITION, OTG, inst),\ - SRI(OTG_CRC_CNTL2, OTG, inst),\ - SRI(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\ - SRI(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\ - SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ - SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ - SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ - SRI(OPTC_MEMORY_CONFIG, ODM, 
inst),\ - SR(DWB_SOURCE_SELECT) - -#define DCN30_VTOTAL_REGS_SF(mask_sh) - -#define OPTC_COMMON_MASK_SH_LIST_DCN3_BASE(mask_sh)\ - SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ - SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\ - SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\ - SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\ - SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\ - SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_END_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_END_Y, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_Y, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ - SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ - SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ - SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\ - SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\ - SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ - SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK_EN, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MIN_EN, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\ - DCN30_VTOTAL_REGS_SF(mask_sh)\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\ - 
SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\ - SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\ - SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\ - SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\ - SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\ - SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\ - SF(OTG0_OTG_BLANK_DATA_COLOR, OTG_BLANK_DATA_COLOR_BLUE_CB, mask_sh),\ - SF(OTG0_OTG_BLANK_DATA_COLOR, OTG_BLANK_DATA_COLOR_GREEN_Y, mask_sh),\ - SF(OTG0_OTG_BLANK_DATA_COLOR, OTG_BLANK_DATA_COLOR_RED_CR, mask_sh),\ - SF(OTG0_OTG_BLANK_DATA_COLOR_EXT, OTG_BLANK_DATA_COLOR_BLUE_CB_EXT, mask_sh),\ - SF(OTG0_OTG_BLANK_DATA_COLOR_EXT, OTG_BLANK_DATA_COLOR_GREEN_Y_EXT, mask_sh),\ - SF(OTG0_OTG_BLANK_DATA_COLOR_EXT, OTG_BLANK_DATA_COLOR_RED_CR_EXT, mask_sh),\ - SF(OTG0_OTG_M_CONST_DTO0, OTG_M_CONST_DTO_PHASE, mask_sh),\ - SF(OTG0_OTG_M_CONST_DTO1, OTG_M_CONST_DTO_MODULO, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ - SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ - SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ - SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ - SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ - SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, 
mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\ - SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) - -#define OPTC_COMMON_MASK_SH_LIST_DCN3_0(mask_sh)\ - OPTC_COMMON_MASK_SH_LIST_DCN3_BASE(mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ - SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ - SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ - SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ - SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\ - SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\ - SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\ - SF(OTG0_OTG_DRR_TRIGGER_WINDOW, 
OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ - SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ - SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh) - -#define OPTC_COMMON_MASK_SH_LIST_DCN30(mask_sh)\ - OPTC_COMMON_MASK_SH_LIST_DCN3_BASE(mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ - SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG2_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG3_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ - SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ - SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ - SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\ - SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\ - SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\ - SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ - SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ - SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh) - -void dcn30_timing_generator_init(struct optc *optc1); - -void optc3_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest); - -void optc3_lock(struct timing_generator *optc); - -void optc3_lock_doublebuffer_enable(struct timing_generator *optc); - -void optc3_lock_doublebuffer_disable(struct timing_generator *optc); - -void optc3_set_drr_trigger_window(struct timing_generator *optc, - uint32_t window_start, uint32_t window_end); - -void optc3_triplebuffer_lock(struct timing_generator *optc); - -void optc3_program_blank_color(struct timing_generator *optc, - const struct tg_color *blank_color); - -void 
optc3_set_vtotal_change_limit(struct timing_generator *optc, - uint32_t limit); - -void optc3_set_dsc_config(struct timing_generator *optc, - enum optc_dsc_mode dsc_mode, - uint32_t dsc_bytes_per_pixel, - uint32_t dsc_slice_width); - -void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable); - -void optc3_set_odm_bypass(struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing); -void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, - struct dc_crtc_timing *timing); -void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc); -void optc3_tg_init(struct timing_generator *optc); -void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max); -#endif /* __DC_OPTC_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c deleted file mode 100644 index 7b259cb5f4..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ /dev/null @@ -1,2611 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - - -#include "dm_services.h" -#include "dc.h" - -#include "dcn30_init.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn20/dcn20_resource.h" - -#include "dcn30_resource.h" - -#include "dcn10/dcn10_ipp.h" -#include "dcn30/dcn30_hubbub.h" -#include "dcn30/dcn30_mpc.h" -#include "dcn30/dcn30_hubp.h" -#include "irq/dcn30/irq_service_dcn30.h" -#include "dcn30/dcn30_dpp.h" -#include "dcn30/dcn30_optc.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn30/dcn30_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn30/dcn30_opp.h" -#include "dcn20/dcn20_dsc.h" -#include "dcn30/dcn30_vpg.h" -#include "dcn30/dcn30_afmt.h" -#include "dcn30/dcn30_dio_stream_encoder.h" -#include "dcn30/dcn30_dio_link_encoder.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" -#include "dce110/dce110_resource.h" -#include "dml/display_mode_vba.h" -#include "dcn30/dcn30_dccg.h" -#include "dcn10/dcn10_resource.h" -#include "link.h" -#include "dce/dce_panel_cntl.h" - -#include "dcn30/dcn30_dwb.h" -#include "dcn30/dcn30_mmhubbub.h" - -#include "sienna_cichlid_ip_offset.h" -#include "dcn/dcn_3_0_0_offset.h" -#include "dcn/dcn_3_0_0_sh_mask.h" - -#include "nbio/nbio_7_4_offset.h" - -#include "dpcs/dpcs_3_0_0_offset.h" -#include "dpcs/dpcs_3_0_0_sh_mask.h" - -#include "mmhub/mmhub_2_0_0_offset.h" -#include "mmhub/mmhub_2_0_0_sh_mask.h" - -#include "reg_helper.h" -#include "dce/dmub_abm.h" -#include "dce/dmub_psr.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" - -#include "dml/dcn30/dcn30_fpu.h" -#include "dml/dcn30/display_mode_vba_30.h" -#include "vm_helper.h" -#include "dcn20/dcn20_vmid.h" -#include "amdgpu_socbb.h" -#include "dc_dmub_srv.h" - -#define DC_LOGGER \ - dc->ctx->logger -#define DC_LOGGER_INIT(logger) - -enum dcn30_clk_src_array_id { - DCN30_CLK_SRC_PLL0, - DCN30_CLK_SRC_PLL1, - DCN30_CLK_SRC_PLL2, - DCN30_CLK_SRC_PLL3, - DCN30_CLK_SRC_PLL4, - DCN30_CLK_SRC_PLL5, - DCN30_CLK_SRC_TOTAL -}; - -/* begin ********************* - * macros to expand register list macros defined in HW object header files - */ - -/* DCN */ -#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRIR(var_name, reg_name, block, id)\ - .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRII_MPC_RMU(reg_name, block, id)\ - .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRII_DWB(reg_name, temp_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## temp_name - -#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define DCCG_SRII(reg_name, block, id)\ - .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block 
## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - mm ## reg_name ## _ ## block ## id - -/* NBIO */ -#define NBIO_BASE_INNER(seg) \ - NBIO_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -/* MMHUB */ -#define MMHUB_BASE_INNER(seg) \ - MMHUB_BASE__INST0_SEG ## seg - -#define MMHUB_BASE(seg) \ - MMHUB_BASE_INNER(seg) - -#define MMHUB_SR(reg_name)\ - .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \ - mmMM ## reg_name - -/* CLOCK */ -#define CLK_BASE_INNER(seg) \ - CLK_BASE__INST0_SEG ## seg - -#define CLK_BASE(seg) \ - CLK_BASE_INNER(seg) - -#define CLK_SRI(reg_name, block, inst)\ - .reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## _ ## inst ## _ ## reg_name - - -static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_3), - NBIO_SR(BIOS_SCRATCH_6) -}; - -#define clk_src_regs(index, pllid)\ -[index] = {\ - CS_COMMON_REG_LIST_DCN2_0(index, pllid),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, C), - clk_src_regs(3, D), - clk_src_regs(4, E), - clk_src_regs(5, F) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -#define abm_regs(id)\ -[id] = {\ - ABM_DCN30_REG_LIST(id)\ -} - -static const struct dce_abm_registers abm_regs[] = { - abm_regs(0), - abm_regs(1), - abm_regs(2), - abm_regs(3), - abm_regs(4), - abm_regs(5), -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN30(_MASK) -}; - - - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6) -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define vpg_regs(id)\ -[id] = {\ - VPG_DCN3_REG_LIST(id)\ -} - -static const struct dcn30_vpg_registers vpg_regs[] = { - vpg_regs(0), - vpg_regs(1), - vpg_regs(2), - vpg_regs(3), - vpg_regs(4), - vpg_regs(5), - vpg_regs(6), -}; - -static const struct dcn30_vpg_shift vpg_shift = { - DCN3_VPG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn30_vpg_mask vpg_mask = { - DCN3_VPG_MASK_SH_LIST(_MASK) -}; - -#define afmt_regs(id)\ -[id] = {\ - AFMT_DCN3_REG_LIST(id)\ -} - -static const struct dcn30_afmt_registers afmt_regs[] = { - afmt_regs(0), - afmt_regs(1), - afmt_regs(2), - afmt_regs(3), - afmt_regs(4), - afmt_regs(5), - afmt_regs(6), -}; - -static const struct dcn30_afmt_shift afmt_shift = { - DCN3_AFMT_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn30_afmt_mask afmt_mask = { - DCN3_AFMT_MASK_SH_LIST(_MASK) -}; - -#define stream_enc_regs(id)\ -[id] = {\ - 
SE_DCN3_REG_LIST(id)\ -} - -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), - stream_enc_regs(4), - stream_enc_regs(5) -}; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - - -#define aux_regs(id)\ -[id] = {\ - DCN2_AUX_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4), - hpd_regs(5) -}; - -#define link_regs(id, phyid)\ -[id] = {\ - LE_DCN3_REG_LIST(id), \ - UNIPHY_DCN2_REG_LIST(phyid), \ - DPCS_DCN2_REG_LIST(id), \ - SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ -} - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -static const struct dcn10_link_enc_registers link_enc_regs[] = { - link_regs(0, A), - link_regs(1, B), - link_regs(2, C), - link_regs(3, D), - link_regs(4, E), - link_regs(5, F) -}; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT),\ - DPCS_DCN2_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK),\ - DPCS_DCN2_MASK_SH_LIST(_MASK) -}; - - -static const struct dce_panel_cntl_registers panel_cntl_regs[] = { - { DCN_PANEL_CNTL_REG_LIST() } -}; - -static const struct dce_panel_cntl_shift panel_cntl_shift = { - DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_panel_cntl_mask panel_cntl_mask = { - DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) -}; - -#define dpp_regs(id)\ -[id] = {\ - DPP_REG_LIST_DCN30(id),\ -} - -static const struct dcn3_dpp_registers dpp_regs[] = { - dpp_regs(0), - dpp_regs(1), - dpp_regs(2), - dpp_regs(3), - dpp_regs(4), - dpp_regs(5), -}; - -static const struct dcn3_dpp_shift tf_shift = { - DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) -}; - -static const struct dcn3_dpp_mask tf_mask = { - DPP_REG_LIST_SH_MASK_DCN30(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_REG_LIST_DCN30(id),\ -} - -static const struct dcn20_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3), - opp_regs(4), - opp_regs(5) -}; - -static const struct dcn20_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn20_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN20(_MASK) -}; - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST0(id), \ - .AUXN_IMPCAL = 0, \ - .AUXP_IMPCAL = 0, \ - .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4), - aux_engine_regs(5) -}; - -#define dwbc_regs_dcn3(id)\ -[id] = {\ - DWBC_COMMON_REG_LIST_DCN30(id),\ -} - -static const struct dcn30_dwbc_registers dwbc30_regs[] = { - dwbc_regs_dcn3(0), -}; - -static const struct dcn30_dwbc_shift dwbc30_shift = { - DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_dwbc_mask dwbc30_mask = { - 
DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define mcif_wb_regs_dcn3(id)\ -[id] = {\ - MCIF_WB_COMMON_REG_LIST_DCN30(id),\ -} - -static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { - mcif_wb_regs_dcn3(0) -}; - -static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define dsc_regsDCN20(id)\ -[id] = {\ - DSC_REG_LIST_DCN20(id)\ -} - -static const struct dcn20_dsc_registers dsc_regs[] = { - dsc_regsDCN20(0), - dsc_regsDCN20(1), - dsc_regsDCN20(2), - dsc_regsDCN20(3), - dsc_regsDCN20(4), - dsc_regsDCN20(5) -}; - -static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) -}; - -static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN20(_MASK) -}; - -static const struct dcn30_mpc_registers mpc_regs = { - MPC_REG_LIST_DCN3_0(0), - MPC_REG_LIST_DCN3_0(1), - MPC_REG_LIST_DCN3_0(2), - MPC_REG_LIST_DCN3_0(3), - MPC_REG_LIST_DCN3_0(4), - MPC_REG_LIST_DCN3_0(5), - MPC_OUT_MUX_REG_LIST_DCN3_0(0), - MPC_OUT_MUX_REG_LIST_DCN3_0(1), - MPC_OUT_MUX_REG_LIST_DCN3_0(2), - MPC_OUT_MUX_REG_LIST_DCN3_0(3), - MPC_OUT_MUX_REG_LIST_DCN3_0(4), - MPC_OUT_MUX_REG_LIST_DCN3_0(5), - MPC_RMU_GLOBAL_REG_LIST_DCN3AG, - MPC_RMU_REG_LIST_DCN3AG(0), - MPC_RMU_REG_LIST_DCN3AG(1), - MPC_RMU_REG_LIST_DCN3AG(2), - MPC_DWB_MUX_REG_LIST_DCN3_0(0), -}; - -static const struct dcn30_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define optc_regs(id)\ -[id] = {OPTC_COMMON_REG_LIST_DCN3_0(id)} - - -static const struct dcn_optc_registers optc_regs[] = { - optc_regs(0), - optc_regs(1), - optc_regs(2), - optc_regs(3), - optc_regs(4), - optc_regs(5) -}; - -static const struct dcn_optc_shift optc_shift = { - OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn_optc_mask optc_mask = { - OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define hubp_regs(id)\ -[id] = {\ - HUBP_REG_LIST_DCN30(id)\ -} - -static const struct dcn_hubp2_registers hubp_regs[] = { - hubp_regs(0), - hubp_regs(1), - hubp_regs(2), - hubp_regs(3), - hubp_regs(4), - hubp_regs(5) -}; - -static const struct dcn_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn_hubp2_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN30(_MASK) -}; - -static const struct dcn_hubbub_registers hubbub_reg = { - HUBBUB_REG_LIST_DCN30(0) -}; - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN30(_MASK) -}; - -static const struct dccg_registers dccg_regs = { - DCCG_REG_LIST_DCN30() -}; - -static const struct dccg_shift dccg_shift = { - DCCG_MASK_SH_LIST_DCN3(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN3(_MASK) -}; - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCN30_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN30_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN30_MASK_SH_LIST(_MASK) -}; -#define vmid_regs(id)\ -[id] = {\ - DCN20_VMID_REG_LIST(id)\ -} - -static const struct dcn_vmid_registers vmid_regs[] = { - vmid_regs(0), - vmid_regs(1), - vmid_regs(2), - vmid_regs(3), - vmid_regs(4), - vmid_regs(5), - vmid_regs(6), - vmid_regs(7), - vmid_regs(8), - 
vmid_regs(9), - vmid_regs(10), - vmid_regs(11), - vmid_regs(12), - vmid_regs(13), - vmid_regs(14), - vmid_regs(15) -}; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -static const struct resource_caps res_cap_dcn3 = { - .num_timing_generator = 6, - .num_opp = 6, - .num_video_plane = 6, - .num_audio = 6, - .num_stream_encoder = 6, - .num_pll = 6, - .num_dwb = 1, - .num_ddc = 6, - .num_vmid = 16, - .num_mpc_3dlut = 3, - .num_dsc = 6, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true, - .ayuv = false, - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 16000 - }, - - /* 6:1 downscaling ratio: 1000/6 = 166.666 */ - .max_downscale_factor = { - .argb8888 = 167, - .nv12 = 167, - .fp16 = 167 - }, - 16, - 16 -}; - -static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, //No DMCU on DCN30 - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, - .performance_trace = false, - .max_downscale_src_width = 7680,/* up to 8K */ - .disable_pplib_wm_range = false, - .scl_reset_length10 = true, - .sanity_checks = false, - .underflow_assert_delay_us = 0xFFFFFFFF, - .dwb_fi_phase = -1, // -1 = disable, - .dmub_command_table = true, - .use_max_lb = true, - .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = false, - .using_dml2 = false, -}; - -static const struct dc_panel_config panel_config_defaults = { - .psr = { - .disable_psr = false, - .disallow_psrsu = false, - .disallow_replay = false, - }, -}; - -static void dcn30_dpp_destroy(struct dpp **dpp) -{ - kfree(TO_DCN20_DPP(*dpp)); - *dpp = NULL; -} - -static struct dpp *dcn30_dpp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn3_dpp *dpp = - kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); - - if (!dpp) - return NULL; - - if (dpp3_construct(dpp, ctx, inst, - &dpp_regs[inst], &tf_shift, &tf_mask)) - return &dpp->base; - - BREAK_TO_DEBUGGER(); - kfree(dpp); - return NULL; -} - -static struct output_pixel_processor *dcn30_opp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_opp *opp = - kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); - - if (!opp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn20_opp_construct(opp, ctx, inst, - &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -static struct dce_aux *dcn30_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} - -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5), - i2c_inst_regs(6), -}; - -static const struct dce_i2c_shift i2c_shifts = { 
- I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static struct dce_i2c_hw *dcn30_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} - -static struct mpc *dcn30_mpc_create( - struct dc_context *ctx, - int num_mpcc, - int num_rmu) -{ - struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), - GFP_KERNEL); - - if (!mpc30) - return NULL; - - dcn30_mpc_construct(mpc30, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - num_mpcc, - num_rmu); - - return &mpc30->base; -} - -static struct hubbub *dcn30_hubbub_create(struct dc_context *ctx) -{ - int i; - - struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), - GFP_KERNEL); - - if (!hubbub3) - return NULL; - - hubbub3_construct(hubbub3, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask); - - - for (i = 0; i < res_cap_dcn3.num_vmid; i++) { - struct dcn20_vmid *vmid = &hubbub3->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - - return &hubbub3->base; -} - -static struct timing_generator *dcn30_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &optc_regs[instance]; - tgn10->tg_shift = &optc_shift; - tgn10->tg_mask = &optc_mask; - - dcn30_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -static struct link_encoder *dcn30_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - - if (!enc20) - return NULL; - - dcn30_link_encoder_construct(enc20, - enc_init_data, - &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc20->enc10.base; -} - -static struct panel_cntl *dcn30_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dce_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dce_panel_cntl_construct(panel_cntl, - init_data, - &panel_cntl_regs[init_data->inst], - &panel_cntl_shift, - &panel_cntl_mask); - - return &panel_cntl->base; -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); - -} - -static struct audio *dcn30_create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - 
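The creator helpers removed on either side of this point (dcn30_create_audio, dcn30_vpg_create, dcn30_afmt_create, and the rest) all share one allocation shape: kzalloc() a wrapper struct, call the block's construct function to bind that instance's register, shift, and mask tables, then return the embedded base object, with NULL signalling failure back to the resource-pool constructor. A minimal sketch of that shape, where "my_block", my_block_construct() and the my_block_* tables are hypothetical stand-ins rather than real DCN3.0 symbols:

	/* Illustrative sketch only: "my_block" and its construct/regs/shift/mask
	 * names are hypothetical stand-ins for any real block (vpg, afmt, opp, ...).
	 */
	static struct my_base *dcn30_my_block_create(struct dc_context *ctx,
			uint32_t inst)
	{
		/* zeroed allocation, so any field left untouched is NULL/0 */
		struct my_block *blk = kzalloc(sizeof(*blk), GFP_KERNEL);

		if (!blk)
			return NULL; /* pool construction treats NULL as fatal */

		/* bind this instance's registers plus the shared shift/mask tables */
		my_block_construct(blk, ctx, inst,
				&my_block_regs[inst], &my_block_shift, &my_block_mask);

		return &blk->base; /* callers only ever see the embedded base */
	}

The matching teardown in dcn30_resource_destruct() recovers the wrapper from the base pointer and kfree()s it, which is why every create helper hands out the base rather than the wrapper itself.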
-static struct vpg *dcn30_vpg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); - - if (!vpg3) - return NULL; - - vpg3_construct(vpg3, ctx, inst, - &vpg_regs[inst], - &vpg_shift, - &vpg_mask); - - return &vpg3->base; -} - -static struct afmt *dcn30_afmt_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); - - if (!afmt3) - return NULL; - - afmt3_construct(afmt3, ctx, inst, - &afmt_regs[inst], - &afmt_shift, - &afmt_mask); - - return &afmt3->base; -} - -static struct stream_encoder *dcn30_stream_encoder_create(enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1; - struct vpg *vpg; - struct afmt *afmt; - int vpg_inst; - int afmt_inst; - - /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else - return NULL; - - enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - vpg = dcn30_vpg_create(ctx, vpg_inst); - afmt = dcn30_afmt_create(ctx, afmt_inst); - - if (!enc1 || !vpg || !afmt) { - kfree(enc1); - kfree(vpg); - kfree(afmt); - return NULL; - } - - dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, - eng_id, vpg, afmt, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -static struct dce_hwseq *dcn30_hwseq_create(struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn30_create_audio, - .create_stream_encoder = dcn30_stream_encoder_create, - .create_hwseq = dcn30_hwseq_create, -}; - -static void dcn30_resource_destruct(struct dcn30_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) { - if (pool->base.stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); - pool->base.stream_enc[i]->vpg = NULL; - } - if (pool->base.stream_enc[i]->afmt != NULL) { - kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); - pool->base.stream_enc[i]->afmt = NULL; - } - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - if (pool->base.dscs[i] != NULL) - dcn20_dsc_destroy(&pool->base.dscs[i]); - } - - if (pool->base.mpc != NULL) { - kfree(TO_DCN20_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.dpps[i] != NULL) - dcn30_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; 
- } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dwb; i++) { - if (pool->base.dwbc[i] != NULL) { - kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); - pool->base.dwbc[i] = NULL; - } - if (pool->base.mcif_wb[i] != NULL) { - kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); - pool->base.mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn20_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { - if (pool->base.mpc_lut[i] != NULL) { - dc_3dlut_func_release(pool->base.mpc_lut[i]); - pool->base.mpc_lut[i] = NULL; - } - if (pool->base.mpc_shaper[i] != NULL) { - dc_transfer_func_release(pool->base.mpc_shaper[i]); - pool->base.mpc_shaper[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn20_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.multiple_abms[i] != NULL) - dce_abm_destroy(&pool->base.multiple_abms[i]); - } - - if (pool->base.psr != NULL) - dmub_psr_destroy(&pool->base.psr); - - if (pool->base.dccg != NULL) - dcn_dccg_destroy(&pool->base.dccg); - - if (pool->base.oem_device != NULL) { - struct dc *dc = pool->base.oem_device->ctx->dc; - - dc->link_srv->destroy_ddc_service(&pool->base.oem_device); - } -} - -static struct hubp *dcn30_hubp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); - - if (!hubp2) - return NULL; - - if (hubp3_construct(hubp2, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp2->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp2); - return NULL; -} - -static bool dcn30_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), - GFP_KERNEL); - - if (!dwbc30) { - dm_error("DC: failed to create dwbc30!\n"); - return false; - } - - dcn30_dwbc_construct(dwbc30, ctx, - &dwbc30_regs[i], - &dwbc30_shift, - &dwbc30_mask, - i); - - pool->dwbc[i] = &dwbc30->base; - } - return true; -} - -static bool dcn30_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), - GFP_KERNEL); - - if (!mcif_wb30) { - dm_error("DC: failed to create mcif_wb30!\n"); - return false; - } - - dcn30_mmhubbub_construct(mcif_wb30, ctx, - &mcif_wb30_regs[i], - &mcif_wb30_shift, - &mcif_wb30_mask, - i); - - pool->mcif_wb[i] = &mcif_wb30->base; - } - return true; -} - -static struct display_stream_compressor *dcn30_dsc_create( - struct dc_context *ctx, 
uint32_t inst) -{ - struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - return &dsc->base; -} - -enum dc_status dcn30_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) -{ - - return dcn20_add_stream_to_ctx(dc, new_ctx, dc_stream); -} - -static void dcn30_destroy_resource_pool(struct resource_pool **pool) -{ - struct dcn30_resource_pool *dcn30_pool = TO_DCN30_RES_POOL(*pool); - - dcn30_resource_destruct(dcn30_pool); - kfree(dcn30_pool); - *pool = NULL; -} - -static struct clock_source *dcn30_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn3_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -int dcn30_populate_dml_pipes_from_context( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate) -{ - int i, pipe_cnt; - struct resource_context *res_ctx = &context->res_ctx; - - DC_FP_START(); - dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); - DC_FP_END(); - - for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { - if (!res_ctx->pipe_ctx[i].stream) - continue; - - pipes[pipe_cnt++].pipe.scale_ratio_depth.lb_depth = - dm_lb_16; - } - - return pipe_cnt; -} - -void dcn30_populate_dml_writeback_from_context( - struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) -{ - DC_FP_START(); - dcn30_fpu_populate_dml_writeback_from_context(dc, res_ctx, pipes); - DC_FP_END(); -} - -unsigned int dcn30_calc_max_scaled_time( - unsigned int time_per_pixel, - enum mmhubbub_wbif_mode mode, - unsigned int urgent_watermark) -{ - unsigned int time_per_byte = 0; - unsigned int total_free_entry = 0xb40; - unsigned int buf_lh_capability; - unsigned int max_scaled_time; - - if (mode == PACKED_444) /* packed mode 32 bpp */ - time_per_byte = time_per_pixel/4; - else if (mode == PACKED_444_FP16) /* packed mode 64 bpp */ - time_per_byte = time_per_pixel/8; - - if (time_per_byte == 0) - time_per_byte = 1; - - buf_lh_capability = (total_free_entry*time_per_byte*32) >> 6; /* time_per_byte is in u6.6*/ - max_scaled_time = buf_lh_capability - urgent_watermark; - return max_scaled_time; -} - -void dcn30_set_mcif_arb_params( - struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt) -{ - enum mmhubbub_wbif_mode wbif_mode; - struct display_mode_lib *dml = &context->bw_ctx.dml; - struct mcif_arb_params *wb_arb_params; - int i, j, dwb_pipe; - - /* Writeback MCIF_WB arbitration parameters */ - dwb_pipe = 0; - for (i = 0; i < dc->res_pool->pipe_count; i++) { - - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - - for (j = 0; j < MAX_DWB_PIPES; j++) { - struct dc_writeback_info *writeback_info = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j]; - - if (writeback_info->wb_enabled == false) - continue; - - //wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params; - wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe]; - - if 
-
-void dcn30_set_mcif_arb_params(
-    struct dc *dc,
-    struct dc_state *context,
-    display_e2e_pipe_params_st *pipes,
-    int pipe_cnt)
-{
-    enum mmhubbub_wbif_mode wbif_mode;
-    struct display_mode_lib *dml = &context->bw_ctx.dml;
-    struct mcif_arb_params *wb_arb_params;
-    int i, j, dwb_pipe;
-
-    /* Writeback MCIF_WB arbitration parameters */
-    dwb_pipe = 0;
-    for (i = 0; i < dc->res_pool->pipe_count; i++) {
-
-        if (!context->res_ctx.pipe_ctx[i].stream)
-            continue;
-
-        for (j = 0; j < MAX_DWB_PIPES; j++) {
-            struct dc_writeback_info *writeback_info = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j];
-
-            if (writeback_info->wb_enabled == false)
-                continue;
-
-            //wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
-            wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];
-
-            if (writeback_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
-                writeback_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
-                wbif_mode = PACKED_444_FP16;
-            else
-                wbif_mode = PACKED_444;
-
-            DC_FP_START();
-            dcn30_fpu_set_mcif_arb_params(wb_arb_params, dml, pipes, pipe_cnt, j);
-            DC_FP_END();
-            wb_arb_params->time_per_pixel = (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* time_per_pixel should be in u6.6 format */
-            wb_arb_params->slice_lines = 32;
-            wb_arb_params->arbitration_slice = 2; /* irrelevant since there is no YUV output */
-            wb_arb_params->max_scaled_time = dcn30_calc_max_scaled_time(wb_arb_params->time_per_pixel,
-                    wbif_mode,
-                    wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
-
-            dwb_pipe++;
-
-            if (dwb_pipe >= MAX_DWB_PIPES)
-                return;
-        }
-        if (dwb_pipe >= MAX_DWB_PIPES)
-            return;
-    }
-
-}
-
-static struct dc_cap_funcs cap_funcs = {
-    .get_dcc_compression_cap = dcn20_get_dcc_compression_cap
-};
-
-bool dcn30_acquire_post_bldn_3dlut(
-    struct resource_context *res_ctx,
-    const struct resource_pool *pool,
-    int mpcc_id,
-    struct dc_3dlut **lut,
-    struct dc_transfer_func **shaper)
-{
-    int i;
-    bool ret = false;
-    union dc_3dlut_state *state;
-
-    ASSERT(*lut == NULL && *shaper == NULL);
-    *lut = NULL;
-    *shaper = NULL;
-
-    for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) {
-        if (!res_ctx->is_mpc_3dlut_acquired[i]) {
-            *lut = pool->mpc_lut[i];
-            *shaper = pool->mpc_shaper[i];
-            state = &pool->mpc_lut[i]->state;
-            res_ctx->is_mpc_3dlut_acquired[i] = true;
-            state->bits.rmu_idx_valid = 1;
-            state->bits.rmu_mux_num = i;
-            if (state->bits.rmu_mux_num == 0)
-                state->bits.mpc_rmu0_mux = mpcc_id;
-            else if (state->bits.rmu_mux_num == 1)
-                state->bits.mpc_rmu1_mux = mpcc_id;
-            else if (state->bits.rmu_mux_num == 2)
-                state->bits.mpc_rmu2_mux = mpcc_id;
-            ret = true;
-            break;
-        }
-    }
-    return ret;
-}
-
-bool dcn30_release_post_bldn_3dlut(
-    struct resource_context *res_ctx,
-    const struct resource_pool *pool,
-    struct dc_3dlut **lut,
-    struct dc_transfer_func **shaper)
-{
-    int i;
-    bool ret = false;
-
-    for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) {
-        if (pool->mpc_lut[i] == *lut && pool->mpc_shaper[i] == *shaper) {
-            res_ctx->is_mpc_3dlut_acquired[i] = false;
-            pool->mpc_lut[i]->state.raw = 0;
-            *lut = NULL;
-            *shaper = NULL;
-            ret = true;
-            break;
-        }
-    }
-    return ret;
-}
-
-static bool is_soc_bounding_box_valid(struct dc *dc)
-{
-    uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
-
-    if (ASICREV_IS_SIENNA_CICHLID_P(hw_internal_rev))
-        return true;
-
-    return false;
-}
-
-static bool init_soc_bounding_box(struct dc *dc,
-                  struct dcn30_resource_pool *pool)
-{
-    struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_0_soc;
-    struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_0_ip;
-
-    DC_LOGGER_INIT(dc->ctx->logger);
-
-    if (!is_soc_bounding_box_valid(dc)) {
-        DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__);
-        return false;
-    }
-
-    loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
-    loaded_ip->max_num_dpp = pool->base.pipe_count;
-    loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
-    dcn20_patch_bounding_box(dc, loaded_bb);
-    DC_FP_START();
-    patch_dcn30_soc_bounding_box(dc, &dcn3_0_soc);
-    DC_FP_END();
-
-    return true;
-}
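The acquire/release pair above hands out one of the pool's num_mpc_3dlut post-blend LUT + shaper slots and records which MPCC feeds the chosen RMU mux. A hedged sketch of how a caller might pair them (the wrapper name is hypothetical and the LUT programming step is elided; only in-tree types are used):

/* Hypothetical caller showing the intended acquire/release pairing. */
static bool program_post_blend_lut(struct resource_context *res_ctx,
                                   const struct resource_pool *pool,
                                   int mpcc_id)
{
    struct dc_3dlut *lut = NULL;
    struct dc_transfer_func *shaper = NULL;

    if (!dcn30_acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, &lut, &shaper))
        return false; /* every 3D LUT slot is already acquired */

    /* ... fill in lut/shaper contents here ... */

    /* Release clears state.raw and NULLs both pointers again. */
    dcn30_release_post_bldn_3dlut(res_ctx, pool, &lut, &shaper);
    return true;
}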
-
-static bool dcn30_split_stream_for_mpc_or_odm(
-    const struct dc *dc,
-    struct resource_context *res_ctx,
-    struct pipe_ctx *pri_pipe,
-    struct pipe_ctx *sec_pipe,
-    bool odm)
-{
-    int pipe_idx = sec_pipe->pipe_idx;
-    const struct resource_pool *pool = dc->res_pool;
-
-    *sec_pipe = *pri_pipe;
-
-    sec_pipe->pipe_idx = pipe_idx;
-    sec_pipe->plane_res.mi = pool->mis[pipe_idx];
-    sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
-    sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
-    sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
-    sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
-    sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
-    sec_pipe->stream_res.dsc = NULL;
-    if (odm) {
-        if (pri_pipe->next_odm_pipe) {
-            ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
-            sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
-            sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
-        }
-        if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
-            pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
-            sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
-        }
-        if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
-            pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
-            sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
-        }
-        pri_pipe->next_odm_pipe = sec_pipe;
-        sec_pipe->prev_odm_pipe = pri_pipe;
-
-        if (!sec_pipe->top_pipe)
-            sec_pipe->stream_res.opp = pool->opps[pipe_idx];
-        else
-            sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
-        if (sec_pipe->stream->timing.flags.DSC == 1) {
-            dcn20_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
-            ASSERT(sec_pipe->stream_res.dsc);
-            if (sec_pipe->stream_res.dsc == NULL)
-                return false;
-        }
-    } else {
-        if (pri_pipe->bottom_pipe) {
-            ASSERT(pri_pipe->bottom_pipe != sec_pipe);
-            sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
-            sec_pipe->bottom_pipe->top_pipe = sec_pipe;
-        }
-        pri_pipe->bottom_pipe = sec_pipe;
-        sec_pipe->top_pipe = pri_pipe;
-
-        ASSERT(pri_pipe->plane_state);
-    }
-
-    return true;
-}
-
-static struct pipe_ctx *dcn30_find_split_pipe(
-    struct dc *dc,
-    struct dc_state *context,
-    int old_index)
-{
-    struct pipe_ctx *pipe = NULL;
-    int i;
-
-    if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
-        pipe = &context->res_ctx.pipe_ctx[old_index];
-        pipe->pipe_idx = old_index;
-    }
-
-    if (!pipe)
-        for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
-            if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
-                    && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
-                if (context->res_ctx.pipe_ctx[i].stream == NULL) {
-                    pipe = &context->res_ctx.pipe_ctx[i];
-                    pipe->pipe_idx = i;
-                    break;
-                }
-            }
-        }
-
-    /*
-     * May need to fix pipes getting tossed from 1 opp to another on flip
-     * Add for debugging transient underflow during topology updates:
-     * ASSERT(pipe);
-     */
-    if (!pipe)
-        for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
-            if (context->res_ctx.pipe_ctx[i].stream == NULL) {
-                pipe = &context->res_ctx.pipe_ctx[i];
-                pipe->pipe_idx = i;
-                break;
-            }
-        }
-
-    return pipe;
-}
-
-noinline bool dcn30_internal_validate_bw(
-    struct dc *dc,
-    struct dc_state *context,
-    display_e2e_pipe_params_st *pipes,
-    int *pipe_cnt_out,
-    int *vlevel_out,
-    bool fast_validate,
-    bool allow_self_refresh_only)
-{
-    bool out = false;
-    bool repopulate_pipes = false;
-    int split[MAX_PIPES] = { 0 };
-    bool merge[MAX_PIPES] = { false };
-    bool newly_split[MAX_PIPES] = { false };
-    int pipe_cnt, i, pipe_idx, vlevel;
-    struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
-
-    ASSERT(pipes);
-    if (!pipes)
-        return false;
-
-    context->bw_ctx.dml.vba.maxMpcComb = 0;
-    context->bw_ctx.dml.vba.VoltageLevel = 0;
-    context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
-    dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
-    pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
-
-    if (!pipe_cnt) {
-        out = true;
-        goto validate_out;
-    }
-
-    dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
-
-    if (!fast_validate || !allow_self_refresh_only) {
-        /*
-         * DML favors voltage over p-state, but we're more interested in
-         * supporting p-state over voltage. We can't support p-state in
-         * prefetch mode > 0 so try capping the prefetch mode to start.
-         */
-        context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
-            dm_allow_self_refresh_and_mclk_switch;
-        vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-        /* This may adjust vlevel and maxMpcComb */
-        if (vlevel < context->bw_ctx.dml.soc.num_states)
-            vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
-    }
-    if (allow_self_refresh_only &&
-        (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
-         vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {
-        /*
-         * If mode is unsupported or there's still no p-state support
-         * then fall back to favoring voltage.
-         *
-         * We don't actually support prefetch mode 2, so require that we
-         * at least support prefetch mode 1.
-         */
-        context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
-            dm_allow_self_refresh;
-
-        vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-        if (vlevel < context->bw_ctx.dml.soc.num_states) {
-            memset(split, 0, sizeof(split));
-            memset(merge, 0, sizeof(merge));
-            vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
-        }
-    }
-
-    dml_log_mode_support_params(&context->bw_ctx.dml);
-
-    if (vlevel == context->bw_ctx.dml.soc.num_states)
-        goto validate_fail;
-
-    if (!dc->config.enable_windowed_mpo_odm) {
-        for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
-            struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-            struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
-
-            if (!pipe->stream)
-                continue;
-
-            /* We only support full screen mpo with ODM */
-            if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
-                    && pipe->plane_state && mpo_pipe
-                    && memcmp(&mpo_pipe->plane_state->clip_rect,
-                            &pipe->stream->src,
-                            sizeof(struct rect)) != 0) {
-                ASSERT(mpo_pipe->plane_state != pipe->plane_state);
-                goto validate_fail;
-            }
-            pipe_idx++;
-        }
-    }
-
-    /* merge pipes if necessary */
-    for (i = 0; i < dc->res_pool->pipe_count; i++) {
-        struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-        /*skip pipes that don't need merging*/
-        if (!merge[i])
-            continue;
-
-        /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
-        if (pipe->prev_odm_pipe) {
-            /*split off odm pipe*/
-            pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
-            if (pipe->next_odm_pipe)
-                pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
-
-            pipe->bottom_pipe = NULL;
-            pipe->next_odm_pipe = NULL;
-            pipe->plane_state = NULL;
-            pipe->stream = NULL;
-            pipe->top_pipe = NULL;
-            pipe->prev_odm_pipe = NULL;
-            if (pipe->stream_res.dsc)
-                dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
-            memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
-            memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
-            repopulate_pipes = true;
-        } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
-            struct pipe_ctx *top_pipe = pipe->top_pipe;
-            struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
-
-            top_pipe->bottom_pipe = bottom_pipe;
-            if (bottom_pipe)
-                bottom_pipe->top_pipe = top_pipe;
-
-            pipe->top_pipe = NULL;
-            pipe->bottom_pipe = NULL;
-            pipe->plane_state = NULL;
-            pipe->stream = NULL;
-            memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
-            memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
-            repopulate_pipes = true;
-        } else
-            ASSERT(0); /* Should never try to merge master pipe */
-
-    }
-
-    for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
-        struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-        struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-        struct pipe_ctx *hsplit_pipe = NULL;
-        bool odm;
-        int old_index = -1;
-
-        if (!pipe->stream || newly_split[i])
-            continue;
-
-        pipe_idx++;
-        odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
-
-        if (!pipe->plane_state && !odm)
-            continue;
-
-        if (split[i]) {
-            if (odm) {
-                if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
-                    old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
-                else if (old_pipe->next_odm_pipe)
-                    old_index = old_pipe->next_odm_pipe->pipe_idx;
-            } else {
-                if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
-                        old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
-                    old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
-                else if (old_pipe->bottom_pipe &&
-                        old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
-                    old_index = old_pipe->bottom_pipe->pipe_idx;
-            }
-            hsplit_pipe = dcn30_find_split_pipe(dc, context, old_index);
-            ASSERT(hsplit_pipe);
-            if (!hsplit_pipe)
-                goto validate_fail;
-
-            if (!dcn30_split_stream_for_mpc_or_odm(
-                    dc, &context->res_ctx,
-                    pipe, hsplit_pipe, odm))
-                goto validate_fail;
-
-            newly_split[hsplit_pipe->pipe_idx] = true;
-            repopulate_pipes = true;
-        }
-        if (split[i] == 4) {
-            struct pipe_ctx *pipe_4to1;
-
-            if (odm && old_pipe->next_odm_pipe)
-                old_index = old_pipe->next_odm_pipe->pipe_idx;
-            else if (!odm && old_pipe->bottom_pipe &&
-                    old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
-                old_index = old_pipe->bottom_pipe->pipe_idx;
-            else
-                old_index = -1;
-            pipe_4to1 = dcn30_find_split_pipe(dc, context, old_index);
-            ASSERT(pipe_4to1);
-            if (!pipe_4to1)
-                goto validate_fail;
-            if (!dcn30_split_stream_for_mpc_or_odm(
-                    dc, &context->res_ctx,
-                    pipe, pipe_4to1, odm))
-                goto validate_fail;
-            newly_split[pipe_4to1->pipe_idx] = true;
-
-            if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
-                    && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
-                old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
-            else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
-                    old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
-                    old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
-                old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
-            else
-                old_index = -1;
-            pipe_4to1 = dcn30_find_split_pipe(dc, context, old_index);
-            ASSERT(pipe_4to1);
-            if (!pipe_4to1)
-                goto validate_fail;
-            if (!dcn30_split_stream_for_mpc_or_odm(
-                    dc, &context->res_ctx,
-                    hsplit_pipe, pipe_4to1, odm))
-                goto validate_fail;
-            newly_split[pipe_4to1->pipe_idx] = true;
-        }
-        if (odm)
-            dcn20_build_mapped_resource(dc, context, pipe->stream);
-    }
-
-    for (i = 0; i < dc->res_pool->pipe_count; i++) {
-        struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-        if (pipe->plane_state) {
-            if (!resource_build_scaling_params(pipe))
-                goto validate_fail;
-        }
-    }
-
-    /* Actual dsc count per stream dsc validation*/
-    if (!dcn20_validate_dsc(dc, context)) {
-        vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
-        goto validate_fail;
-    }
-
-    if (repopulate_pipes)
-        pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
-    context->bw_ctx.dml.vba.VoltageLevel = vlevel;
-    *vlevel_out = vlevel;
-    *pipe_cnt_out = pipe_cnt;
-
-    out = true;
-    goto validate_out;
-
-validate_fail:
-    out = false;
-
-validate_out:
-    return out;
-}
-
-static int get_refresh_rate(struct dc_state *context)
-{
-    int refresh_rate = 0;
-    int h_v_total = 0;
-    struct dc_crtc_timing *timing = NULL;
-
-    if (context == NULL || context->streams[0] == NULL)
-        return 0;
-
-    /* check if refresh rate at least 120hz */
-    timing = &context->streams[0]->timing;
-    if (timing == NULL)
-        return 0;
-
-    h_v_total = timing->h_total * timing->v_total;
-    if (h_v_total == 0)
-        return 0;
-
-    refresh_rate = ((timing->pix_clk_100hz * 100) / (h_v_total)) + 1;
-    return refresh_rate;
-}
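get_refresh_rate() works in 100 Hz units: pix_clk_100hz * 100 converts to Hz, the division by h_total * v_total gives frames per second, and the trailing +1 rounds the truncated result up. A standalone C sketch of the same arithmetic, using invented sample timing values (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Sample 4K@120-style timing: 1188 MHz pixel clock, 4400 x 2250 total */
    uint32_t pix_clk_100hz = 11880000; /* 1188 MHz expressed in 100 Hz units */
    uint32_t h_total = 4400, v_total = 2250;
    uint32_t h_v_total = h_total * v_total; /* pixels per frame */

    /* Same expression as the driver; integer truncation then +1 */
    uint32_t refresh_rate = ((pix_clk_100hz * 100) / h_v_total) + 1; /* -> 121 */

    printf("refresh ~= %u Hz\n", refresh_rate);
    return 0;
}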
-
-#define MAX_STRETCHED_V_BLANK 500 // in micro-seconds
-/*
- * Scaling factor for v_blank stretch calculations considering timing in
- * micro-seconds and pixel clock in 100hz.
- * Note: the parentheses are necessary to ensure the correct order of
- * operation where V_SCALE is used.
- */
-#define V_SCALE (10000 / MAX_STRETCHED_V_BLANK)
-
-static int get_frame_rate_at_max_stretch_100hz(struct dc_state *context)
-{
-    struct dc_crtc_timing *timing = NULL;
-    uint32_t sec_per_100_lines;
-    uint32_t max_v_blank;
-    uint32_t curr_v_blank;
-    uint32_t v_stretch_max;
-    uint32_t stretched_frame_pix_cnt;
-    uint32_t scaled_stretched_frame_pix_cnt;
-    uint32_t scaled_refresh_rate;
-
-    if (context == NULL || context->streams[0] == NULL)
-        return 0;
-
-    /* check if refresh rate at least 120hz */
-    timing = &context->streams[0]->timing;
-    if (timing == NULL)
-        return 0;
-
-    sec_per_100_lines = timing->pix_clk_100hz / timing->h_total + 1;
-    max_v_blank = sec_per_100_lines / V_SCALE + 1;
-    curr_v_blank = timing->v_total - timing->v_addressable;
-    v_stretch_max = (max_v_blank > curr_v_blank) ? (max_v_blank - curr_v_blank) : (0);
-    stretched_frame_pix_cnt = (v_stretch_max + timing->v_total) * timing->h_total;
-    scaled_stretched_frame_pix_cnt = stretched_frame_pix_cnt / 10000;
-    scaled_refresh_rate = (timing->pix_clk_100hz) / scaled_stretched_frame_pix_cnt + 1;
-
-    return scaled_refresh_rate;
-}
-
-static bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(struct dc_state *context)
-{
-    int refresh_rate_max_stretch_100hz;
-    int min_refresh_100hz;
-
-    if (context == NULL || context->streams[0] == NULL)
-        return false;
-
-    refresh_rate_max_stretch_100hz = get_frame_rate_at_max_stretch_100hz(context);
-    min_refresh_100hz = context->streams[0]->timing.min_refresh_in_uhz / 10000;
-
-    if (refresh_rate_max_stretch_100hz < min_refresh_100hz)
-        return false;
-
-    return true;
-}
-
-bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context)
-{
-    int refresh_rate = 0;
-    const int minimum_refreshrate_supported = 120;
-
-    if (context == NULL || context->streams[0] == NULL)
-        return false;
-
-    if (context->streams[0]->sink->edid_caps.panel_patch.disable_fams)
-        return false;
-
-    if (dc->debug.disable_fams)
-        return false;
-
-    if (!dc->caps.dmub_caps.mclk_sw)
-        return false;
-
-    if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down)
-        return false;
-
-    /* more than 1 monitor connected */
-    if (context->stream_count != 1)
-        return false;
-
-    refresh_rate = get_refresh_rate(context);
-    if (refresh_rate < minimum_refreshrate_supported)
-        return false;
-
-    if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(context))
-        return false;
-
-    if (!context->streams[0]->allow_freesync)
-        return false;
-
-    if (context->streams[0]->vrr_active_variable && dc->debug.disable_fams_gaming)
-        return false;
-
-    context->streams[0]->fpo_in_use = true;
-
-    return true;
-}
-
-/*
- * set up FPO watermarks, pstate, dram latency
- */
-void dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context)
-{
-    ASSERT(dc != NULL && context != NULL);
-    if (dc == NULL || context == NULL)
-        return;
-
-    /* Set wm_a.pstate so high natural MCLK switches are impossible: 4 seconds */
-    context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
-}
-
-void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
-{
-    DC_FP_START();
-    dcn30_fpu_update_soc_for_wm_a(dc, context);
-    DC_FP_END();
-}
-
-void dcn30_calculate_wm_and_dlg(
-    struct dc *dc, struct dc_state *context,
-    display_e2e_pipe_params_st *pipes,
-    int pipe_cnt,
-    int vlevel)
-{
-    DC_FP_START();
-    dcn30_fpu_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
-    DC_FP_END();
-}
-
-bool dcn30_validate_bandwidth(struct dc *dc,
-        struct dc_state *context,
-        bool fast_validate)
-{
-    bool out = false;
-
-    BW_VAL_TRACE_SETUP();
-
-    int vlevel = 0;
-    int pipe_cnt = 0;
-    display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
-    DC_LOGGER_INIT(dc->ctx->logger);
-
-    BW_VAL_TRACE_COUNT();
-
-    DC_FP_START();
-    out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
-    DC_FP_END();
-
-    if (pipe_cnt == 0)
-        goto validate_out;
-
-    if (!out)
-        goto validate_fail;
-
-    BW_VAL_TRACE_END_VOLTAGE_LEVEL();
-
-    if (fast_validate) {
-        BW_VAL_TRACE_SKIP(fast);
-        goto validate_out;
-    }
-
-    DC_FP_START();
-    if (dc->res_pool->funcs->calculate_wm_and_dlg)
-        dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
-    DC_FP_END();
-
-    BW_VAL_TRACE_END_WATERMARKS();
-
-    goto validate_out;
-
-validate_fail:
-    DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
-        dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
-
-    BW_VAL_TRACE_SKIP(fail);
-    out = false;
-
-validate_out:
-    kfree(pipes);
-
-    BW_VAL_TRACE_FINISH();
-
-    return out;
-}
-
-void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-{
-    unsigned int i, j;
-    unsigned int num_states = 0;
-
-    unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
-    unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
-    unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
-    unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
-
-    unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
-    unsigned int num_dcfclk_sta_targets = 4;
-    unsigned int num_uclk_states;
-
-    struct dc_bounding_box_max_clk dcn30_bb_max_clk;
-
-    memset(&dcn30_bb_max_clk, 0, sizeof(dcn30_bb_max_clk));
-
-    if (dc->ctx->dc_bios->vram_info.num_chans)
-        dcn3_0_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
-
-    DC_FP_START();
-    dcn30_fpu_update_dram_channel_width_bytes(dc);
-    DC_FP_END();
-
-    if (bw_params->clk_table.entries[0].memclk_mhz) {
-
-        for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
-            if (bw_params->clk_table.entries[i].dcfclk_mhz > dcn30_bb_max_clk.max_dcfclk_mhz)
-                dcn30_bb_max_clk.max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
-            if (bw_params->clk_table.entries[i].dispclk_mhz > dcn30_bb_max_clk.max_dispclk_mhz)
-                dcn30_bb_max_clk.max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
-            if (bw_params->clk_table.entries[i].dppclk_mhz > dcn30_bb_max_clk.max_dppclk_mhz)
-                dcn30_bb_max_clk.max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
-            if (bw_params->clk_table.entries[i].phyclk_mhz > dcn30_bb_max_clk.max_phyclk_mhz)
-                dcn30_bb_max_clk.max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
-        }
-
-        DC_FP_START();
-        dcn30_fpu_update_max_clk(&dcn30_bb_max_clk);
-        DC_FP_END();
-
-        if (dcn30_bb_max_clk.max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
-            // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
-            dcfclk_sta_targets[num_dcfclk_sta_targets] = dcn30_bb_max_clk.max_dcfclk_mhz;
-            num_dcfclk_sta_targets++;
-        } else if (dcn30_bb_max_clk.max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
-            // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
-            for (i = 0; i < num_dcfclk_sta_targets; i++) {
-                if (dcfclk_sta_targets[i] > dcn30_bb_max_clk.max_dcfclk_mhz) {
-                    dcfclk_sta_targets[i] = dcn30_bb_max_clk.max_dcfclk_mhz;
-                    break;
-                }
-            }
-            // Update size of array since we "removed" duplicates
-            num_dcfclk_sta_targets = i + 1;
-        }
-
-        num_uclk_states = bw_params->clk_table.num_entries;
-
-        // Calculate optimal dcfclk for each uclk
-        for (i = 0; i < num_uclk_states; i++) {
-            DC_FP_START();
-            dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
-                    &optimal_dcfclk_for_uclk[i], NULL);
-            DC_FP_END();
-            if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
-                optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
-            }
-        }
-
-        // Calculate optimal uclk for each dcfclk sta target
-        for (i = 0; i < num_dcfclk_sta_targets; i++) {
-            for (j = 0; j < num_uclk_states; j++) {
-                if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
-                    optimal_uclk_for_dcfclk_sta_targets[i] =
-                            bw_params->clk_table.entries[j].memclk_mhz * 16;
-                    break;
-                }
-            }
-        }
-
-        i = 0;
-        j = 0;
-        // create the final dcfclk and uclk table
-        while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
-            if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
-                dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
-                dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
-            } else {
-                if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
-                    dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
-                    dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
-                } else {
-                    j = num_uclk_states;
-                }
-            }
-        }
-
-        while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
-            dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
-            dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
-        }
-
-        while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
-                optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) {
-            dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
-            dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
-        }
-
-        dcn3_0_soc.num_states = num_states;
-        DC_FP_START();
-        dcn30_fpu_update_bw_bounding_box(dc, bw_params, &dcn30_bb_max_clk, dcfclk_mhz, dram_speed_mts);
-        DC_FP_END();
-    }
-}
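The final-table construction above is essentially a two-pointer merge of two ascending lists, the DCFCLK STA targets and the per-UCLK optimal DCFCLKs, capped at DC__VOLTAGE_STATES entries. A minimal self-contained sketch of that merge shape, with invented sample clock values (not the driver's real tables):

#include <stdio.h>

#define MAX_STATES 8

int main(void)
{
    /* Two ascending lists, as in dcn30_update_bw_bounding_box() */
    unsigned int sta[] = {694, 875, 1000, 1200}; /* DCFCLK STA targets */
    unsigned int opt[] = {700, 900, 1100, 1200}; /* optimal DCFCLK per UCLK */
    unsigned int out[MAX_STATES];
    unsigned int i = 0, j = 0, n = 0;

    /* Merge while both lists have entries and the table has room */
    while (i < 4 && j < 4 && n < MAX_STATES) {
        if (sta[i] < opt[j])
            out[n++] = sta[i++];
        else
            out[n++] = opt[j++];
    }
    /* Drain whichever list is left, still capped at MAX_STATES */
    while (i < 4 && n < MAX_STATES)
        out[n++] = sta[i++];
    while (j < 4 && n < MAX_STATES)
        out[n++] = opt[j++];

    for (i = 0; i < n; i++)
        printf("%u ", out[i]); /* 694 700 875 900 1000 1100 1200 1200 */
    printf("\n");
    return 0;
}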
-
-static void dcn30_get_panel_config_defaults(struct dc_panel_config *panel_config)
-{
-    *panel_config = panel_config_defaults;
-}
-
-static const struct resource_funcs dcn30_res_pool_funcs = {
-    .destroy = dcn30_destroy_resource_pool,
-    .link_enc_create = dcn30_link_encoder_create,
-    .panel_cntl_create = dcn30_panel_cntl_create,
-    .validate_bandwidth = dcn30_validate_bandwidth,
-    .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
-    .update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
-    .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
-    .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
-    .release_pipe = dcn20_release_pipe,
-    .add_stream_to_ctx = dcn30_add_stream_to_ctx,
-    .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
-    .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
-    .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
-    .set_mcif_arb_params = dcn30_set_mcif_arb_params,
-    .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
-    .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
-    .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
-    .update_bw_bounding_box = dcn30_update_bw_bounding_box,
-    .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
-    .get_panel_config_defaults = dcn30_get_panel_config_defaults,
-};
-
-#define CTX ctx
-
-#define REG(reg_name) \
-    (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
-
-static uint32_t read_pipe_fuses(struct dc_context *ctx)
-{
-    uint32_t value = REG_READ(CC_DC_PIPE_DIS);
-    /* Support for max 6 pipes */
-    value = value & 0x3f;
-    return value;
-}
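read_pipe_fuses() returns a 6-bit mask in which, judging from the constructor below (each set bit decrements num_pipes), a set bit appears to mark a fused-off pipe. A tiny standalone sketch of that counting, with an invented mask value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t pipe_fuses = 0x30 & 0x3f; /* sample: bits 4 and 5 set -> two pipes fused off */
    unsigned int num_pipes = 6;        /* full-die pipe count, as dcn3_0_ip.max_num_dpp */
    unsigned int i;

    /* Same loop shape as in dcn30_resource_construct() below */
    for (i = 0; i < 6; i++)
        if (pipe_fuses & (1u << i))
            num_pipes--;

    printf("usable pipes: %u\n", num_pipes); /* -> 4 */
    return 0;
}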
-
-static bool dcn30_resource_construct(
-    uint8_t num_virtual_links,
-    struct dc *dc,
-    struct dcn30_resource_pool *pool)
-{
-    int i;
-    struct dc_context *ctx = dc->ctx;
-    struct irq_service_init_data init_data;
-    struct ddc_service_init_data ddc_init_data = {0};
-    uint32_t pipe_fuses = read_pipe_fuses(ctx);
-    uint32_t num_pipes = 0;
-
-    if (!(pipe_fuses == 0 || pipe_fuses == 0x3e)) {
-        BREAK_TO_DEBUGGER();
-        dm_error("DC: Unexpected fuse recipe for navi2x !\n");
-        /* fall back to single pipe */
-        pipe_fuses = 0x3e;
-    }
-
-    DC_FP_START();
-
-    ctx->dc_bios->regs = &bios_regs;
-
-    pool->base.res_cap = &res_cap_dcn3;
-
-    pool->base.funcs = &dcn30_res_pool_funcs;
-
-    /*************************************************
-     *  Resource + asic cap hardcoding               *
-     *************************************************/
-    pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
-    pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
-    pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
-    dc->caps.max_downscale_ratio = 600;
-    dc->caps.i2c_speed_in_khz = 100;
-    dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/
-    dc->caps.max_cursor_size = 256;
-    dc->caps.min_horizontal_blanking_period = 80;
-    dc->caps.dmdata_alloc_size = 2048;
-    dc->caps.mall_size_per_mem_channel = 8;
-    /* total size = mall per channel * num channels * 1024 * 1024 */
-    dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
-    dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
-
-    dc->caps.max_slave_planes = 2;
-    dc->caps.max_slave_yuv_planes = 2;
-    dc->caps.max_slave_rgb_planes = 2;
-    dc->caps.post_blend_color_processing = true;
-    dc->caps.force_dp_tps4_for_cp2520 = true;
-    dc->caps.extended_aux_timeout_support = true;
-    dc->caps.dmcub_support = true;
-
-    /* Color pipeline capabilities */
-    dc->caps.color.dpp.dcn_arch = 1;
-    dc->caps.color.dpp.input_lut_shared = 0;
-    dc->caps.color.dpp.icsc = 1;
-    dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
-    dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
-    dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
-    dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
-    dc->caps.color.dpp.dgam_rom_caps.pq = 1;
-    dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
-    dc->caps.color.dpp.post_csc = 1;
-    dc->caps.color.dpp.gamma_corr = 1;
-    dc->caps.color.dpp.dgam_rom_for_yuv = 0;
-
-    dc->caps.color.dpp.hw_3d_lut = 1;
-    dc->caps.color.dpp.ogam_ram = 1;
-    // no OGAM ROM on DCN3
-    dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
-    dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
-    dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
-    dc->caps.color.dpp.ogam_rom_caps.pq = 0;
-    dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
-    dc->caps.color.dpp.ocsc = 0;
-
-    dc->caps.color.mpc.gamut_remap = 1;
-    dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //3
-    dc->caps.color.mpc.ogam_ram = 1;
-    dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
-    dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
-    dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
-    dc->caps.color.mpc.ogam_rom_caps.pq = 0;
-    dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
-    dc->caps.color.mpc.ocsc = 1;
-
-    dc->caps.dp_hdmi21_pcon_support = true;
-    dc->caps.max_v_total = (1 << 15) - 1;
-
-    /* read VBIOS LTTPR caps */
-    {
-        if (ctx->dc_bios->funcs->get_lttpr_caps) {
-            enum bp_result bp_query_result;
-            uint8_t is_vbios_lttpr_enable = 0;
-
-            bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
-            dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
-        }
-
-        if (ctx->dc_bios->funcs->get_lttpr_interop) {
-            enum bp_result bp_query_result;
-            uint8_t is_vbios_interop_enabled = 0;
-
-            bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios,
-                    &is_vbios_interop_enabled);
-            dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
-        }
-    }
-
-    if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
-        dc->debug = debug_defaults_drv;
-
-    // Init the vm_helper
-    if (dc->vm_helper)
-        vm_helper_init(dc->vm_helper, 16);
-
-    /*************************************************
-     *  Create resources                             *
-     *************************************************/
-
-    /* Clock Sources for Pixel Clock*/
-    pool->base.clock_sources[DCN30_CLK_SRC_PLL0] =
-            dcn30_clock_source_create(ctx, ctx->dc_bios,
-                CLOCK_SOURCE_COMBO_PHY_PLL0,
-                &clk_src_regs[0], false);
-    pool->base.clock_sources[DCN30_CLK_SRC_PLL1] =
-            dcn30_clock_source_create(ctx, ctx->dc_bios,
-                CLOCK_SOURCE_COMBO_PHY_PLL1,
-                &clk_src_regs[1], false);
-    pool->base.clock_sources[DCN30_CLK_SRC_PLL2] =
-            dcn30_clock_source_create(ctx, ctx->dc_bios,
-                CLOCK_SOURCE_COMBO_PHY_PLL2,
-                &clk_src_regs[2], false);
-    pool->base.clock_sources[DCN30_CLK_SRC_PLL3] =
-            dcn30_clock_source_create(ctx, ctx->dc_bios,
-                CLOCK_SOURCE_COMBO_PHY_PLL3,
-                &clk_src_regs[3], false);
-    pool->base.clock_sources[DCN30_CLK_SRC_PLL4] =
-            dcn30_clock_source_create(ctx, ctx->dc_bios,
-                CLOCK_SOURCE_COMBO_PHY_PLL4,
-                &clk_src_regs[4], false);
-    pool->base.clock_sources[DCN30_CLK_SRC_PLL5] =
-            dcn30_clock_source_create(ctx, ctx->dc_bios,
-                CLOCK_SOURCE_COMBO_PHY_PLL5,
-                &clk_src_regs[5], false);
-
-    pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL;
-
-    /* todo: not reuse phy_pll registers */
-    pool->base.dp_clock_source =
-            dcn30_clock_source_create(ctx, ctx->dc_bios,
-                CLOCK_SOURCE_ID_DP_DTO,
-                &clk_src_regs[0], true);
-
-    for (i = 0; i < pool->base.clk_src_count; i++) {
-        if (pool->base.clock_sources[i] == NULL) {
-            dm_error("DC: failed to create clock sources!\n");
-            BREAK_TO_DEBUGGER();
-            goto create_fail;
-        }
-    }
-
-    /* DCCG */
-    pool->base.dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
-    if (pool->base.dccg == NULL) {
-        dm_error("DC: failed to create dccg!\n");
-        BREAK_TO_DEBUGGER();
-        goto create_fail;
-    }
-
-    /* PP Lib and SMU interfaces */
-    init_soc_bounding_box(dc, pool);
-
-    num_pipes = dcn3_0_ip.max_num_dpp;
-
-    for (i = 0; i < dcn3_0_ip.max_num_dpp; i++)
-        if (pipe_fuses & 1 << i)
-            num_pipes--;
-
-    dcn3_0_ip.max_num_dpp = num_pipes;
-    dcn3_0_ip.max_num_otg = num_pipes;
-
-    dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
-
-    /* IRQ */
-    init_data.ctx = dc->ctx;
-    pool->base.irqs = dal_irq_service_dcn30_create(&init_data);
-    if (!pool->base.irqs)
-        goto create_fail;
-
-    /* HUBBUB */
-    pool->base.hubbub = dcn30_hubbub_create(ctx);
-    if (pool->base.hubbub == NULL) {
-        BREAK_TO_DEBUGGER();
-        dm_error("DC: failed to create hubbub!\n");
-        goto create_fail;
-    }
-
-    /* HUBPs, DPPs, OPPs and TGs */
-    for (i = 0; i < pool->base.pipe_count; i++) {
-        pool->base.hubps[i] = dcn30_hubp_create(ctx, i);
-        if (pool->base.hubps[i] == NULL) {
-            BREAK_TO_DEBUGGER();
-            dm_error(
-                "DC: failed to create hubps!\n");
-            goto create_fail;
-        }
-
-        pool->base.dpps[i] = dcn30_dpp_create(ctx, i);
-        if (pool->base.dpps[i] == NULL) {
-            BREAK_TO_DEBUGGER();
-            dm_error(
-                "DC: failed to create dpps!\n");
-            goto create_fail;
-        }
-    }
-
-    for (i = 0; i < pool->base.res_cap->num_opp; i++) {
-        pool->base.opps[i] = dcn30_opp_create(ctx, i);
-        if (pool->base.opps[i] == NULL) {
-            BREAK_TO_DEBUGGER();
-            dm_error(
-                "DC: failed to create output pixel processor!\n");
-            goto create_fail;
-        }
-    }
-
-    for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
-        pool->base.timing_generators[i] = dcn30_timing_generator_create(
-                ctx, i);
-        if (pool->base.timing_generators[i] == NULL) {
-            BREAK_TO_DEBUGGER();
-            dm_error("DC: failed to create tg!\n");
-            goto create_fail;
-        }
-    }
-    pool->base.timing_generator_count = i;
-    /* PSR */
-    pool->base.psr = dmub_psr_create(ctx);
-
-    if (pool->base.psr == NULL) {
-        dm_error("DC: failed to create PSR obj!\n");
-        BREAK_TO_DEBUGGER();
-        goto create_fail;
-    }
-
-    /* ABM */
-    for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
-        pool->base.multiple_abms[i] = dmub_abm_create(ctx,
-                &abm_regs[i],
-                &abm_shift,
-                &abm_mask);
-        if (pool->base.multiple_abms[i] == NULL) {
-            dm_error("DC: failed to create abm for pipe %d!\n", i);
-            BREAK_TO_DEBUGGER();
-            goto create_fail;
-        }
-    }
-    /* MPC and DSC */
-    pool->base.mpc = dcn30_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
-    if (pool->base.mpc == NULL) {
-        BREAK_TO_DEBUGGER();
-        dm_error("DC: failed to create mpc!\n");
-        goto create_fail;
-    }
-
-    for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
-        pool->base.dscs[i] = dcn30_dsc_create(ctx, i);
-        if (pool->base.dscs[i] == NULL) {
-            BREAK_TO_DEBUGGER();
-            dm_error("DC: failed to create display stream compressor %d!\n", i);
-            goto create_fail;
-        }
-    }
-
-    /* DWB and MMHUBBUB */
-    if (!dcn30_dwbc_create(ctx, &pool->base)) {
-        BREAK_TO_DEBUGGER();
-        dm_error("DC: failed to create dwbc!\n");
-        goto create_fail;
-    }
-
-    if (!dcn30_mmhubbub_create(ctx, &pool->base)) {
-        BREAK_TO_DEBUGGER();
-        dm_error("DC: failed to create mcif_wb!\n");
-        goto create_fail;
-    }
-
-    /* AUX and I2C */
-    for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
-        pool->base.engines[i] = dcn30_aux_engine_create(ctx, i);
-        if (pool->base.engines[i] == NULL) {
-            BREAK_TO_DEBUGGER();
-            dm_error(
-                "DC:failed to create aux engine!!\n");
-            goto create_fail;
-        }
-        pool->base.hw_i2cs[i] = dcn30_i2c_hw_create(ctx, i);
-        if (pool->base.hw_i2cs[i] == NULL) {
-            BREAK_TO_DEBUGGER();
-            dm_error(
-                "DC:failed to create hw i2c!!\n");
-            goto create_fail;
-        }
-        pool->base.sw_i2cs[i] = NULL;
-    }
-
-    /* Audio, Stream Encoders including DIG and virtual, MPC 3D LUTs */
-    if (!resource_construct(num_virtual_links, dc, &pool->base,
-            &res_create_funcs))
-        goto create_fail;
-
-    /* HW Sequencer and Plane caps */
-    dcn30_hw_sequencer_construct(dc);
-
-    dc->caps.max_planes = pool->base.pipe_count;
-
-    for (i = 0; i < dc->caps.max_planes; ++i)
-        dc->caps.planes[i] = plane_cap;
-
-    dc->cap_funcs = cap_funcs;
-
-    if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
-        ddc_init_data.ctx = dc->ctx;
-        ddc_init_data.link = NULL;
-        ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
-        ddc_init_data.id.enum_id = 0;
-        ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
-        pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
-    } else {
-        pool->base.oem_device = NULL;
-    }
-
-    DC_FP_END();
-
-    return true;
-
-create_fail:
-
-    DC_FP_END();
-    dcn30_resource_destruct(pool);
-
-    return false;
-}
-
-struct resource_pool *dcn30_create_resource_pool(
-        const struct dc_init_data *init_data,
-        struct dc *dc)
-{
-    struct dcn30_resource_pool *pool =
-        kzalloc(sizeof(struct dcn30_resource_pool), GFP_KERNEL);
-
-    if (!pool)
-        return NULL;
-
-    if (dcn30_resource_construct(init_data->num_virtual_links, dc, pool))
-        return &pool->base;
-
-    BREAK_TO_DEBUGGER();
-    kfree(pool);
-    return NULL;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
deleted file mode 100644
index 8e6b8b7368..0000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef _DCN30_RESOURCE_H_
-#define _DCN30_RESOURCE_H_
-
-#include "core_types.h"
-
-#define TO_DCN30_RES_POOL(pool)\
-    container_of(pool, struct dcn30_resource_pool, base)
-
-struct dc;
-struct resource_pool;
-struct _vcs_dpi_display_pipe_params_st;
-
-extern struct _vcs_dpi_ip_params_st dcn3_0_ip;
-extern struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc;
-
-struct dcn30_resource_pool {
-    struct resource_pool base;
-};
-struct resource_pool *dcn30_create_resource_pool(
-        const struct dc_init_data *init_data,
-        struct dc *dc);
-
-void dcn30_set_mcif_arb_params(
-        struct dc *dc,
-        struct dc_state *context,
-        display_e2e_pipe_params_st *pipes,
-        int pipe_cnt);
-
-unsigned int dcn30_calc_max_scaled_time(
-        unsigned int time_per_pixel,
-        enum mmhubbub_wbif_mode mode,
-        unsigned int urgent_watermark);
-
-bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
-        bool fast_validate);
-bool dcn30_internal_validate_bw(
-        struct dc *dc,
-        struct dc_state *context,
-        display_e2e_pipe_params_st *pipes,
-        int *pipe_cnt_out,
-        int *vlevel_out,
-        bool fast_validate,
-        bool allow_self_refresh_only);
-void dcn30_calculate_wm_and_dlg(
-        struct dc *dc, struct dc_state *context,
-        display_e2e_pipe_params_st *pipes,
-        int pipe_cnt,
-        int vlevel);
-void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
-void dcn30_populate_dml_writeback_from_context(
-        struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
-
-int dcn30_populate_dml_pipes_from_context(
-        struct dc *dc, struct dc_state *context,
-        display_e2e_pipe_params_st *pipes,
-        bool fast_validate);
-
-bool dcn30_acquire_post_bldn_3dlut(
-        struct resource_context *res_ctx,
-        const struct resource_pool *pool,
-        int mpcc_id,
-        struct dc_3dlut **lut,
-        struct dc_transfer_func **shaper);
-
-bool dcn30_release_post_bldn_3dlut(
-        struct resource_context *res_ctx,
-        const struct resource_pool *pool,
-        struct dc_3dlut **lut,
-        struct dc_transfer_func **shaper);
-
-enum dc_status dcn30_add_stream_to_ctx(
-        struct dc *dc,
-        struct dc_state *new_ctx,
-        struct dc_stream_state *dc_stream);
-
-void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
-
-bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
-void dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
-int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, struct dc_state *context,
-        display_e2e_pipe_params_st *pipes, int pipe_cnt, int vlevel);
-
-#endif /* _DCN30_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 30fbc5e06d..d241f665e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -10,9 +10,8 @@
 #
 # Makefile for dcn30.
 
-DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
-	dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o \
-	dcn301_optc.o
+DCN301 = dcn301_dccg.o \
+	dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o
 
 AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
deleted file mode 100644
index 6477009ce0..0000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright 2016-2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "dce110/dce110_hwseq.h"
-#include "dcn10/dcn10_hwseq.h"
-#include "dcn20/dcn20_hwseq.h"
-#include "dcn21/dcn21_hwseq.h"
-#include "dcn30/dcn30_hwseq.h"
-#include "dcn301/dcn301_hwseq.h"
-
-#include "dcn301_init.h"
-
-static const struct hw_sequencer_funcs dcn301_funcs = {
-    .program_gamut_remap = dcn30_program_gamut_remap,
-    .init_hw = dcn10_init_hw,
-    .power_down_on_boot = dcn10_power_down_on_boot,
-    .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
-    .apply_ctx_for_surface = NULL,
-    .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
-    .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
-    .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
-    .update_plane_addr = dcn20_update_plane_addr,
-    .update_dchub = dcn10_update_dchub,
-    .update_pending_status = dcn10_update_pending_status,
-    .program_output_csc = dcn20_program_output_csc,
-    .enable_accelerated_mode = dce110_enable_accelerated_mode,
-    .enable_timing_synchronization = dcn10_enable_timing_synchronization,
-    .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
-    .update_info_frame = dcn30_update_info_frame,
-    .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
-    .enable_stream = dcn20_enable_stream,
-    .disable_stream = dce110_disable_stream,
-    .unblank_stream = dcn20_unblank_stream,
-#ifdef FREESYNC_POWER_OPTIMIZE
-    .are_streams_coarse_grain_aligned = dcn20_are_streams_coarse_grain_aligned,
-#endif
-    .blank_stream = dce110_blank_stream,
-    .enable_audio_stream = dce110_enable_audio_stream,
-    .disable_audio_stream = dce110_disable_audio_stream,
-    .disable_plane = dcn20_disable_plane,
-    .pipe_control_lock = dcn20_pipe_control_lock,
-    .interdependent_update_lock = dcn10_lock_all_pipes,
-    .cursor_lock = dcn10_cursor_lock,
-    .prepare_bandwidth = dcn20_prepare_bandwidth,
-    .optimize_bandwidth = dcn20_optimize_bandwidth,
-    .update_bandwidth = dcn20_update_bandwidth,
-    .set_drr = dcn10_set_drr,
-    .get_position = dcn10_get_position,
-    .set_static_screen_control = dcn10_set_static_screen_control,
-    .setup_stereo = dcn10_setup_stereo,
-    .set_avmute = dcn30_set_avmute,
-    .log_hw_state = dcn10_log_hw_state,
-    .get_hw_state = dcn10_get_hw_state,
-    .clear_status_bits = dcn10_clear_status_bits,
-    .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
-    .edp_backlight_control = dce110_edp_backlight_control,
-    .edp_power_control = dce110_edp_power_control,
-    .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
-    .set_cursor_position = dcn10_set_cursor_position,
-    .set_cursor_attribute = dcn10_set_cursor_attribute,
-    .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
-    .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
-    .set_clock = dcn10_set_clock,
-    .get_clock = dcn10_get_clock,
-    .program_triplebuffer = dcn20_program_triple_buffer,
-    .enable_writeback = dcn30_enable_writeback,
-    .disable_writeback = dcn30_disable_writeback,
-    .update_writeback = dcn30_update_writeback,
-    .mmhubbub_warmup = dcn30_mmhubbub_warmup,
-    .dmdata_status_done = dcn20_dmdata_status_done,
-    .program_dmdata_engine = dcn30_program_dmdata_engine,
-    .set_dmdata_attributes = dcn20_set_dmdata_attributes,
-    .init_sys_ctx = dcn20_init_sys_ctx,
-    .init_vm_ctx = dcn20_init_vm_ctx,
-    .set_flip_control_gsl = dcn20_set_flip_control_gsl,
-    .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
-    .calc_vupdate_position = dcn10_calc_vupdate_position,
-    .set_backlight_level = dcn21_set_backlight_level,
-    .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
-    .set_pipe = dcn21_set_pipe,
-    .enable_lvds_link_output = dce110_enable_lvds_link_output,
-    .enable_tmds_link_output = dce110_enable_tmds_link_output,
-    .enable_dp_link_output = dce110_enable_dp_link_output,
-    .disable_link_output = dce110_disable_link_output,
-    .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
-    .get_dcc_en_bits = dcn10_get_dcc_en_bits,
-    .optimize_pwr_state = dcn21_optimize_pwr_state,
-    .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
-    .update_visual_confirm_color = dcn10_update_visual_confirm_color,
-};
-
-static const struct hwseq_private_funcs dcn301_private_funcs = {
-    .init_pipes = dcn10_init_pipes,
-    .update_plane_addr = dcn20_update_plane_addr,
-    .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
-    .update_mpcc = dcn20_update_mpcc,
-    .set_input_transfer_func = dcn30_set_input_transfer_func,
-    .set_output_transfer_func = dcn30_set_output_transfer_func,
-    .power_down = dce110_power_down,
-    .enable_display_power_gating = dcn10_dummy_display_power_gating,
-    .blank_pixel_data = dcn20_blank_pixel_data,
-    .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
-    .enable_stream_timing = dcn20_enable_stream_timing,
-    .edp_backlight_control = dce110_edp_backlight_control,
-    .disable_stream_gating = dcn20_disable_stream_gating,
-    .enable_stream_gating = dcn20_enable_stream_gating,
-    .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
-    .did_underflow_occur = dcn10_did_underflow_occur,
-    .init_blank = dcn20_init_blank,
-    .disable_vga = dcn20_disable_vga,
-    .bios_golden_init = dcn10_bios_golden_init,
-    .plane_atomic_disable = dcn20_plane_atomic_disable,
-    .plane_atomic_power_down = dcn10_plane_atomic_power_down,
-    .enable_power_gating_plane = dcn20_enable_power_gating_plane,
-    .dpp_pg_control = dcn20_dpp_pg_control,
-    .hubp_pg_control = dcn20_hubp_pg_control,
-    .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
-    .update_odm = dcn20_update_odm,
-    .dsc_pg_control = dcn20_dsc_pg_control,
-    .set_hdr_multiplier = dcn10_set_hdr_multiplier,
-    .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
-    .wait_for_blank_complete = dcn20_wait_for_blank_complete,
-    .dccg_init = dcn20_dccg_init,
-    .set_blend_lut = dcn30_set_blend_lut,
-    .set_shaper_3dlut = dcn20_set_shaper_3dlut,
-};
-
-void dcn301_hw_sequencer_construct(struct dc *dc)
-{
-    dc->hwss = dcn301_funcs;
-    dc->hwseq->funcs = dcn301_private_funcs;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h
deleted file mode 100644
index 0bca48ccbf..0000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DC_DCN30_INIT_H__
-#define __DC_DCN30_INIT_H__
-
-struct dc;
-
-void dcn301_hw_sequencer_construct(struct dc *dc);
-
-#endif /* __DC_DCN30_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c
deleted file mode 100644
index b3cfcb8879..0000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "reg_helper.h"
-#include "dcn301_optc.h"
-#include "dc.h"
-#include "dcn_calc_math.h"
-#include "dc_dmub_srv.h"
-
-#include "dml/dcn30/dcn30_fpu.h"
-#include "dc_trace.h"
-
-#define REG(reg)\
-    optc1->tg_regs->reg
-
-#define CTX \
-    optc1->base.ctx
-
-#undef FN
-#define FN(reg_name, field_name) \
-    optc1->tg_shift->field_name, optc1->tg_mask->field_name
-
-
-/**
- * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
- *
- * @optc: timing_generator instance.
- * @params: parameters used for Dynamic Refresh Rate.
- */
-void optc301_set_drr(
-    struct timing_generator *optc,
-    const struct drr_params *params)
-{
-    struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
-    if (params != NULL &&
-            params->vertical_total_max > 0 &&
-            params->vertical_total_min > 0) {
-
-        if (params->vertical_total_mid != 0) {
-
-            REG_SET(OTG_V_TOTAL_MID, 0,
-                OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
-
-            REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
-                OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
-                OTG_VTOTAL_MID_FRAME_NUM,
-                (uint8_t)params->vertical_total_mid_frame_num);
-
-        }
-
-        optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
-
-        REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
-            OTG_V_TOTAL_MIN_SEL, 1,
-            OTG_V_TOTAL_MAX_SEL, 1,
-            OTG_FORCE_LOCK_ON_EVENT, 0,
-            OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
-            OTG_SET_V_TOTAL_MIN_MASK, 0);
-        // Setup manual flow control for EOF via TRIG_A
-        optc->funcs->setup_manual_trigger(optc);
-
-    } else {
-        REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
-            OTG_SET_V_TOTAL_MIN_MASK, 0,
-            OTG_V_TOTAL_MIN_SEL, 0,
-            OTG_V_TOTAL_MAX_SEL, 0,
-            OTG_FORCE_LOCK_ON_EVENT, 0);
-
-        optc->funcs->set_vtotal_min_max(optc, 0, 0);
-    }
-}
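optc301_set_drr() expects vertical_total_min/max already derived from the desired refresh-rate range. As a rough illustration of where such bounds can come from (the formula and the sample timing are mine, not from this patch): vtotal = pixel_clock / (h_total * refresh_rate), so the highest refresh bound yields the smallest vtotal and vice versa:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Sample 1080p timing: 148.5 MHz pixel clock, h_total 2200 */
    uint32_t pix_clk_hz = 148500000, h_total = 2200;
    uint32_t refresh_min = 48, refresh_max = 60; /* variable-refresh range */

    /* vtotal scales inversely with refresh rate */
    uint32_t vertical_total_min = pix_clk_hz / (h_total * refresh_max); /* 1125 */
    uint32_t vertical_total_max = pix_clk_hz / (h_total * refresh_min); /* 1406 */

    printf("V_TOTAL range: %u..%u lines\n",
           vertical_total_min, vertical_total_max);
    return 0;
}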
-
-
-void optc301_setup_manual_trigger(struct timing_generator *optc)
-{
-    struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
-    REG_SET_8(OTG_TRIGA_CNTL, 0,
-        OTG_TRIGA_SOURCE_SELECT, 21,
-        OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
-        OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
-        OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
-        OTG_TRIGA_POLARITY_SELECT, 0,
-        OTG_TRIGA_FREQUENCY_SELECT, 0,
-        OTG_TRIGA_DELAY, 0,
-        OTG_TRIGA_CLEAR, 1);
-}
-
-static struct timing_generator_funcs dcn30_tg_funcs = {
-    .validate_timing = optc1_validate_timing,
-    .program_timing = optc1_program_timing,
-    .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
-    .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
-    .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
-    .program_global_sync = optc1_program_global_sync,
-    .enable_crtc = optc2_enable_crtc,
-    .disable_crtc = optc1_disable_crtc,
-    /* used by enable_timing_synchronization. Not needed for FPGA */
-    .is_counter_moving = optc1_is_counter_moving,
-    .get_position = optc1_get_position,
-    .get_frame_count = optc1_get_vblank_counter,
-    .get_scanoutpos = optc1_get_crtc_scanoutpos,
-    .get_otg_active_size = optc1_get_otg_active_size,
-    .set_early_control = optc1_set_early_control,
-    /* used by enable_timing_synchronization. Not needed for FPGA */
-    .wait_for_state = optc1_wait_for_state,
-    .set_blank_color = optc3_program_blank_color,
-    .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
-    .triplebuffer_lock = optc3_triplebuffer_lock,
-    .triplebuffer_unlock = optc2_triplebuffer_unlock,
-    .enable_reset_trigger = optc1_enable_reset_trigger,
-    .enable_crtc_reset = optc1_enable_crtc_reset,
-    .disable_reset_trigger = optc1_disable_reset_trigger,
-    .lock = optc3_lock,
-    .unlock = optc1_unlock,
-    .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
-    .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
-    .enable_optc_clock = optc1_enable_optc_clock,
-    .set_drr = optc301_set_drr,
-    .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
-    .set_vtotal_min_max = optc3_set_vtotal_min_max,
-    .set_static_screen_control = optc1_set_static_screen_control,
-    .program_stereo = optc1_program_stereo,
-    .is_stereo_left_eye = optc1_is_stereo_left_eye,
-    .tg_init = optc3_tg_init,
-    .is_tg_enabled = optc1_is_tg_enabled,
-    .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
-    .clear_optc_underflow = optc1_clear_optc_underflow,
-    .setup_global_swap_lock = NULL,
-    .get_crc = optc1_get_crc,
-    .configure_crc = optc2_configure_crc,
-    .set_dsc_config = optc3_set_dsc_config,
-    .get_dsc_status = optc2_get_dsc_status,
-    .set_dwb_source = NULL,
-    .set_odm_bypass = optc3_set_odm_bypass,
-    .set_odm_combine = optc3_set_odm_combine,
-    .get_optc_source = optc2_get_optc_source,
-    .set_out_mux = optc3_set_out_mux,
-    .set_drr_trigger_window = optc3_set_drr_trigger_window,
-    .set_vtotal_change_limit = optc3_set_vtotal_change_limit,
-    .set_gsl = optc2_set_gsl,
-    .set_gsl_source_select = optc2_set_gsl_source_select,
-    .set_vtg_params = optc1_set_vtg_params,
-    .program_manual_trigger = optc2_program_manual_trigger,
-    .setup_manual_trigger = optc301_setup_manual_trigger,
-    .get_hw_timing = optc1_get_hw_timing,
-    .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
-};
-
-void dcn301_timing_generator_init(struct optc *optc1)
-{
-    optc1->base.funcs = &dcn30_tg_funcs;
-
-    optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
-    optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
-
-    optc1->min_h_blank = 32;
-    optc1->min_v_blank = 3;
-    optc1->min_v_blank_interlace = 5;
-    optc1->min_h_sync_width = 4;
-    optc1->min_v_sync_width = 1;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
deleted file mode 100644
index b49585682a..0000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_OPTC_DCN301_H__ -#define __DC_OPTC_DCN301_H__ - -#include "dcn20/dcn20_optc.h" -#include "dcn30/dcn30_optc.h" - -void dcn301_timing_generator_init(struct optc *optc1); -void optc301_setup_manual_trigger(struct timing_generator *optc); -void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params); - -#endif /* __DC_OPTC_DCN301_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c deleted file mode 100644 index ce04caf35d..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ /dev/null @@ -1,1728 +0,0 @@ -/* - * Copyright 2019-2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - - -#include "dm_services.h" -#include "dc.h" - -#include "dcn301_init.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn30/dcn30_resource.h" -#include "dcn301_resource.h" - -#include "dcn20/dcn20_resource.h" - -#include "dcn10/dcn10_ipp.h" -#include "dcn301/dcn301_hubbub.h" -#include "dcn30/dcn30_mpc.h" -#include "dcn30/dcn30_hubp.h" -#include "irq/dcn30/irq_service_dcn30.h" -#include "dcn30/dcn30_dpp.h" -#include "dcn301/dcn301_optc.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn30/dcn30_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn30/dcn30_opp.h" -#include "dcn20/dcn20_dsc.h" -#include "dcn30/dcn30_vpg.h" -#include "dcn30/dcn30_afmt.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" -#include "dce110/dce110_resource.h" -#include "dml/display_mode_vba.h" -#include "dcn301/dcn301_dccg.h" -#include "dcn10/dcn10_resource.h" -#include "dcn30/dcn30_dio_stream_encoder.h" -#include "dcn301/dcn301_dio_link_encoder.h" -#include "dcn301_panel_cntl.h" - -#include "vangogh_ip_offset.h" - -#include "dcn30/dcn30_dwb.h" -#include "dcn30/dcn30_mmhubbub.h" - -#include "dcn/dcn_3_0_1_offset.h" -#include "dcn/dcn_3_0_1_sh_mask.h" - -#include "nbio/nbio_7_2_0_offset.h" - -#include "dpcs/dpcs_3_0_0_offset.h" -#include "dpcs/dpcs_3_0_0_sh_mask.h" - -#include "reg_helper.h" -#include "dce/dmub_abm.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" - -#include "dml/dcn30/dcn30_fpu.h" - -#include "dml/dcn30/display_mode_vba_30.h" -#include "dml/dcn301/dcn301_fpu.h" -#include "vm_helper.h" -#include "dcn20/dcn20_vmid.h" -#include "amdgpu_socbb.h" - -#define TO_DCN301_RES_POOL(pool)\ - container_of(pool, struct dcn301_resource_pool, base) - -#define DC_LOGGER \ - dc->ctx->logger -#define DC_LOGGER_INIT(logger) - -enum dcn301_clk_src_array_id { - DCN301_CLK_SRC_PLL0, - DCN301_CLK_SRC_PLL1, - DCN301_CLK_SRC_PLL2, - DCN301_CLK_SRC_PLL3, - DCN301_CLK_SRC_TOTAL -}; - -/* begin ********************* - * macros to expand register list macros defined in HW object header file - */ - -/* DCN */ -#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRIR(var_name, reg_name, block, id)\ - .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRII2(reg_name_pre, reg_name_post, id)\ - .reg_name_pre ## _ ## reg_name_post[id] = BASE(mm ## reg_name_pre \ - ## id ## _ ## reg_name_post ## _BASE_IDX) + \ - mm ## reg_name_pre ## id ## _ ## reg_name_post - -#define SRII_MPC_RMU(reg_name, block, id)\ - .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define SRII_DWB(reg_name, temp_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## temp_name - -#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ -
.field_name = reg_name ## __ ## field_name ## post_fix - -#define DCCG_SRII(reg_name, block, id)\ - .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - mm ## reg_name ## _ ## block ## id - -/* NBIO */ -#define NBIO_BASE_INNER(seg) \ - NBIO_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - .reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX0_ ## reg_name - -/* MMHUB */ -#define MMHUB_BASE_INNER(seg) \ - MMHUB_BASE__INST0_SEG ## seg - -#define MMHUB_BASE(seg) \ - MMHUB_BASE_INNER(seg) - -#define MMHUB_SR(reg_name)\ - .reg_name = MMHUB_BASE(regMM ## reg_name ## _BASE_IDX) + \ - regMM ## reg_name - -/* CLOCK */ -#define CLK_BASE_INNER(seg) \ - CLK_BASE__INST0_SEG ## seg - -#define CLK_BASE(seg) \ - CLK_BASE_INNER(seg) - -#define CLK_SRI(reg_name, block, inst)\ - .reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## _ ## inst ## _ ## reg_name - -static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_3), - NBIO_SR(BIOS_SCRATCH_6) -}; - -#define clk_src_regs(index, pllid)\ -[index] = {\ - CS_COMMON_REG_LIST_DCN3_01(index, pllid),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, C), - clk_src_regs(3, D) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -#define abm_regs(id)\ -[id] = {\ - ABM_DCN301_REG_LIST(id)\ -} - -static const struct dce_abm_registers abm_regs[] = { - abm_regs(0), - abm_regs(1), - abm_regs(2), - abm_regs(3), -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN30(_MASK) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6) -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define vpg_regs(id)\ -[id] = {\ - VPG_DCN3_REG_LIST(id)\ -} - -static const struct dcn30_vpg_registers vpg_regs[] = { - vpg_regs(0), - vpg_regs(1), - vpg_regs(2), - vpg_regs(3), -}; - -static const struct dcn30_vpg_shift vpg_shift = { - DCN3_VPG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn30_vpg_mask vpg_mask = { - DCN3_VPG_MASK_SH_LIST(_MASK) -}; - -#define afmt_regs(id)\ -[id] = {\ - AFMT_DCN3_REG_LIST(id)\ -} - -static const struct dcn30_afmt_registers afmt_regs[] = { - afmt_regs(0), - afmt_regs(1), - afmt_regs(2), - afmt_regs(3), -}; - -static const struct dcn30_afmt_shift afmt_shift = { - DCN3_AFMT_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn30_afmt_mask afmt_mask = { - DCN3_AFMT_MASK_SH_LIST(_MASK) -}; - 
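(Illustration only, not from the upstream tree: the SR/SRI macro family defined near the top of this file computes absolute MMIO addresses at compile time by token-pasting block, instance and register names, then adding the per-segment base. A mechanical expansion of one representative use, with OTG instance 0 chosen purely as an example.)

/*
 * SRI(OTG_V_TOTAL_MID, OTG, 0) expands to roughly:
 *
 *   .OTG_V_TOTAL_MID = BASE(mmOTG0_OTG_V_TOTAL_MID_BASE_IDX)
 *                      + mmOTG0_OTG_V_TOTAL_MID
 *
 * and BASE(seg) in turn pastes DCN_BASE__INST0_SEG<seg>, so the field of
 * the register struct (e.g. dcn_optc_registers) ends up holding segment
 * base + register offset, with no hardcoded addresses in per-instance code.
 */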
-#define stream_enc_regs(id)\ -[id] = {\ - SE_DCN3_REG_LIST(id)\ -} - -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), -}; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - - -#define aux_regs(id)\ -[id] = {\ - DCN2_AUX_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), -}; - - -#define link_regs(id, phyid)\ -[id] = {\ - LE_DCN301_REG_LIST(id), \ - UNIPHY_DCN2_REG_LIST(phyid), \ - DPCS_DCN2_REG_LIST(id), \ - SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ -} - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -static const struct dcn10_link_enc_registers link_enc_regs[] = { - link_regs(0, A), - link_regs(1, B), - link_regs(2, C), - link_regs(3, D), -}; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN301(__SHIFT),\ - DPCS_DCN2_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN301(_MASK),\ - DPCS_DCN2_MASK_SH_LIST(_MASK) -}; - -#define panel_cntl_regs(id)\ -[id] = {\ - DCN301_PANEL_CNTL_REG_LIST(id),\ -} - -static const struct dce_panel_cntl_registers panel_cntl_regs[] = { - panel_cntl_regs(0), - panel_cntl_regs(1), -}; - -static const struct dcn301_panel_cntl_shift panel_cntl_shift = { - DCN301_PANEL_CNTL_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn301_panel_cntl_mask panel_cntl_mask = { - DCN301_PANEL_CNTL_MASK_SH_LIST(_MASK) -}; - -#define dpp_regs(id)\ -[id] = {\ - DPP_REG_LIST_DCN30(id),\ -} - -static const struct dcn3_dpp_registers dpp_regs[] = { - dpp_regs(0), - dpp_regs(1), - dpp_regs(2), - dpp_regs(3), -}; - -static const struct dcn3_dpp_shift tf_shift = { - DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) -}; - -static const struct dcn3_dpp_mask tf_mask = { - DPP_REG_LIST_SH_MASK_DCN30(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_REG_LIST_DCN30(id),\ -} - -static const struct dcn20_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3), -}; - -static const struct dcn20_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn20_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN20(_MASK) -}; - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST0(id), \ - .AUXN_IMPCAL = 0, \ - .AUXP_IMPCAL = 0, \ - .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), -}; - -#define dwbc_regs_dcn3(id)\ -[id] = {\ - DWBC_COMMON_REG_LIST_DCN30(id),\ -} - -static const struct dcn30_dwbc_registers dwbc30_regs[] = { - dwbc_regs_dcn3(0), -}; - -static const struct dcn30_dwbc_shift dwbc30_shift = { - DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_dwbc_mask dwbc30_mask = { - DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define mcif_wb_regs_dcn3(id)\ -[id] = {\ - 
MCIF_WB_COMMON_REG_LIST_DCN30(id),\ -} - -static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { - mcif_wb_regs_dcn3(0) -}; - -static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define dsc_regsDCN20(id)\ -[id] = {\ - DSC_REG_LIST_DCN20(id)\ -} - -static const struct dcn20_dsc_registers dsc_regs[] = { - dsc_regsDCN20(0), - dsc_regsDCN20(1), - dsc_regsDCN20(2), -}; - -static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) -}; - -static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN20(_MASK) -}; - -static const struct dcn30_mpc_registers mpc_regs = { - MPC_REG_LIST_DCN3_0(0), - MPC_REG_LIST_DCN3_0(1), - MPC_REG_LIST_DCN3_0(2), - MPC_REG_LIST_DCN3_0(3), - MPC_OUT_MUX_REG_LIST_DCN3_0(0), - MPC_OUT_MUX_REG_LIST_DCN3_0(1), - MPC_OUT_MUX_REG_LIST_DCN3_0(2), - MPC_OUT_MUX_REG_LIST_DCN3_0(3), - MPC_RMU_GLOBAL_REG_LIST_DCN3AG, - MPC_RMU_REG_LIST_DCN3AG(0), - MPC_RMU_REG_LIST_DCN3AG(1), - MPC_DWB_MUX_REG_LIST_DCN3_0(0), -}; - -static const struct dcn30_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define optc_regs(id)\ -[id] = {OPTC_COMMON_REG_LIST_DCN3_0(id)} - - -static const struct dcn_optc_registers optc_regs[] = { - optc_regs(0), - optc_regs(1), - optc_regs(2), - optc_regs(3), -}; - -static const struct dcn_optc_shift optc_shift = { - OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn_optc_mask optc_mask = { - OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define hubp_regs(id)\ -[id] = {\ - HUBP_REG_LIST_DCN30(id)\ -} - -static const struct dcn_hubp2_registers hubp_regs[] = { - hubp_regs(0), - hubp_regs(1), - hubp_regs(2), - hubp_regs(3), -}; - -static const struct dcn_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn_hubp2_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN30(_MASK) -}; - -static const struct dcn_hubbub_registers hubbub_reg = { - HUBBUB_REG_LIST_DCN301(0) -}; - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN301(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN301(_MASK) -}; - -static const struct dccg_registers dccg_regs = { - DCCG_REG_LIST_DCN301() -}; - -static const struct dccg_shift dccg_shift = { - DCCG_MASK_SH_LIST_DCN301(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN301(_MASK) -}; - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCN301_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN301_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN301_MASK_SH_LIST(_MASK) -}; -#define vmid_regs(id)\ -[id] = {\ - DCN20_VMID_REG_LIST(id)\ -} - -static const struct dcn_vmid_registers vmid_regs[] = { - vmid_regs(0), - vmid_regs(1), - vmid_regs(2), - vmid_regs(3), - vmid_regs(4), - vmid_regs(5), - vmid_regs(6), - vmid_regs(7), - vmid_regs(8), - vmid_regs(9), - vmid_regs(10), - vmid_regs(11), - vmid_regs(12), - vmid_regs(13), - vmid_regs(14), - vmid_regs(15) -}; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -static struct resource_caps 
res_cap_dcn301 = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 4, - .num_stream_encoder = 4, - .num_pll = 4, - .num_dwb = 1, - .num_ddc = 4, - .num_vmid = 16, - .num_mpc_3dlut = 2, - .num_dsc = 3, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true, - .ayuv = false, - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 16000 - }, - - /* 6:1 downscaling ratio: 1000/6 = 166.666 */ - .max_downscale_factor = { - .argb8888 = 167, - .nv12 = 167, - .fp16 = 167 - }, - 64, - 64 -}; - -static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_dpp_power_gate = false, - .disable_hubp_power_gate = false, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, - .performance_trace = false, - .max_downscale_src_width = 7680,/*upto 8K*/ - .scl_reset_length10 = true, - .sanity_checks = false, - .underflow_assert_delay_us = 0xFFFFFFFF, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .use_max_lb = false, - .exit_idle_opt_for_cursor_updates = true, - .using_dml2 = false, -}; - -static void dcn301_dpp_destroy(struct dpp **dpp) -{ - kfree(TO_DCN20_DPP(*dpp)); - *dpp = NULL; -} - -static struct dpp *dcn301_dpp_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn3_dpp *dpp = - kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); - - if (!dpp) - return NULL; - - if (dpp3_construct(dpp, ctx, inst, - &dpp_regs[inst], &tf_shift, &tf_mask)) - return &dpp->base; - - BREAK_TO_DEBUGGER(); - kfree(dpp); - return NULL; -} -static struct output_pixel_processor *dcn301_opp_create(struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_opp *opp = - kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); - - if (!opp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn20_opp_construct(opp, ctx, inst, - &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -static struct dce_aux *dcn301_aux_engine_create(struct dc_context *ctx, uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) -}; - -static struct dce_i2c_hw *dcn301_i2c_hw_create(struct dc_context *ctx, uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} -static struct mpc *dcn301_mpc_create( - 
struct dc_context *ctx, - int num_mpcc, - int num_rmu) -{ - struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), - GFP_KERNEL); - - if (!mpc30) - return NULL; - - dcn30_mpc_construct(mpc30, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - num_mpcc, - num_rmu); - - return &mpc30->base; -} - -static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx) -{ - int i; - - struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), - GFP_KERNEL); - - if (!hubbub3) - return NULL; - - hubbub301_construct(hubbub3, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask); - - - for (i = 0; i < res_cap_dcn301.num_vmid; i++) { - struct dcn20_vmid *vmid = &hubbub3->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - - hubbub3->num_vmid = res_cap_dcn301.num_vmid; - - return &hubbub3->base; -} - -static struct timing_generator *dcn301_timing_generator_create( - struct dc_context *ctx, uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &optc_regs[instance]; - tgn10->tg_shift = &optc_shift; - tgn10->tg_mask = &optc_mask; - - dcn301_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -static struct link_encoder *dcn301_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - - if (!enc20) - return NULL; - - dcn301_link_encoder_construct(enc20, - enc_init_data, - &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc20->enc10.base; -} - -static struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dcn301_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dcn301_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dcn301_panel_cntl_construct(panel_cntl, - init_data, - &panel_cntl_regs[init_data->inst], - &panel_cntl_shift, - &panel_cntl_mask); - - return &panel_cntl->base; -} - - -#define CTX ctx - -#define REG(reg_name) \ - (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) - -static uint32_t read_pipe_fuses(struct dc_context *ctx) -{ - uint32_t value = REG_READ(CC_DC_PIPE_DIS); - /* RV1 support max 4 pipes */ - value = value & 0xf; - return value; -} - - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); - -} - -static struct audio *dcn301_create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct vpg *dcn301_vpg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct 
dcn30_vpg), GFP_KERNEL); - - if (!vpg3) - return NULL; - - vpg3_construct(vpg3, ctx, inst, - &vpg_regs[inst], - &vpg_shift, - &vpg_mask); - - return &vpg3->base; -} - -static struct afmt *dcn301_afmt_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); - - if (!afmt3) - return NULL; - - afmt3_construct(afmt3, ctx, inst, - &afmt_regs[inst], - &afmt_shift, - &afmt_mask); - - return &afmt3->base; -} - -static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1; - struct vpg *vpg; - struct afmt *afmt; - int vpg_inst; - int afmt_inst; - - /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else - return NULL; - - enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - vpg = dcn301_vpg_create(ctx, vpg_inst); - afmt = dcn301_afmt_create(ctx, afmt_inst); - - if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) { - kfree(enc1); - kfree(vpg); - kfree(afmt); - return NULL; - } - - dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, - eng_id, vpg, afmt, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -static struct dce_hwseq *dcn301_hwseq_create(struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn301_create_audio, - .create_stream_encoder = dcn301_stream_encoder_create, - .create_hwseq = dcn301_hwseq_create, -}; - -static void dcn301_destruct(struct dcn301_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) { - if (pool->base.stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); - pool->base.stream_enc[i]->vpg = NULL; - } - if (pool->base.stream_enc[i]->afmt != NULL) { - kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); - pool->base.stream_enc[i]->afmt = NULL; - } - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - if (pool->base.dscs[i] != NULL) - dcn20_dsc_destroy(&pool->base.dscs[i]); - } - - if (pool->base.mpc != NULL) { - kfree(TO_DCN20_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.dpps[i] != NULL) - dcn301_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - 
pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dwb; i++) { - if (pool->base.dwbc[i] != NULL) { - kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); - pool->base.dwbc[i] = NULL; - } - if (pool->base.mcif_wb[i] != NULL) { - kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); - pool->base.mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn20_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { - if (pool->base.mpc_lut[i] != NULL) { - dc_3dlut_func_release(pool->base.mpc_lut[i]); - pool->base.mpc_lut[i] = NULL; - } - if (pool->base.mpc_shaper[i] != NULL) { - dc_transfer_func_release(pool->base.mpc_shaper[i]); - pool->base.mpc_shaper[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn20_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.multiple_abms[i] != NULL) - dce_abm_destroy(&pool->base.multiple_abms[i]); - } - - if (pool->base.dccg != NULL) - dcn_dccg_destroy(&pool->base.dccg); -} - -static struct hubp *dcn301_hubp_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); - - if (!hubp2) - return NULL; - - if (hubp3_construct(hubp2, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp2->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp2); - return NULL; -} - -static bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), - GFP_KERNEL); - - if (!dwbc30) { - dm_error("DC: failed to create dwbc30!\n"); - return false; - } - - dcn30_dwbc_construct(dwbc30, ctx, - &dwbc30_regs[i], - &dwbc30_shift, - &dwbc30_mask, - i); - - pool->dwbc[i] = &dwbc30->base; - } - return true; -} - -static bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), - GFP_KERNEL); - - if (!mcif_wb30) { - dm_error("DC: failed to create mcif_wb30!\n"); - return false; - } - - dcn30_mmhubbub_construct(mcif_wb30, ctx, - &mcif_wb30_regs[i], - &mcif_wb30_shift, - &mcif_wb30_mask, - i); - - pool->mcif_wb[i] = &mcif_wb30->base; - } - return true; -} - -static struct display_stream_compressor *dcn301_dsc_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - return &dsc->base; -} - - -static void 
dcn301_destroy_resource_pool(struct resource_pool **pool) -{ - struct dcn301_resource_pool *dcn301_pool = TO_DCN301_RES_POOL(*pool); - - dcn301_destruct(dcn301_pool); - kfree(dcn301_pool); - *pool = NULL; -} - -static struct clock_source *dcn301_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn301_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap -}; - - -static bool is_soc_bounding_box_valid(struct dc *dc) -{ - uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; - - if (ASICREV_IS_VANGOGH(hw_internal_rev)) - return true; - - return false; -} - -static bool init_soc_bounding_box(struct dc *dc, - struct dcn301_resource_pool *pool) -{ - struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_01_soc; - struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_01_ip; - - DC_LOGGER_INIT(dc->ctx->logger); - - if (!is_soc_bounding_box_valid(dc)) { - DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); - return false; - } - - loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; - loaded_ip->max_num_dpp = pool->base.pipe_count; - DC_FP_START(); - dcn20_patch_bounding_box(dc, loaded_bb); - DC_FP_END(); - - if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = {0}; - - if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - DC_FP_START(); - dcn301_fpu_init_soc_bounding_box(bb_info); - DC_FP_END(); - } - } - - return true; -} - - -static void set_wm_ranges( - struct pp_smu_funcs *pp_smu, - struct _vcs_dpi_soc_bounding_box_st *loaded_bb) -{ - struct pp_smu_wm_range_sets ranges = {0}; - int i; - - ranges.num_reader_wm_sets = 0; - - if (loaded_bb->num_states == 1) { - ranges.reader_wm_sets[0].wm_inst = 0; - ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - - ranges.num_reader_wm_sets = 1; - } else if (loaded_bb->num_states > 1) { - for (i = 0; i < 4 && i < loaded_bb->num_states; i++) { - ranges.reader_wm_sets[i].wm_inst = i; - ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - DC_FP_START(); - dcn301_fpu_set_wm_ranges(i, &ranges, loaded_bb); - DC_FP_END(); - ranges.num_reader_wm_sets = i + 1; - } - - ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - } - - ranges.num_writer_wm_sets = 1; - - ranges.writer_wm_sets[0].wm_inst = 0; - ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges.writer_wm_sets[0].min_drain_clk_mhz 
= PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - - /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ - pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges); -} - -static void dcn301_calculate_wm_and_dlg( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt, - int vlevel) -{ - DC_FP_START(); - dcn301_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel); - DC_FP_END(); -} - -static struct resource_funcs dcn301_res_pool_funcs = { - .destroy = dcn301_destroy_resource_pool, - .link_enc_create = dcn301_link_encoder_create, - .panel_cntl_create = dcn301_panel_cntl_create, - .validate_bandwidth = dcn30_validate_bandwidth, - .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, - .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, - .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, - .release_pipe = dcn20_release_pipe, - .add_stream_to_ctx = dcn30_add_stream_to_ctx, - .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, - .set_mcif_arb_params = dcn30_set_mcif_arb_params, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, - .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, - .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, - .update_bw_bounding_box = dcn301_update_bw_bounding_box, - .patch_unknown_plane_state = dcn20_patch_unknown_plane_state -}; - -static bool dcn301_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dcn301_resource_pool *pool) -{ - int i, j; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data init_data; - uint32_t pipe_fuses = read_pipe_fuses(ctx); - uint32_t num_pipes = 0; - - DC_LOGGER_INIT(dc->ctx->logger); - - ctx->dc_bios->regs = &bios_regs; - - if (dc->ctx->asic_id.chip_id == DEVICE_ID_VGH_1435) - res_cap_dcn301.num_pll = 2; - pool->base.res_cap = &res_cap_dcn301; - - pool->base.funcs = &dcn301_res_pool_funcs; - - /************************************************* - * Resource + asic cap hardcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = pool->base.res_cap->num_timing_generator; - pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; - dc->caps.max_downscale_ratio = 600; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a enabled by default*/ - dc->caps.max_cursor_size = 256; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dmdata_alloc_size = 2048; - dc->caps.max_slave_planes = 2; - dc->caps.max_slave_yuv_planes = 2; - dc->caps.max_slave_rgb_planes = 2; - dc->caps.is_apu = true; - dc->caps.post_blend_color_processing = true; - dc->caps.force_dp_tps4_for_cp2520 = true; - dc->caps.extended_aux_timeout_support = true; - dc->caps.dmcub_support = true; -
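(Illustration only, not from the upstream tree: max_downscale_ratio = 600 above and plane_cap.max_downscale_factor = 167 earlier in this file appear to encode the same 6:1 scaling limit, the former seemingly as src:dst in hundredths, the latter as dst/src in thousandths per the file's own "1000/6 = 166.666" comment. A hypothetical check in that spirit; the helper below is not driver code.)

static bool within_6to1_downscale(unsigned int src_w, unsigned int dst_w)
{
	/* dst/src must stay at or above 167/1000, i.e. roughly 1/6 */
	return (unsigned long long)dst_w * 1000 >= (unsigned long long)src_w * 167;
}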
- /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 0; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; - dc->caps.color.dpp.dgam_rom_caps.pq = 1; - dc->caps.color.dpp.dgam_rom_caps.hlg = 1; - dc->caps.color.dpp.post_csc = 1; - dc->caps.color.dpp.gamma_corr = 1; - dc->caps.color.dpp.dgam_rom_for_yuv = 0; - - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; - // no OGAM ROM on DCN301 - dc->caps.color.dpp.ogam_rom_caps.srgb = 0; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 0; - - dc->caps.color.mpc.gamut_remap = 1; - dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 - dc->caps.color.mpc.ogam_ram = 1; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; - dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 1; - - dc->caps.dp_hdmi21_pcon_support = true; - - /* read VBIOS LTTPR caps */ - if (ctx->dc_bios->funcs->get_lttpr_caps) { - enum bp_result bp_query_result; - uint8_t is_vbios_lttpr_enable = 0; - - bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); - dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; - } - - if (ctx->dc_bios->funcs->get_lttpr_interop) { - enum bp_result bp_query_result; - uint8_t is_vbios_interop_enabled = 0; - - bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); - dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; - } - - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) - dc->debug = debug_defaults_drv; - - // Init the vm_helper - if (dc->vm_helper) - vm_helper_init(dc->vm_helper, 16); - - /************************************************* - * Create resources * - *************************************************/ - - /* Clock Sources for Pixel Clock*/ - pool->base.clock_sources[DCN301_CLK_SRC_PLL0] = - dcn301_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCN301_CLK_SRC_PLL1] = - dcn301_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCN301_CLK_SRC_PLL2] = - dcn301_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCN301_CLK_SRC_PLL3] = - dcn301_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - - pool->base.clk_src_count = DCN301_CLK_SRC_TOTAL; - - /* todo: not reuse phy_pll registers */ - pool->base.dp_clock_source = - dcn301_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* DCCG */ - pool->base.dccg = dccg301_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->base.dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - init_soc_bounding_box(dc, pool); - - if (!dc->debug.disable_pplib_wm_range && pool->base.pp_smu->nv_funcs.set_wm_ranges) - set_wm_ranges(pool->base.pp_smu, &dcn3_01_soc); - - num_pipes = dcn3_01_ip.max_num_dpp; - - for (i = 0; i < dcn3_01_ip.max_num_dpp; i++) - if (pipe_fuses & 1 << 
i) - num_pipes--; - dcn3_01_ip.max_num_dpp = num_pipes; - dcn3_01_ip.max_num_otg = num_pipes; - - - dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); - - /* IRQ */ - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dcn30_create(&init_data); - if (!pool->base.irqs) - goto create_fail; - - /* HUBBUB */ - pool->base.hubbub = dcn301_hubbub_create(ctx); - if (pool->base.hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - j = 0; - /* HUBPs, DPPs, OPPs and TGs */ - for (i = 0; i < pool->base.pipe_count; i++) { - - /* if pipe is disabled, skip instance of HW pipe, - * i.e, skip ASIC register instance - */ - if ((pipe_fuses & (1 << i)) != 0) { - DC_LOG_DEBUG("%s: fusing pipe %d\n", __func__, i); - continue; - } - - pool->base.hubps[j] = dcn301_hubp_create(ctx, i); - if (pool->base.hubps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create hubps!\n"); - goto create_fail; - } - - pool->base.dpps[j] = dcn301_dpp_create(ctx, i); - if (pool->base.dpps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create dpps!\n"); - goto create_fail; - } - - pool->base.opps[j] = dcn301_opp_create(ctx, i); - if (pool->base.opps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - - pool->base.timing_generators[j] = dcn301_timing_generator_create(ctx, i); - if (pool->base.timing_generators[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - j++; - } - pool->base.timing_generator_count = j; - pool->base.pipe_count = j; - pool->base.mpcc_count = j; - - /* ABM (or ABMs for NV2x) */ - /* TODO: */ - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.multiple_abms[i] = dmub_abm_create(ctx, - &abm_regs[i], - &abm_shift, - &abm_mask); - if (pool->base.multiple_abms[i] == NULL) { - dm_error("DC: failed to create abm for pipe %d!\n", i); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* MPC and DSC */ - pool->base.mpc = dcn301_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); - if (pool->base.mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - pool->base.dscs[i] = dcn301_dsc_create(ctx, i); - if (pool->base.dscs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create display stream compressor %d!\n", i); - goto create_fail; - } - } - - /* DWB and MMHUBBUB */ - if (!dcn301_dwbc_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dwbc!\n"); - goto create_fail; - } - - if (!dcn301_mmhubbub_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mcif_wb!\n"); - goto create_fail; - } - - /* AUX and I2C */ - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dcn301_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto create_fail; - } - pool->base.hw_i2cs[i] = dcn301_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create hw i2c!!\n"); - goto create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - 
goto create_fail; - - /* HW Sequencer and Plane caps */ - dcn301_hw_sequencer_construct(dc); - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - return true; - -create_fail: - - dcn301_destruct(pool); - - return false; -} - -struct resource_pool *dcn301_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc) -{ - struct dcn301_resource_pool *pool = - kzalloc(sizeof(struct dcn301_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dcn301_resource_construct(init_data->num_virtual_links, dc, pool)) - return &pool->base; - - BREAK_TO_DEBUGGER(); - kfree(pool); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h deleted file mode 100644 index ae8672680c..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef _DCN301_RESOURCE_H_ -#define _DCN301_RESOURCE_H_ - -#include "core_types.h" - -struct dc; -struct resource_pool; -struct _vcs_dpi_display_pipe_params_st; - -extern struct _vcs_dpi_ip_params_st dcn3_01_ip; -extern struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc; - -struct dcn301_resource_pool { - struct resource_pool base; -}; -struct resource_pool *dcn301_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); - -#endif /* _DCN301_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile deleted file mode 100644 index 95b66baf39..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# -# (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved -# -# Authors: AMD -# -# Makefile for dcn302. - -DCN3_02 = dcn302_init.o dcn302_resource.o - -AMD_DAL_DCN3_02 = $(addprefix $(AMDDALPATH)/dc/dcn302/,$(DCN3_02)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_02) diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c deleted file mode 100644 index 637f9514d3..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dcn302/dcn302_hwseq.h" - -#include "dcn30/dcn30_init.h" - -#include "dc.h" - -#include "dcn302_init.h" - -void dcn302_hw_sequencer_construct(struct dc *dc) -{ - dcn30_hw_sequencer_construct(dc); - - dc->hwseq->funcs.dpp_pg_control = dcn302_dpp_pg_control; - dc->hwseq->funcs.hubp_pg_control = dcn302_hubp_pg_control; - dc->hwseq->funcs.dsc_pg_control = dcn302_dsc_pg_control; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h deleted file mode 100644 index 899587b93a..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_DCN302_INIT_H__ -#define __DC_DCN302_INIT_H__ - -struct dc; - -void dcn302_hw_sequencer_construct(struct dc *dc); - -#endif /* __DC_DCN302_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c deleted file mode 100644 index 63ac984a04..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ /dev/null @@ -1,1518 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dcn302_init.h" -#include "dcn302_resource.h" -#include "dcn302_dccg.h" -#include "irq/dcn302/irq_service_dcn302.h" - -#include "dcn30/dcn30_dio_link_encoder.h" -#include "dcn30/dcn30_dio_stream_encoder.h" -#include "dcn30/dcn30_dwb.h" -#include "dcn30/dcn30_dpp.h" -#include "dcn30/dcn30_hubbub.h" -#include "dcn30/dcn30_hubp.h" -#include "dcn30/dcn30_mmhubbub.h" -#include "dcn30/dcn30_mpc.h" -#include "dcn30/dcn30_opp.h" -#include "dcn30/dcn30_optc.h" -#include "dcn30/dcn30_resource.h" - -#include "dcn20/dcn20_dsc.h" -#include "dcn20/dcn20_resource.h" - -#include "dml/dcn30/dcn30_fpu.h" - -#include "dcn10/dcn10_resource.h" - -#include "link.h" -#include "dce/dce_abm.h" -#include "dce/dce_audio.h" -#include "dce/dce_aux.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_hwseq.h" -#include "dce/dce_i2c_hw.h" -#include "dce/dce_panel_cntl.h" -#include "dce/dmub_abm.h" -#include "dce/dmub_psr.h" -#include "clk_mgr.h" - -#include "hw_sequencer_private.h" -#include "reg_helper.h" -#include "resource.h" -#include "vm_helper.h" - -#include "dml/dcn302/dcn302_fpu.h" - -#include "dimgrey_cavefish_ip_offset.h" -#include "dcn/dcn_3_0_2_offset.h" -#include "dcn/dcn_3_0_2_sh_mask.h" -#include "dpcs/dpcs_3_0_0_offset.h" -#include "dpcs/dpcs_3_0_0_sh_mask.h" -#include "nbio/nbio_7_4_offset.h" -#include "amdgpu_socbb.h" - -#define DC_LOGGER \ - dc->ctx->logger -#define DC_LOGGER_INIT(logger) - -static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, - .performance_trace = false, - .max_downscale_src_width = 7680,/*upto 8K*/ - .disable_pplib_wm_range = false, - .scl_reset_length10 = true, - .sanity_checks = false, - .underflow_assert_delay_us = 0xFFFFFFFF, - .dwb_fi_phase = -1, // -1 = disable, - .dmub_command_table = true, - .use_max_lb = true, - .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = false, - .using_dml2 = false, -}; - -static const struct dc_panel_config panel_config_defaults = { - .psr = { - .disable_psr = false, - .disallow_psrsu = false, - .disallow_replay = false, - }, -}; - -enum dcn302_clk_src_array_id { - DCN302_CLK_SRC_PLL0, - DCN302_CLK_SRC_PLL1, - 
DCN302_CLK_SRC_PLL2, - DCN302_CLK_SRC_PLL3, - DCN302_CLK_SRC_PLL4, - DCN302_CLK_SRC_TOTAL -}; - -static const struct resource_caps res_cap_dcn302 = { - .num_timing_generator = 5, - .num_opp = 5, - .num_video_plane = 5, - .num_audio = 5, - .num_stream_encoder = 5, - .num_dwb = 1, - .num_ddc = 5, - .num_vmid = 16, - .num_mpc_3dlut = 2, - .num_dsc = 5, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true, - .ayuv = false, - }, - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 16000 - }, - /* 6:1 downscaling ratio: 1000/6 = 166.666 */ - .max_downscale_factor = { - .argb8888 = 167, - .nv12 = 167, - .fp16 = 167 - }, - 16, - 16 -}; - -/* NBIO */ -#define NBIO_BASE_INNER(seg) \ - NBIO_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -/* DCN */ -#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name - -#define SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + mm ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define DCCG_SRII(reg_name, block, id)\ - .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - mm ## reg_name ## _ ## block ## id - -#define SRII_DWB(reg_name, temp_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## temp_name - -#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define SRII_MPC_RMU(reg_name, block, id)\ - .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -static const struct dcn_hubbub_registers hubbub_reg = { - HUBBUB_REG_LIST_DCN30(0) -}; - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN30(_MASK) -}; - -#define vmid_regs(id)\ - [id] = { DCN20_VMID_REG_LIST(id) } - -static const struct dcn_vmid_registers vmid_regs[] = { - vmid_regs(0), - vmid_regs(1), - vmid_regs(2), - vmid_regs(3), - vmid_regs(4), - vmid_regs(5), - vmid_regs(6), - vmid_regs(7), - vmid_regs(8), - vmid_regs(9), - vmid_regs(10), - vmid_regs(11), - vmid_regs(12), - vmid_regs(13), - vmid_regs(14), - vmid_regs(15) -}; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -static struct hubbub *dcn302_hubbub_create(struct dc_context *ctx) -{ - int i; - - struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); - - if 
(!hubbub3) - return NULL; - - hubbub3_construct(hubbub3, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); - - for (i = 0; i < res_cap_dcn302.num_vmid; i++) { - struct dcn20_vmid *vmid = &hubbub3->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - - return &hubbub3->base; -} - -#define vpg_regs(id)\ - [id] = { VPG_DCN3_REG_LIST(id) } - -static const struct dcn30_vpg_registers vpg_regs[] = { - vpg_regs(0), - vpg_regs(1), - vpg_regs(2), - vpg_regs(3), - vpg_regs(4), - vpg_regs(5) -}; - -static const struct dcn30_vpg_shift vpg_shift = { - DCN3_VPG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn30_vpg_mask vpg_mask = { - DCN3_VPG_MASK_SH_LIST(_MASK) -}; - -static struct vpg *dcn302_vpg_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); - - if (!vpg3) - return NULL; - - vpg3_construct(vpg3, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); - - return &vpg3->base; -} - -#define afmt_regs(id)\ - [id] = { AFMT_DCN3_REG_LIST(id) } - -static const struct dcn30_afmt_registers afmt_regs[] = { - afmt_regs(0), - afmt_regs(1), - afmt_regs(2), - afmt_regs(3), - afmt_regs(4), - afmt_regs(5) -}; - -static const struct dcn30_afmt_shift afmt_shift = { - DCN3_AFMT_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn30_afmt_mask afmt_mask = { - DCN3_AFMT_MASK_SH_LIST(_MASK) -}; - -static struct afmt *dcn302_afmt_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); - - if (!afmt3) - return NULL; - - afmt3_construct(afmt3, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); - - return &afmt3->base; -} - -#define audio_regs(id)\ - [id] = { AUD_COMMON_REG_LIST(id) } - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6) -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -static struct audio *dcn302_create_audio(struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); -} - -#define stream_enc_regs(id)\ - [id] = { SE_DCN3_REG_LIST(id) } - -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), - stream_enc_regs(4) -}; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static struct stream_encoder *dcn302_stream_encoder_create(enum engine_id eng_id, struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1; - struct vpg *vpg; - struct afmt *afmt; - int vpg_inst; - int afmt_inst; - - /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGE) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else - return NULL; - - enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - vpg 
= dcn302_vpg_create(ctx, vpg_inst); - afmt = dcn302_afmt_create(ctx, afmt_inst); - - if (!enc1 || !vpg || !afmt) { - kfree(enc1); - kfree(vpg); - kfree(afmt); - return NULL; - } - - dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -#define clk_src_regs(index, pllid)\ - [index] = { CS_COMMON_REG_LIST_DCN3_02(index, pllid) } - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, C), - clk_src_regs(3, D), - clk_src_regs(4, E) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -static struct clock_source *dcn302_clock_source_create(struct dc_context *ctx, struct dc_bios *bios, - enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn3_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCN302_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN302_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN302_MASK_SH_LIST(_MASK) -}; - -static struct dce_hwseq *dcn302_hwseq_create(struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} - -#define hubp_regs(id)\ - [id] = { HUBP_REG_LIST_DCN30(id) } - -static const struct dcn_hubp2_registers hubp_regs[] = { - hubp_regs(0), - hubp_regs(1), - hubp_regs(2), - hubp_regs(3), - hubp_regs(4) -}; - -static const struct dcn_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn_hubp2_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN30(_MASK) -}; - -static struct hubp *dcn302_hubp_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); - - if (!hubp2) - return NULL; - - if (hubp3_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp2->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp2); - return NULL; -} - -#define dpp_regs(id)\ - [id] = { DPP_REG_LIST_DCN30(id) } - -static const struct dcn3_dpp_registers dpp_regs[] = { - dpp_regs(0), - dpp_regs(1), - dpp_regs(2), - dpp_regs(3), - dpp_regs(4) -}; - -static const struct dcn3_dpp_shift tf_shift = { - DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) -}; - -static const struct dcn3_dpp_mask tf_mask = { - DPP_REG_LIST_SH_MASK_DCN30(_MASK) -}; - -static struct dpp *dcn302_dpp_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); - - if (!dpp) - return NULL; - - if (dpp3_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, &tf_mask)) - return &dpp->base; - - BREAK_TO_DEBUGGER(); - kfree(dpp); - return NULL; -} - -#define opp_regs(id)\ - [id] = { OPP_REG_LIST_DCN30(id) } - -static const struct dcn20_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3), - opp_regs(4) 
-}; - -static const struct dcn20_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn20_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN20(_MASK) -}; - -static struct output_pixel_processor *dcn302_opp_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); - - if (!opp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn20_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -#define optc_regs(id)\ - [id] = { OPTC_COMMON_REG_LIST_DCN3_0(id) } - -static const struct dcn_optc_registers optc_regs[] = { - optc_regs(0), - optc_regs(1), - optc_regs(2), - optc_regs(3), - optc_regs(4) -}; - -static const struct dcn_optc_shift optc_shift = { - OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn_optc_mask optc_mask = { - OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static struct timing_generator *dcn302_timing_generator_create(struct dc_context *ctx, uint32_t instance) -{ - struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &optc_regs[instance]; - tgn10->tg_shift = &optc_shift; - tgn10->tg_mask = &optc_mask; - - dcn30_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct dcn30_mpc_registers mpc_regs = { - MPC_REG_LIST_DCN3_0(0), - MPC_REG_LIST_DCN3_0(1), - MPC_REG_LIST_DCN3_0(2), - MPC_REG_LIST_DCN3_0(3), - MPC_REG_LIST_DCN3_0(4), - MPC_OUT_MUX_REG_LIST_DCN3_0(0), - MPC_OUT_MUX_REG_LIST_DCN3_0(1), - MPC_OUT_MUX_REG_LIST_DCN3_0(2), - MPC_OUT_MUX_REG_LIST_DCN3_0(3), - MPC_OUT_MUX_REG_LIST_DCN3_0(4), - MPC_RMU_GLOBAL_REG_LIST_DCN3AG, - MPC_RMU_REG_LIST_DCN3AG(0), - MPC_RMU_REG_LIST_DCN3AG(1), - MPC_RMU_REG_LIST_DCN3AG(2), - MPC_DWB_MUX_REG_LIST_DCN3_0(0), -}; - -static const struct dcn30_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static struct mpc *dcn302_mpc_create(struct dc_context *ctx, int num_mpcc, int num_rmu) -{ - struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); - - if (!mpc30) - return NULL; - - dcn30_mpc_construct(mpc30, ctx, &mpc_regs, &mpc_shift, &mpc_mask, num_mpcc, num_rmu); - - return &mpc30->base; -} - -#define dsc_regsDCN20(id)\ -[id] = { DSC_REG_LIST_DCN20(id) } - -static const struct dcn20_dsc_registers dsc_regs[] = { - dsc_regsDCN20(0), - dsc_regsDCN20(1), - dsc_regsDCN20(2), - dsc_regsDCN20(3), - dsc_regsDCN20(4) -}; - -static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) -}; - -static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN20(_MASK) -}; - -static struct display_stream_compressor *dcn302_dsc_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - return &dsc->base; -} - -#define dwbc_regs_dcn3(id)\ -[id] = { DWBC_COMMON_REG_LIST_DCN30(id) } - -static const struct dcn30_dwbc_registers dwbc30_regs[] = { - dwbc_regs_dcn3(0) -}; - -static const struct dcn30_dwbc_shift dwbc30_shift = { - DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_dwbc_mask dwbc30_mask = { - DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static bool dcn302_dwbc_create(struct dc_context 
*ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), GFP_KERNEL); - - if (!dwbc30) { - dm_error("DC: failed to create dwbc30!\n"); - return false; - } - - dcn30_dwbc_construct(dwbc30, ctx, &dwbc30_regs[i], &dwbc30_shift, &dwbc30_mask, i); - - pool->dwbc[i] = &dwbc30->base; - } - return true; -} - -#define mcif_wb_regs_dcn3(id)\ -[id] = { MCIF_WB_COMMON_REG_LIST_DCN30(id) } - -static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { - mcif_wb_regs_dcn3(0) -}; - -static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static bool dcn302_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), GFP_KERNEL); - - if (!mcif_wb30) { - dm_error("DC: failed to create mcif_wb30!\n"); - return false; - } - - dcn30_mmhubbub_construct(mcif_wb30, ctx, &mcif_wb30_regs[i], &mcif_wb30_shift, &mcif_wb30_mask, i); - - pool->mcif_wb[i] = &mcif_wb30->base; - } - return true; -} - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST0(id), \ - .AUXN_IMPCAL = 0, \ - .AUXP_IMPCAL = 0, \ - .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4) -}; - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -static struct dce_aux *dcn302_aux_engine_create(struct dc_context *ctx, uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} - -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5) -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) -}; - -static struct dce_i2c_hw *dcn302_i2c_hw_create(struct dc_context *ctx, uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -#define 
link_regs(id, phyid)\ - [id] = {\ - LE_DCN3_REG_LIST(id), \ - UNIPHY_DCN2_REG_LIST(phyid), \ - DPCS_DCN2_REG_LIST(id), \ - SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ - } - -static const struct dcn10_link_enc_registers link_enc_regs[] = { - link_regs(0, A), - link_regs(1, B), - link_regs(2, C), - link_regs(3, D), - link_regs(4, E) -}; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT), - DPCS_DCN2_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK), - DPCS_DCN2_MASK_SH_LIST(_MASK) -}; - -#define aux_regs(id)\ - [id] = { DCN2_AUX_REG_LIST(id) } - -static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4) -}; - -#define hpd_regs(id)\ - [id] = { HPD_REG_LIST(id) } - -static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4) -}; - -static struct link_encoder *dcn302_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - - if (!enc20) - return NULL; - - dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); - - return &enc20->enc10.base; -} - -static const struct dce_panel_cntl_registers panel_cntl_regs[] = { - { DCN_PANEL_CNTL_REG_LIST() } -}; - -static const struct dce_panel_cntl_shift panel_cntl_shift = { - DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_panel_cntl_mask panel_cntl_mask = { - DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) -}; - -static struct panel_cntl *dcn302_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], - &panel_cntl_shift, &panel_cntl_mask); - - return &panel_cntl->base; -} - -static void read_dce_straps(struct dc_context *ctx, struct resource_straps *straps) -{ - generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); -} - -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn302_create_audio, - .create_stream_encoder = dcn302_stream_encoder_create, - .create_hwseq = dcn302_hwseq_create, -}; - -static bool is_soc_bounding_box_valid(struct dc *dc) -{ - uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; - - if (ASICREV_IS_DIMGREY_CAVEFISH_P(hw_internal_rev)) - return true; - - return false; -} - -static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) -{ - struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_02_soc; - struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_02_ip; - - DC_LOGGER_INIT(dc->ctx->logger); - - if (!is_soc_bounding_box_valid(dc)) { - DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); - return false; - } - - loaded_ip->max_num_otg = pool->pipe_count; - loaded_ip->max_num_dpp = pool->pipe_count; - loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; - DC_FP_START(); - dcn20_patch_bounding_box(dc, loaded_bb); - DC_FP_END(); - - 
if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = { 0 }; - - if (dc->ctx->dc_bios->funcs->get_soc_bb_info( - dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - - DC_FP_START(); - dcn302_fpu_init_soc_bounding_box(bb_info); - DC_FP_END(); - } - } - - return true; -} - -static void dcn302_resource_destruct(struct resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->stream_enc_count; i++) { - if (pool->stream_enc[i] != NULL) { - if (pool->stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->stream_enc[i]->vpg)); - pool->stream_enc[i]->vpg = NULL; - } - if (pool->stream_enc[i]->afmt != NULL) { - kfree(DCN30_AFMT_FROM_AFMT(pool->stream_enc[i]->afmt)); - pool->stream_enc[i]->afmt = NULL; - } - kfree(DCN10STRENC_FROM_STRENC(pool->stream_enc[i])); - pool->stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->res_cap->num_dsc; i++) { - if (pool->dscs[i] != NULL) - dcn20_dsc_destroy(&pool->dscs[i]); - } - - if (pool->mpc != NULL) { - kfree(TO_DCN20_MPC(pool->mpc)); - pool->mpc = NULL; - } - - if (pool->hubbub != NULL) { - kfree(pool->hubbub); - pool->hubbub = NULL; - } - - for (i = 0; i < pool->pipe_count; i++) { - if (pool->dpps[i] != NULL) { - kfree(TO_DCN20_DPP(pool->dpps[i])); - pool->dpps[i] = NULL; - } - - if (pool->hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->hubps[i])); - pool->hubps[i] = NULL; - } - - if (pool->irqs != NULL) - dal_irq_service_destroy(&pool->irqs); - } - - for (i = 0; i < pool->res_cap->num_ddc; i++) { - if (pool->engines[i] != NULL) - dce110_engine_destroy(&pool->engines[i]); - if (pool->hw_i2cs[i] != NULL) { - kfree(pool->hw_i2cs[i]); - pool->hw_i2cs[i] = NULL; - } - if (pool->sw_i2cs[i] != NULL) { - kfree(pool->sw_i2cs[i]); - pool->sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->res_cap->num_opp; i++) { - if (pool->opps[i] != NULL) - pool->opps[i]->funcs->opp_destroy(&pool->opps[i]); - } - - for (i = 0; i < pool->res_cap->num_timing_generator; i++) { - if (pool->timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->timing_generators[i])); - pool->timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->res_cap->num_dwb; i++) { - if (pool->dwbc[i] != NULL) { - kfree(TO_DCN30_DWBC(pool->dwbc[i])); - pool->dwbc[i] = NULL; - } - if (pool->mcif_wb[i] != NULL) { - kfree(TO_DCN30_MMHUBBUB(pool->mcif_wb[i])); - pool->mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->audio_count; i++) { - if (pool->audios[i]) - dce_aud_destroy(&pool->audios[i]); - } - - for (i = 0; i < pool->clk_src_count; i++) { - if (pool->clock_sources[i] != NULL) - dcn20_clock_source_destroy(&pool->clock_sources[i]); - } - - if (pool->dp_clock_source != NULL) - dcn20_clock_source_destroy(&pool->dp_clock_source); - - for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { - if (pool->mpc_lut[i] != NULL) { - dc_3dlut_func_release(pool->mpc_lut[i]); - pool->mpc_lut[i] = NULL; - } - if (pool->mpc_shaper[i] != NULL) { - dc_transfer_func_release(pool->mpc_shaper[i]); - pool->mpc_shaper[i] = NULL; - } - } - - for (i = 0; i < pool->pipe_count; i++) { - if (pool->multiple_abms[i] != NULL) - dce_abm_destroy(&pool->multiple_abms[i]); - } - - if (pool->psr != NULL) - dmub_psr_destroy(&pool->psr); - - if (pool->dccg != NULL) - dcn_dccg_destroy(&pool->dccg); - - if (pool->oem_device != NULL) { - struct dc *dc = pool->oem_device->ctx->dc; - - dc->link_srv->destroy_ddc_service(&pool->oem_device); - } -} - -static void dcn302_destroy_resource_pool(struct resource_pool **pool) -{ - dcn302_resource_destruct(*pool); - kfree(*pool); - *pool = NULL; -} - 
-void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) -{ - DC_FP_START(); - dcn302_fpu_update_bw_bounding_box(dc, bw_params); - DC_FP_END(); -} - -static void dcn302_get_panel_config_defaults(struct dc_panel_config *panel_config) -{ - *panel_config = panel_config_defaults; -} - -static struct resource_funcs dcn302_res_pool_funcs = { - .destroy = dcn302_destroy_resource_pool, - .link_enc_create = dcn302_link_encoder_create, - .panel_cntl_create = dcn302_panel_cntl_create, - .validate_bandwidth = dcn30_validate_bandwidth, - .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, - .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, - .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, - .release_pipe = dcn20_release_pipe, - .add_stream_to_ctx = dcn30_add_stream_to_ctx, - .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, - .set_mcif_arb_params = dcn30_set_mcif_arb_params, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, - .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, - .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, - .update_bw_bounding_box = dcn302_update_bw_bounding_box, - .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, - .get_panel_config_defaults = dcn302_get_panel_config_defaults, -}; - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap -}; - -static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_3), - NBIO_SR(BIOS_SCRATCH_6) -}; - -static const struct dccg_registers dccg_regs = { - DCCG_REG_LIST_DCN3_02() -}; - -static const struct dccg_shift dccg_shift = { - DCCG_MASK_SH_LIST_DCN3_02(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN3_02(_MASK) -}; - -#define abm_regs(id)\ - [id] = { ABM_DCN302_REG_LIST(id) } - -static const struct dce_abm_registers abm_regs[] = { - abm_regs(0), - abm_regs(1), - abm_regs(2), - abm_regs(3), - abm_regs(4) -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN30(_MASK) -}; - -static bool dcn302_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct resource_pool *pool) -{ - int i; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data init_data; - struct ddc_service_init_data ddc_init_data = {0}; - - ctx->dc_bios->regs = &bios_regs; - - pool->res_cap = &res_cap_dcn302; - - pool->funcs = &dcn302_res_pool_funcs; - - /************************************************* - * Resource + asic cap hardcoding * - *************************************************/ - pool->underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->pipe_count = pool->res_cap->num_timing_generator; - pool->mpcc_count = pool->res_cap->num_timing_generator; - dc->caps.max_downscale_ratio = 600; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/ - dc->caps.max_cursor_size = 256; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dmdata_alloc_size = 2048; - dc->caps.mall_size_per_mem_channel = 4; - /* total size = mall per channel * num channels * 1024 * 1024 */ - dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel *
dc->ctx->dc_bios->vram_info.num_chans * 1048576; - dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; - dc->caps.max_slave_planes = 2; - dc->caps.max_slave_yuv_planes = 2; - dc->caps.max_slave_rgb_planes = 2; - dc->caps.post_blend_color_processing = true; - dc->caps.force_dp_tps4_for_cp2520 = true; - dc->caps.extended_aux_timeout_support = true; - dc->caps.dmcub_support = true; - dc->caps.max_v_total = (1 << 15) - 1; - - /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 0; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; - dc->caps.color.dpp.dgam_rom_caps.pq = 1; - dc->caps.color.dpp.dgam_rom_caps.hlg = 1; - dc->caps.color.dpp.post_csc = 1; - dc->caps.color.dpp.gamma_corr = 1; - dc->caps.color.dpp.dgam_rom_for_yuv = 0; - - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; - // no OGAM ROM on DCN3 - dc->caps.color.dpp.ogam_rom_caps.srgb = 0; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 0; - - dc->caps.color.mpc.gamut_remap = 1; - dc->caps.color.mpc.num_3dluts = pool->res_cap->num_mpc_3dlut; //3 - dc->caps.color.mpc.ogam_ram = 1; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; - dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 1; - - dc->caps.dp_hdmi21_pcon_support = true; - - /* read VBIOS LTTPR caps */ - if (ctx->dc_bios->funcs->get_lttpr_caps) { - enum bp_result bp_query_result; - uint8_t is_vbios_lttpr_enable = 0; - - bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); - dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; - } - - if (ctx->dc_bios->funcs->get_lttpr_interop) { - enum bp_result bp_query_result; - uint8_t is_vbios_interop_enabled = 0; - - bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, - &is_vbios_interop_enabled); - dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; - } - - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) - dc->debug = debug_defaults_drv; - - // Init the vm_helper - if (dc->vm_helper) - vm_helper_init(dc->vm_helper, 16); - - /************************************************* - * Create resources * - *************************************************/ - - /* Clock Sources for Pixel Clock*/ - pool->clock_sources[DCN302_CLK_SRC_PLL0] = - dcn302_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->clock_sources[DCN302_CLK_SRC_PLL1] = - dcn302_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->clock_sources[DCN302_CLK_SRC_PLL2] = - dcn302_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->clock_sources[DCN302_CLK_SRC_PLL3] = - dcn302_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - pool->clock_sources[DCN302_CLK_SRC_PLL4] = - dcn302_clock_source_create(ctx, ctx->dc_bios, - 
CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - - pool->clk_src_count = DCN302_CLK_SRC_TOTAL; - - /* todo: not reuse phy_pll registers */ - pool->dp_clock_source = - dcn302_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->clk_src_count; i++) { - if (pool->clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* DCCG */ - pool->dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* PP Lib and SMU interfaces */ - init_soc_bounding_box(dc, pool); - - /* DML */ - dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30); - - /* IRQ */ - init_data.ctx = dc->ctx; - pool->irqs = dal_irq_service_dcn302_create(&init_data); - if (!pool->irqs) - goto create_fail; - - /* HUBBUB */ - pool->hubbub = dcn302_hubbub_create(ctx); - if (pool->hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - /* HUBPs, DPPs, OPPs and TGs */ - for (i = 0; i < pool->pipe_count; i++) { - pool->hubps[i] = dcn302_hubp_create(ctx, i); - if (pool->hubps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubps!\n"); - goto create_fail; - } - - pool->dpps[i] = dcn302_dpp_create(ctx, i); - if (pool->dpps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dpps!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->res_cap->num_opp; i++) { - pool->opps[i] = dcn302_opp_create(ctx, i); - if (pool->opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->res_cap->num_timing_generator; i++) { - pool->timing_generators[i] = dcn302_timing_generator_create(ctx, i); - if (pool->timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - pool->timing_generator_count = i; - - /* PSR */ - pool->psr = dmub_psr_create(ctx); - if (pool->psr == NULL) { - dm_error("DC: failed to create psr!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* ABMs */ - for (i = 0; i < pool->res_cap->num_timing_generator; i++) { - pool->multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask); - if (pool->multiple_abms[i] == NULL) { - dm_error("DC: failed to create abm for pipe %d!\n", i); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* MPC and DSC */ - pool->mpc = dcn302_mpc_create(ctx, pool->mpcc_count, pool->res_cap->num_mpc_3dlut); - if (pool->mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - for (i = 0; i < pool->res_cap->num_dsc; i++) { - pool->dscs[i] = dcn302_dsc_create(ctx, i); - if (pool->dscs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create display stream compressor %d!\n", i); - goto create_fail; - } - } - - /* DWB and MMHUBBUB */ - if (!dcn302_dwbc_create(ctx, pool)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dwbc!\n"); - goto create_fail; - } - - if (!dcn302_mmhubbub_create(ctx, pool)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mcif_wb!\n"); - goto create_fail; - } - - /* AUX and I2C */ - for (i = 0; i < pool->res_cap->num_ddc; i++) { - pool->engines[i] = dcn302_aux_engine_create(ctx, i); - if (pool->engines[i] == NULL) { - 
BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create aux engine!!\n"); - goto create_fail; - } - pool->hw_i2cs[i] = dcn302_i2c_hw_create(ctx, i); - if (pool->hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hw i2c!!\n"); - goto create_fail; - } - pool->sw_i2cs[i] = NULL; - } - - /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ - if (!resource_construct(num_virtual_links, dc, pool, - &res_create_funcs)) - goto create_fail; - - /* HW Sequencer and Plane caps */ - dcn302_hw_sequencer_construct(dc); - - dc->caps.max_planes = pool->pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { - ddc_init_data.ctx = dc->ctx; - ddc_init_data.link = NULL; - ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; - ddc_init_data.id.enum_id = 0; - ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); - } else { - pool->oem_device = NULL; - } - - return true; - -create_fail: - - dcn302_resource_destruct(pool); - - return false; -} - -struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc) -{ - struct resource_pool *pool = kzalloc(sizeof(struct resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dcn302_resource_construct(init_data->num_virtual_links, dc, pool)) - return pool; - - BREAK_TO_DEBUGGER(); - kfree(pool); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h deleted file mode 100644 index 9f24e73b92..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE.
- * - * Authors: AMD - * - */ - -#ifndef _DCN302_RESOURCE_H_ -#define _DCN302_RESOURCE_H_ - -#include "core_types.h" - -extern struct _vcs_dpi_ip_params_st dcn3_02_ip; -extern struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc; - -struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc); - -void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); - -#endif /* _DCN302_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile index d7b3ad780e..a954e316ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile @@ -6,7 +6,7 @@ # # Makefile for dcn303. -DCN3_03 = dcn303_init.o dcn303_resource.o +DCN3_03 = dcn303_init.o AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c deleted file mode 100644 index edb4d68b81..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c +++ /dev/null @@ -1,40 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright (C) 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - */ - -#include "dcn303/dcn303_hwseq.h" -#include "dcn30/dcn30_init.h" -#include "dc.h" - -#include "dcn303_init.h" - -void dcn303_hw_sequencer_construct(struct dc *dc) -{ - dcn30_hw_sequencer_construct(dc); - - dc->hwseq->funcs.dpp_pg_control = dcn303_dpp_pg_control; - dc->hwseq->funcs.hubp_pg_control = dcn303_hubp_pg_control; - dc->hwseq->funcs.dsc_pg_control = dcn303_dsc_pg_control; - dc->hwseq->funcs.enable_power_gating_plane = dcn303_enable_power_gating_plane; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h deleted file mode 100644 index 4949981126..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h +++ /dev/null @@ -1,33 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright (C) 2021 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - */ - -#ifndef __DC_DCN303_INIT_H__ -#define __DC_DCN303_INIT_H__ - -struct dc; - -void dcn303_hw_sequencer_construct(struct dc *dc); - -#endif /* __DC_DCN303_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c deleted file mode 100644 index 49cb7fde41..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ /dev/null @@ -1,1448 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright (C) 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - */ - -#include "dcn303_init.h" -#include "dcn303_resource.h" -#include "dcn303_dccg.h" -#include "irq/dcn303/irq_service_dcn303.h" - -#include "dcn30/dcn30_dio_link_encoder.h" -#include "dcn30/dcn30_dio_stream_encoder.h" -#include "dcn30/dcn30_dpp.h" -#include "dcn30/dcn30_dwb.h" -#include "dcn30/dcn30_hubbub.h" -#include "dcn30/dcn30_hubp.h" -#include "dcn30/dcn30_mmhubbub.h" -#include "dcn30/dcn30_mpc.h" -#include "dcn30/dcn30_opp.h" -#include "dcn30/dcn30_optc.h" -#include "dcn30/dcn30_resource.h" - -#include "dcn20/dcn20_dsc.h" -#include "dcn20/dcn20_resource.h" - -#include "dml/dcn30/dcn30_fpu.h" - -#include "dcn10/dcn10_resource.h" - -#include "link.h" - -#include "dce/dce_abm.h" -#include "dce/dce_audio.h" -#include "dce/dce_aux.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_hwseq.h" -#include "dce/dce_i2c_hw.h" -#include "dce/dce_panel_cntl.h" -#include "dce/dmub_abm.h" -#include "dce/dmub_psr.h" -#include "clk_mgr.h" - -#include "hw_sequencer_private.h" -#include "reg_helper.h" -#include "resource.h" -#include "vm_helper.h" - -#include "sienna_cichlid_ip_offset.h" -#include "dcn/dcn_3_0_3_offset.h" -#include "dcn/dcn_3_0_3_sh_mask.h" -#include "dpcs/dpcs_3_0_3_offset.h" -#include "dpcs/dpcs_3_0_3_sh_mask.h" -#include "nbio/nbio_2_3_offset.h" - -#include "dml/dcn303/dcn303_fpu.h" - -#define DC_LOGGER \ - dc->ctx->logger -#define DC_LOGGER_INIT(logger) - - -static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = true, - .pipe_split_policy = MPC_SPLIT_AVOID, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, - .performance_trace = false, - .max_downscale_src_width = 7680,/*up to 8K*/ - .disable_pplib_wm_range = false, - .scl_reset_length10 = true, - .sanity_checks = false, - .underflow_assert_delay_us = 0xFFFFFFFF, - .dwb_fi_phase = -1, // -1 = disable, - .dmub_command_table = true, - .exit_idle_opt_for_cursor_updates = true, - .disable_idle_power_optimizations = false, - .using_dml2 = false, -}; - -static const struct dc_panel_config panel_config_defaults = { - .psr = { - .disable_psr = false, - .disallow_psrsu = false, - .disallow_replay = false, - }, -}; - -enum dcn303_clk_src_array_id { - DCN303_CLK_SRC_PLL0, - DCN303_CLK_SRC_PLL1, - DCN303_CLK_SRC_TOTAL -}; - -static const struct resource_caps res_cap_dcn303 = { - .num_timing_generator = 2, - .num_opp = 2, - .num_video_plane = 2, - .num_audio = 2, - .num_stream_encoder = 2, - .num_dwb = 1, - .num_ddc = 2, - .num_vmid = 16, - .num_mpc_3dlut = 1, - .num_dsc = 2, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true, - .ayuv = false, - }, - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 16000 - }, - .max_downscale_factor = { - .argb8888 = 600, - .nv12 = 600, - .fp16 = 600 - }, - 16, - 16 -}; - -/* NBIO */ -#define NBIO_BASE_INNER(seg) \ - NBIO_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -/* DCN */ -#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name - -#define SF(reg_name, field_name,
post_fix)\ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + mm ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define DCCG_SRII(reg_name, block, id)\ - .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - mm ## reg_name ## _ ## block ## id - -#define SRII_DWB(reg_name, temp_name, block, id)\ - .reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## temp_name - -#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define SRII_MPC_RMU(reg_name, block, id)\ - .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name - -static const struct dcn_hubbub_registers hubbub_reg = { - HUBBUB_REG_LIST_DCN30(0) -}; - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN30(_MASK) -}; - -#define vmid_regs(id)\ - [id] = { DCN20_VMID_REG_LIST(id) } - -static const struct dcn_vmid_registers vmid_regs[] = { - vmid_regs(0), - vmid_regs(1), - vmid_regs(2), - vmid_regs(3), - vmid_regs(4), - vmid_regs(5), - vmid_regs(6), - vmid_regs(7), - vmid_regs(8), - vmid_regs(9), - vmid_regs(10), - vmid_regs(11), - vmid_regs(12), - vmid_regs(13), - vmid_regs(14), - vmid_regs(15) -}; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -static struct hubbub *dcn303_hubbub_create(struct dc_context *ctx) -{ - int i; - - struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); - - if (!hubbub3) - return NULL; - - hubbub3_construct(hubbub3, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); - - for (i = 0; i < res_cap_dcn303.num_vmid; i++) { - struct dcn20_vmid *vmid = &hubbub3->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - - return &hubbub3->base; -} - -#define vpg_regs(id)\ - [id] = { VPG_DCN3_REG_LIST(id) } - -static const struct dcn30_vpg_registers vpg_regs[] = { - vpg_regs(0), - vpg_regs(1), - vpg_regs(2) -}; - -static const struct dcn30_vpg_shift vpg_shift = { - DCN3_VPG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn30_vpg_mask vpg_mask = { - DCN3_VPG_MASK_SH_LIST(_MASK) -}; - -static struct vpg *dcn303_vpg_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); - - if (!vpg3) - return NULL; - - vpg3_construct(vpg3, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); - - return &vpg3->base; -} - -#define afmt_regs(id)\ - [id] = { AFMT_DCN3_REG_LIST(id) } - -static const struct dcn30_afmt_registers afmt_regs[] = { - afmt_regs(0), - afmt_regs(1), - afmt_regs(2) -}; - -static const struct dcn30_afmt_shift afmt_shift = { - DCN3_AFMT_MASK_SH_LIST(__SHIFT) -}; - -static const struct 
dcn30_afmt_mask afmt_mask = { - DCN3_AFMT_MASK_SH_LIST(_MASK) -}; - -static struct afmt *dcn303_afmt_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); - - if (!afmt3) - return NULL; - - afmt3_construct(afmt3, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); - - return &afmt3->base; -} - -#define audio_regs(id)\ - [id] = { AUD_COMMON_REG_LIST(id) } - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6) -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -static struct audio *dcn303_create_audio(struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); -} - -#define stream_enc_regs(id)\ - [id] = { SE_DCN3_REG_LIST(id) } - -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1) -}; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id, struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1; - struct vpg *vpg; - struct afmt *afmt; - int vpg_inst; - int afmt_inst; - - /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGB) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else - return NULL; - - enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - vpg = dcn303_vpg_create(ctx, vpg_inst); - afmt = dcn303_afmt_create(ctx, afmt_inst); - - if (!enc1 || !vpg || !afmt) { - kfree(enc1); - kfree(vpg); - kfree(afmt); - return NULL; - } - - dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -#define clk_src_regs(index, pllid)\ - [index] = { CS_COMMON_REG_LIST_DCN3_03(index, pllid) } - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -static struct clock_source *dcn303_clock_source_create(struct dc_context *ctx, struct dc_bios *bios, - enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn3_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCN303_REG_LIST() -}; - -static const struct dce_hwseq_shift hwseq_shift = { - 
HWSEQ_DCN303_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN303_MASK_SH_LIST(_MASK) -}; - -static struct dce_hwseq *dcn303_hwseq_create(struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} - -#define hubp_regs(id)\ - [id] = { HUBP_REG_LIST_DCN30(id) } - -static const struct dcn_hubp2_registers hubp_regs[] = { - hubp_regs(0), - hubp_regs(1) -}; - -static const struct dcn_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn_hubp2_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN30(_MASK) -}; - -static struct hubp *dcn303_hubp_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); - - if (!hubp2) - return NULL; - - if (hubp3_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp2->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp2); - return NULL; -} - -#define dpp_regs(id)\ - [id] = { DPP_REG_LIST_DCN30(id) } - -static const struct dcn3_dpp_registers dpp_regs[] = { - dpp_regs(0), - dpp_regs(1) -}; - -static const struct dcn3_dpp_shift tf_shift = { - DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) -}; - -static const struct dcn3_dpp_mask tf_mask = { - DPP_REG_LIST_SH_MASK_DCN30(_MASK) -}; - -static struct dpp *dcn303_dpp_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); - - if (!dpp) - return NULL; - - if (dpp3_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, &tf_mask)) - return &dpp->base; - - BREAK_TO_DEBUGGER(); - kfree(dpp); - return NULL; -} - -#define opp_regs(id)\ - [id] = { OPP_REG_LIST_DCN30(id) } - -static const struct dcn20_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1) -}; - -static const struct dcn20_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn20_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN20(_MASK) -}; - -static struct output_pixel_processor *dcn303_opp_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); - - if (!opp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn20_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -#define optc_regs(id)\ - [id] = { OPTC_COMMON_REG_LIST_DCN3_0(id) } - -static const struct dcn_optc_registers optc_regs[] = { - optc_regs(0), - optc_regs(1) -}; - -static const struct dcn_optc_shift optc_shift = { - OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn_optc_mask optc_mask = { - OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static struct timing_generator *dcn303_timing_generator_create(struct dc_context *ctx, uint32_t instance) -{ - struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &optc_regs[instance]; - tgn10->tg_shift = &optc_shift; - tgn10->tg_mask = &optc_mask; - - dcn30_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct dcn30_mpc_registers mpc_regs = { - MPC_REG_LIST_DCN3_0(0), - MPC_REG_LIST_DCN3_0(1), - MPC_OUT_MUX_REG_LIST_DCN3_0(0), - MPC_OUT_MUX_REG_LIST_DCN3_0(1), - MPC_RMU_GLOBAL_REG_LIST_DCN3AG, - MPC_RMU_REG_LIST_DCN3AG(0), - MPC_DWB_MUX_REG_LIST_DCN3_0(0), -}; - -static const struct 
dcn30_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN303(__SHIFT) -}; - -static const struct dcn30_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN303(_MASK) -}; - -static struct mpc *dcn303_mpc_create(struct dc_context *ctx, int num_mpcc, int num_rmu) -{ - struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); - - if (!mpc30) - return NULL; - - dcn30_mpc_construct(mpc30, ctx, &mpc_regs, &mpc_shift, &mpc_mask, num_mpcc, num_rmu); - - return &mpc30->base; -} - -#define dsc_regsDCN20(id)\ -[id] = { DSC_REG_LIST_DCN20(id) } - -static const struct dcn20_dsc_registers dsc_regs[] = { - dsc_regsDCN20(0), - dsc_regsDCN20(1) -}; - -static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) -}; - -static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN20(_MASK) -}; - -static struct display_stream_compressor *dcn303_dsc_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - return &dsc->base; -} - -#define dwbc_regs_dcn3(id)\ -[id] = { DWBC_COMMON_REG_LIST_DCN30(id) } - -static const struct dcn30_dwbc_registers dwbc30_regs[] = { - dwbc_regs_dcn3(0) -}; - -static const struct dcn30_dwbc_shift dwbc30_shift = { - DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_dwbc_mask dwbc30_mask = { - DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static bool dcn303_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), GFP_KERNEL); - - if (!dwbc30) { - dm_error("DC: failed to create dwbc30!\n"); - return false; - } - - dcn30_dwbc_construct(dwbc30, ctx, &dwbc30_regs[i], &dwbc30_shift, &dwbc30_mask, i); - - pool->dwbc[i] = &dwbc30->base; - } - return true; -} - -#define mcif_wb_regs_dcn3(id)\ -[id] = { MCIF_WB_COMMON_REG_LIST_DCN30(id) } - -static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { - mcif_wb_regs_dcn3(0) -}; - -static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static bool dcn303_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), GFP_KERNEL); - - if (!mcif_wb30) { - dm_error("DC: failed to create mcif_wb30!\n"); - return false; - } - - dcn30_mmhubbub_construct(mcif_wb30, ctx, &mcif_wb30_regs[i], &mcif_wb30_shift, &mcif_wb30_mask, i); - - pool->mcif_wb[i] = &mcif_wb30->base; - } - return true; -} - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST0(id), \ - .AUXN_IMPCAL = 0, \ - .AUXP_IMPCAL = 0, \ - .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1) -}; - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -static struct dce_aux *dcn303_aux_engine_create(struct dc_context *ctx, uint32_t inst) -{ - struct 
aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} - -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2) -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) -}; - -static struct dce_i2c_hw *dcn303_i2c_hw_create(struct dc_context *ctx, uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -#define link_regs(id, phyid)\ - [id] = {\ - LE_DCN3_REG_LIST(id), \ - UNIPHY_DCN2_REG_LIST(phyid), \ - SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ - } - -static const struct dcn10_link_enc_registers link_enc_regs[] = { - link_regs(0, A), - link_regs(1, B) -}; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT), - DPCS_DCN2_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK), - DPCS_DCN2_MASK_SH_LIST(_MASK) -}; - -#define aux_regs(id)\ - [id] = { DCN2_AUX_REG_LIST(id) } - -static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1) -}; - -#define hpd_regs(id)\ - [id] = { HPD_REG_LIST(id) } - -static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1) -}; - -static struct link_encoder *dcn303_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - - if (!enc20) - return NULL; - - dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); - - return &enc20->enc10.base; -} - -static const struct dce_panel_cntl_registers panel_cntl_regs[] = { - { DCN_PANEL_CNTL_REG_LIST() } -}; - -static const struct dce_panel_cntl_shift panel_cntl_shift = { - DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_panel_cntl_mask panel_cntl_mask = { - DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) -}; - -static struct panel_cntl *dcn303_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], - &panel_cntl_shift, &panel_cntl_mask); - - return &panel_cntl->base; -} - -static 
void read_dce_straps(struct dc_context *ctx, struct resource_straps *straps) -{ - generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); -} - -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn303_create_audio, - .create_stream_encoder = dcn303_stream_encoder_create, - .create_hwseq = dcn303_hwseq_create, -}; - -static bool is_soc_bounding_box_valid(struct dc *dc) -{ - uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; - - if (ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev)) - return true; - - return false; -} - -static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) -{ - struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_03_soc; - struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_03_ip; - - DC_LOGGER_INIT(dc->ctx->logger); - - if (!is_soc_bounding_box_valid(dc)) { - DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); - return false; - } - - loaded_ip->max_num_otg = pool->pipe_count; - loaded_ip->max_num_dpp = pool->pipe_count; - loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; - DC_FP_START(); - dcn20_patch_bounding_box(dc, loaded_bb); - DC_FP_END(); - - if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = { 0 }; - - if (dc->ctx->dc_bios->funcs->get_soc_bb_info( - dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - DC_FP_START(); - dcn303_fpu_init_soc_bounding_box(bb_info); - DC_FP_END(); - } - } - - return true; -} - -static void dcn303_resource_destruct(struct resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->stream_enc_count; i++) { - if (pool->stream_enc[i] != NULL) { - if (pool->stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->stream_enc[i]->vpg)); - pool->stream_enc[i]->vpg = NULL; - } - if (pool->stream_enc[i]->afmt != NULL) { - kfree(DCN30_AFMT_FROM_AFMT(pool->stream_enc[i]->afmt)); - pool->stream_enc[i]->afmt = NULL; - } - kfree(DCN10STRENC_FROM_STRENC(pool->stream_enc[i])); - pool->stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->res_cap->num_dsc; i++) { - if (pool->dscs[i] != NULL) - dcn20_dsc_destroy(&pool->dscs[i]); - } - - if (pool->mpc != NULL) { - kfree(TO_DCN20_MPC(pool->mpc)); - pool->mpc = NULL; - } - - if (pool->hubbub != NULL) { - kfree(pool->hubbub); - pool->hubbub = NULL; - } - - for (i = 0; i < pool->pipe_count; i++) { - if (pool->dpps[i] != NULL) { - kfree(TO_DCN20_DPP(pool->dpps[i])); - pool->dpps[i] = NULL; - } - - if (pool->hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->hubps[i])); - pool->hubps[i] = NULL; - } - - if (pool->irqs != NULL) - dal_irq_service_destroy(&pool->irqs); - } - - for (i = 0; i < pool->res_cap->num_ddc; i++) { - if (pool->engines[i] != NULL) - dce110_engine_destroy(&pool->engines[i]); - if (pool->hw_i2cs[i] != NULL) { - kfree(pool->hw_i2cs[i]); - pool->hw_i2cs[i] = NULL; - } - if (pool->sw_i2cs[i] != NULL) { - kfree(pool->sw_i2cs[i]); - pool->sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->res_cap->num_opp; i++) { - if (pool->opps[i] != NULL) - pool->opps[i]->funcs->opp_destroy(&pool->opps[i]); - } - - for (i = 0; i < pool->res_cap->num_timing_generator; i++) { - if (pool->timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->timing_generators[i])); - pool->timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->res_cap->num_dwb; i++) { - if (pool->dwbc[i] != NULL) { - kfree(TO_DCN30_DWBC(pool->dwbc[i])); - pool->dwbc[i] = NULL; - } - if (pool->mcif_wb[i] !=
NULL) { - kfree(TO_DCN30_MMHUBBUB(pool->mcif_wb[i])); - pool->mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->audio_count; i++) { - if (pool->audios[i]) - dce_aud_destroy(&pool->audios[i]); - } - - for (i = 0; i < pool->clk_src_count; i++) { - if (pool->clock_sources[i] != NULL) - dcn20_clock_source_destroy(&pool->clock_sources[i]); - } - - if (pool->dp_clock_source != NULL) - dcn20_clock_source_destroy(&pool->dp_clock_source); - - for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { - if (pool->mpc_lut[i] != NULL) { - dc_3dlut_func_release(pool->mpc_lut[i]); - pool->mpc_lut[i] = NULL; - } - if (pool->mpc_shaper[i] != NULL) { - dc_transfer_func_release(pool->mpc_shaper[i]); - pool->mpc_shaper[i] = NULL; - } - } - - for (i = 0; i < pool->pipe_count; i++) { - if (pool->multiple_abms[i] != NULL) - dce_abm_destroy(&pool->multiple_abms[i]); - } - - if (pool->psr != NULL) - dmub_psr_destroy(&pool->psr); - - if (pool->dccg != NULL) - dcn_dccg_destroy(&pool->dccg); - - if (pool->oem_device != NULL) { - struct dc *dc = pool->oem_device->ctx->dc; - - dc->link_srv->destroy_ddc_service(&pool->oem_device); - } -} - -static void dcn303_destroy_resource_pool(struct resource_pool **pool) -{ - dcn303_resource_destruct(*pool); - kfree(*pool); - *pool = NULL; -} - -static void dcn303_get_panel_config_defaults(struct dc_panel_config *panel_config) -{ - *panel_config = panel_config_defaults; -} - -void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) -{ - DC_FP_START(); - dcn303_fpu_update_bw_bounding_box(dc, bw_params); - DC_FP_END(); -} - -static struct resource_funcs dcn303_res_pool_funcs = { - .destroy = dcn303_destroy_resource_pool, - .link_enc_create = dcn303_link_encoder_create, - .panel_cntl_create = dcn303_panel_cntl_create, - .validate_bandwidth = dcn30_validate_bandwidth, - .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, - .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, - .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, - .release_pipe = dcn20_release_pipe, - .add_stream_to_ctx = dcn30_add_stream_to_ctx, - .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, - .set_mcif_arb_params = dcn30_set_mcif_arb_params, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, - .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, - .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, - .update_bw_bounding_box = dcn303_update_bw_bounding_box, - .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, - .get_panel_config_defaults = dcn303_get_panel_config_defaults, -}; - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap -}; - -static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_3), - NBIO_SR(BIOS_SCRATCH_6) -}; - -static const struct dccg_registers dccg_regs = { - DCCG_REG_LIST_DCN3_03() -}; - -static const struct dccg_shift dccg_shift = { - DCCG_MASK_SH_LIST_DCN3_03(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN3_03(_MASK) -}; - -#define abm_regs(id)\ - [id] = { ABM_DCN302_REG_LIST(id) } - -static const struct dce_abm_registers abm_regs[] = { - abm_regs(0), - abm_regs(1) -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static 
const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN30(_MASK) -}; - -static bool dcn303_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct resource_pool *pool) -{ - int i; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data init_data; - struct ddc_service_init_data ddc_init_data; - - ctx->dc_bios->regs = &bios_regs; - - pool->res_cap = &res_cap_dcn303; - - pool->funcs = &dcn303_res_pool_funcs; - - /************************************************* - * Resource + asic cap hardcoding * - *************************************************/ - pool->underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->pipe_count = pool->res_cap->num_timing_generator; - pool->mpcc_count = pool->res_cap->num_timing_generator; - dc->caps.max_downscale_ratio = 600; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/ - dc->caps.max_cursor_size = 256; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dmdata_alloc_size = 2048; - dc->caps.mall_size_per_mem_channel = 4; - /* total size = mall per channel * num channels * 1024 * 1024 */ - dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * - dc->ctx->dc_bios->vram_info.num_chans * - 1024 * 1024; - dc->caps.cursor_cache_size = - dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; - dc->caps.max_slave_planes = 1; - dc->caps.post_blend_color_processing = true; - dc->caps.force_dp_tps4_for_cp2520 = true; - dc->caps.extended_aux_timeout_support = true; - dc->caps.dmcub_support = true; - dc->caps.max_v_total = (1 << 15) - 1; - - /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 0; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; - dc->caps.color.dpp.dgam_rom_caps.pq = 1; - dc->caps.color.dpp.dgam_rom_caps.hlg = 1; - dc->caps.color.dpp.post_csc = 1; - dc->caps.color.dpp.gamma_corr = 1; - dc->caps.color.dpp.dgam_rom_for_yuv = 0; - - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; - // no OGAM ROM on DCN3 - dc->caps.color.dpp.ogam_rom_caps.srgb = 0; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 0; - - dc->caps.color.mpc.gamut_remap = 1; - dc->caps.color.mpc.num_3dluts = pool->res_cap->num_mpc_3dlut; //3 - dc->caps.color.mpc.ogam_ram = 1; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; - dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 1; - - dc->caps.dp_hdmi21_pcon_support = true; - - dc->config.dc_mode_clk_limit_support = true; - /* read VBIOS LTTPR caps */ - if (ctx->dc_bios->funcs->get_lttpr_caps) { - enum bp_result bp_query_result; - uint8_t is_vbios_lttpr_enable = 0; - - bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); - dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; - } - - if (ctx->dc_bios->funcs->get_lttpr_interop) { - enum bp_result bp_query_result; - uint8_t is_vbios_interop_enabled = 0; - - bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); -
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; - } - - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) - dc->debug = debug_defaults_drv; - - // Init the vm_helper - if (dc->vm_helper) - vm_helper_init(dc->vm_helper, 16); - - /************************************************* - * Create resources * - *************************************************/ - - /* Clock Sources for Pixel Clock*/ - pool->clock_sources[DCN303_CLK_SRC_PLL0] = - dcn303_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->clock_sources[DCN303_CLK_SRC_PLL1] = - dcn303_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - - pool->clk_src_count = DCN303_CLK_SRC_TOTAL; - - /* todo: not reuse phy_pll registers */ - pool->dp_clock_source = - dcn303_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->clk_src_count; i++) { - if (pool->clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* DCCG */ - pool->dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* PP Lib and SMU interfaces */ - init_soc_bounding_box(dc, pool); - - /* DML */ - dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); - - /* IRQ */ - init_data.ctx = dc->ctx; - pool->irqs = dal_irq_service_dcn303_create(&init_data); - if (!pool->irqs) - goto create_fail; - - /* HUBBUB */ - pool->hubbub = dcn303_hubbub_create(ctx); - if (pool->hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - /* HUBPs, DPPs, OPPs and TGs */ - for (i = 0; i < pool->pipe_count; i++) { - pool->hubps[i] = dcn303_hubp_create(ctx, i); - if (pool->hubps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubps!\n"); - goto create_fail; - } - - pool->dpps[i] = dcn303_dpp_create(ctx, i); - if (pool->dpps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dpps!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->res_cap->num_opp; i++) { - pool->opps[i] = dcn303_opp_create(ctx, i); - if (pool->opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->res_cap->num_timing_generator; i++) { - pool->timing_generators[i] = dcn303_timing_generator_create(ctx, i); - if (pool->timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - pool->timing_generator_count = i; - - /* PSR */ - pool->psr = dmub_psr_create(ctx); - if (pool->psr == NULL) { - dm_error("DC: failed to create psr!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* ABM */ - for (i = 0; i < pool->res_cap->num_timing_generator; i++) { - pool->multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask); - if (pool->multiple_abms[i] == NULL) { - dm_error("DC: failed to create abm for pipe %d!\n", i); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* MPC and DSC */ - pool->mpc = dcn303_mpc_create(ctx, pool->mpcc_count, pool->res_cap->num_mpc_3dlut); - if (pool->mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - for (i = 0; i < 
pool->res_cap->num_dsc; i++) { - pool->dscs[i] = dcn303_dsc_create(ctx, i); - if (pool->dscs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create display stream compressor %d!\n", i); - goto create_fail; - } - } - - /* DWB and MMHUBBUB */ - if (!dcn303_dwbc_create(ctx, pool)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dwbc!\n"); - goto create_fail; - } - - if (!dcn303_mmhubbub_create(ctx, pool)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mcif_wb!\n"); - goto create_fail; - } - - /* AUX and I2C */ - for (i = 0; i < pool->res_cap->num_ddc; i++) { - pool->engines[i] = dcn303_aux_engine_create(ctx, i); - if (pool->engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create aux engine!\n"); - goto create_fail; - } - pool->hw_i2cs[i] = dcn303_i2c_hw_create(ctx, i); - if (pool->hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hw i2c!\n"); - goto create_fail; - } - pool->sw_i2cs[i] = NULL; - } - - /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ - if (!resource_construct(num_virtual_links, dc, pool, - &res_create_funcs)) - goto create_fail; - - /* HW Sequencer and Plane caps */ - dcn303_hw_sequencer_construct(dc); - - dc->caps.max_planes = pool->pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { - ddc_init_data.ctx = dc->ctx; - ddc_init_data.link = NULL; - ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; - ddc_init_data.id.enum_id = 0; - ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); - } else { - pool->oem_device = NULL; - } - - return true; - -create_fail: - - dcn303_resource_destruct(pool); - - return false; -} - -struct resource_pool *dcn303_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc) -{ - struct resource_pool *pool = kzalloc(sizeof(struct resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dcn303_resource_construct(init_data->num_virtual_links, dc, pool)) - return pool; - - BREAK_TO_DEBUGGER(); - kfree(pool); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h deleted file mode 100644 index 37cf152582..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h +++ /dev/null @@ -1,38 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright (C) 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - */ - -#ifndef _DCN303_RESOURCE_H_ -#define _DCN303_RESOURCE_H_ - -#include "core_types.h" - -extern struct _vcs_dpi_ip_params_st dcn3_03_ip; -extern struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc; - -struct resource_pool *dcn303_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc); - -void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); - -#endif /* _DCN303_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile index 96e45c9efb..5d93ac16c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile @@ -10,8 +10,8 @@ # # Makefile for dcn31. -DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_init.o dcn31_hubp.o \ - dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \ +DCN31 = dcn31_hubbub.o dcn31_hubp.o \ + dcn31_dccg.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \ dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \ dcn31_afmt.o dcn31_vpg.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c deleted file mode 100644 index 669f524bd0..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dce110/dce110_hwseq.h" -#include "dcn10/dcn10_hwseq.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn21/dcn21_hwseq.h" -#include "dcn30/dcn30_hwseq.h" -#include "dcn301/dcn301_hwseq.h" -#include "dcn31/dcn31_hwseq.h" - -#include "dcn31_init.h" - -static const struct hw_sequencer_funcs dcn31_funcs = { - .program_gamut_remap = dcn30_program_gamut_remap, - .init_hw = dcn31_init_hw, - .power_down_on_boot = dcn10_power_down_on_boot, - .apply_ctx_to_hw = dce110_apply_ctx_to_hw, - .apply_ctx_for_surface = NULL, - .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, - .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, - .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, - .update_plane_addr = dcn20_update_plane_addr, - .update_dchub = dcn10_update_dchub, - .update_pending_status = dcn10_update_pending_status, - .program_output_csc = dcn20_program_output_csc, - .enable_accelerated_mode = dce110_enable_accelerated_mode, - .enable_timing_synchronization = dcn10_enable_timing_synchronization, - .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, - .update_info_frame = dcn31_update_info_frame, - .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, - .enable_stream = dcn20_enable_stream, - .disable_stream = dce110_disable_stream, - .unblank_stream = dcn20_unblank_stream, - .blank_stream = dce110_blank_stream, - .enable_audio_stream = dce110_enable_audio_stream, - .disable_audio_stream = dce110_disable_audio_stream, - .disable_plane = dcn20_disable_plane, - .disable_pixel_data = dcn20_disable_pixel_data, - .pipe_control_lock = dcn20_pipe_control_lock, - .interdependent_update_lock = dcn10_lock_all_pipes, - .cursor_lock = dcn10_cursor_lock, - .prepare_bandwidth = dcn20_prepare_bandwidth, - .optimize_bandwidth = dcn20_optimize_bandwidth, - .update_bandwidth = dcn20_update_bandwidth, - .set_drr = dcn10_set_drr, - .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, - .setup_stereo = dcn10_setup_stereo, - .set_avmute = dcn30_set_avmute, - .log_hw_state = dcn10_log_hw_state, - .get_hw_state = dcn10_get_hw_state, - .clear_status_bits = dcn10_clear_status_bits, - .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, - .edp_power_control = dce110_edp_power_control, - .edp_wait_for_T12 = dce110_edp_wait_for_T12, - .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, - .set_cursor_position = dcn10_set_cursor_position, - .set_cursor_attribute = dcn10_set_cursor_attribute, - .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .set_clock = dcn10_set_clock, - .get_clock = dcn10_get_clock, - .program_triplebuffer = dcn20_program_triple_buffer, - .enable_writeback = dcn30_enable_writeback, - .disable_writeback = dcn30_disable_writeback, - .update_writeback = dcn30_update_writeback, - .mmhubbub_warmup = dcn30_mmhubbub_warmup, - .dmdata_status_done = dcn20_dmdata_status_done, - .program_dmdata_engine = dcn30_program_dmdata_engine, - .set_dmdata_attributes = dcn20_set_dmdata_attributes, - .init_sys_ctx = dcn31_init_sys_ctx, - .init_vm_ctx = dcn20_init_vm_ctx, - .set_flip_control_gsl = dcn20_set_flip_control_gsl, - .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, - .calc_vupdate_position = dcn10_calc_vupdate_position, - .power_down = dce110_power_down, - .set_backlight_level = 
dcn21_set_backlight_level, - .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, - .set_pipe = dcn21_set_pipe, - .enable_lvds_link_output = dce110_enable_lvds_link_output, - .enable_tmds_link_output = dce110_enable_tmds_link_output, - .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dce110_disable_link_output, - .z10_restore = dcn31_z10_restore, - .z10_save_init = dcn31_z10_save_init, - .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, - .optimize_pwr_state = dcn21_optimize_pwr_state, - .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn10_update_visual_confirm_color, -}; - -static const struct hwseq_private_funcs dcn31_private_funcs = { - .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, - .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, - .update_mpcc = dcn20_update_mpcc, - .set_input_transfer_func = dcn30_set_input_transfer_func, - .set_output_transfer_func = dcn30_set_output_transfer_func, - .power_down = dce110_power_down, - .enable_display_power_gating = dcn10_dummy_display_power_gating, - .blank_pixel_data = dcn20_blank_pixel_data, - .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap, - .enable_stream_timing = dcn20_enable_stream_timing, - .edp_backlight_control = dce110_edp_backlight_control, - .disable_stream_gating = dcn20_disable_stream_gating, - .enable_stream_gating = dcn20_enable_stream_gating, - .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, - .did_underflow_occur = dcn10_did_underflow_occur, - .init_blank = dcn20_init_blank, - .disable_vga = dcn20_disable_vga, - .bios_golden_init = dcn10_bios_golden_init, - .plane_atomic_disable = dcn20_plane_atomic_disable, - .plane_atomic_power_down = dcn10_plane_atomic_power_down, - .enable_power_gating_plane = dcn31_enable_power_gating_plane, - .hubp_pg_control = dcn31_hubp_pg_control, - .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, - .update_odm = dcn20_update_odm, - .dsc_pg_control = dcn31_dsc_pg_control, - .set_hdr_multiplier = dcn10_set_hdr_multiplier, - .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, - .wait_for_blank_complete = dcn20_wait_for_blank_complete, - .dccg_init = dcn20_dccg_init, - .set_blend_lut = dcn30_set_blend_lut, - .set_shaper_3dlut = dcn20_set_shaper_3dlut, - .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, -}; - -void dcn31_hw_sequencer_construct(struct dc *dc) -{ - dc->hwss = dcn31_funcs; - dc->hwseq->funcs = dcn31_private_funcs; - -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h deleted file mode 100644 index a3db08c8bd..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_DCN31_INIT_H__ -#define __DC_DCN31_INIT_H__ - -struct dc; - -void dcn31_hw_sequencer_construct(struct dc *dc); - -#endif /* __DC_DCN31_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c deleted file mode 100644 index 63a677c8ee..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dcn31_optc.h" - -#include "dcn30/dcn30_optc.h" -#include "reg_helper.h" -#include "dc.h" -#include "dcn_calc_math.h" - -#define REG(reg)\ - optc1->tg_regs->reg - -#define CTX \ - optc1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - optc1->tg_shift->field_name, optc1->tg_mask->field_name - -static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, - struct dc_crtc_timing *timing) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right) - / opp_cnt; - uint32_t memory_mask = 0; - int mem_count_per_opp = (mpcc_hactive + 2559) / 2560; - - /* Assume less than 6 pipes */ - if (opp_cnt == 4) { - if (mem_count_per_opp == 1) - memory_mask = 0xf; - else { - ASSERT(mem_count_per_opp == 2); - memory_mask = 0xff; - } - } else if (mem_count_per_opp == 1) - memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2); - else if (mem_count_per_opp == 2) - memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); - else if (mem_count_per_opp == 3) - memory_mask = 0x77; - else if (mem_count_per_opp == 4) - memory_mask = 0xff; - - if (REG(OPTC_MEMORY_CONFIG)) - REG_SET(OPTC_MEMORY_CONFIG, 0, - OPTC_MEM_SEL, memory_mask); - - if (opp_cnt == 2) { - REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 1, - OPTC_SEG0_SRC_SEL, opp_id[0], - OPTC_SEG1_SRC_SEL, opp_id[1]); - } else if (opp_cnt == 4) { - REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 3, - OPTC_SEG0_SRC_SEL, opp_id[0], - OPTC_SEG1_SRC_SEL, opp_id[1], - OPTC_SEG2_SRC_SEL, opp_id[2], - OPTC_SEG3_SRC_SEL, opp_id[3]); - } - - REG_UPDATE(OPTC_WIDTH_CONTROL, - OPTC_SEGMENT_WIDTH, mpcc_hactive); - - REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1); - optc1->opp_count = opp_cnt; -} - -/* - * Enable CRTC - call ASIC Control Object to enable Timing generator. - */ -static bool optc31_enable_crtc(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - /* opp instance for OTG, 1 to 1 mapping and odm will adjust */ - REG_UPDATE(OPTC_DATA_SOURCE_SELECT, - OPTC_SEG0_SRC_SEL, optc->inst); - - /* VTG enable first is for HW workaround */ - REG_UPDATE(CONTROL, - VTG0_ENABLE, 1); - - REG_SEQ_START(); - - /* Enable CRTC */ - REG_UPDATE_2(OTG_CONTROL, - OTG_DISABLE_POINT_CNTL, 2, - OTG_MASTER_EN, 1); - - REG_SEQ_SUBMIT(); - REG_SEQ_WAIT_DONE(); - - return true; -} - -/* disable_crtc - call ASIC Control Object to disable Timing generator. */ -static bool optc31_disable_crtc(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - /* disable otg request until end of the first line - * in the vertical blank region - */ - REG_UPDATE(OTG_CONTROL, - OTG_MASTER_EN, 0); - - REG_UPDATE(CONTROL, - VTG0_ENABLE, 0); - - /* CRTC disabled, so disable clock. */ - REG_WAIT(OTG_CLOCK_CONTROL, - OTG_BUSY, 0, - 1, 100000); - optc1_clear_optc_underflow(optc); - - return true; -} - -bool optc31_immediate_disable_crtc(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE_2(OTG_CONTROL, - OTG_DISABLE_POINT_CNTL, 0, - OTG_MASTER_EN, 0); - - REG_UPDATE(CONTROL, - VTG0_ENABLE, 0); - - /* CRTC disabled, so disable clock. 
*/ - REG_WAIT(OTG_CLOCK_CONTROL, - OTG_BUSY, 0, - 1, 100000); - - /* clear the false state */ - optc1_clear_optc_underflow(optc); - - return true; -} - -void optc31_set_drr( - struct timing_generator *optc, - const struct drr_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - if (params != NULL && - params->vertical_total_max > 0 && - params->vertical_total_min > 0) { - - if (params->vertical_total_mid != 0) { - - REG_SET(OTG_V_TOTAL_MID, 0, - OTG_V_TOTAL_MID, params->vertical_total_mid - 1); - - REG_UPDATE_2(OTG_V_TOTAL_CONTROL, - OTG_VTOTAL_MID_REPLACING_MAX_EN, 1, - OTG_VTOTAL_MID_FRAME_NUM, - (uint8_t)params->vertical_total_mid_frame_num); - - } - - optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1); - - /* - * MIN_MASK_EN is gone and MASK is now always enabled. - * - * To get it to work with manual trigger we need to make sure - * we program the correct bit. - */ - REG_UPDATE_4(OTG_V_TOTAL_CONTROL, - OTG_V_TOTAL_MIN_SEL, 1, - OTG_V_TOTAL_MAX_SEL, 1, - OTG_FORCE_LOCK_ON_EVENT, 0, - OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ - - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); - } else { - REG_UPDATE_4(OTG_V_TOTAL_CONTROL, - OTG_SET_V_TOTAL_MIN_MASK, 0, - OTG_V_TOTAL_MIN_SEL, 0, - OTG_V_TOTAL_MAX_SEL, 0, - OTG_FORCE_LOCK_ON_EVENT, 0); - - optc->funcs->set_vtotal_min_max(optc, 0, 0); - } -} - -void optc3_init_odm(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 0, - OPTC_SEG0_SRC_SEL, optc->inst, - OPTC_SEG1_SRC_SEL, 0xf, - OPTC_SEG2_SRC_SEL, 0xf, - OPTC_SEG3_SRC_SEL, 0xf - ); - - REG_SET(OTG_H_TIMING_CNTL, 0, - OTG_H_TIMING_DIV_MODE, 0); - - REG_SET(OPTC_MEMORY_CONFIG, 0, - OPTC_MEM_SEL, 0); - optc1->opp_count = 1; -} - -static struct timing_generator_funcs dcn31_tg_funcs = { - .validate_timing = optc1_validate_timing, - .program_timing = optc1_program_timing, - .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, - .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, - .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, - .program_global_sync = optc1_program_global_sync, - .enable_crtc = optc31_enable_crtc, - .disable_crtc = optc31_disable_crtc, - .immediate_disable_crtc = optc31_immediate_disable_crtc, - /* used by enable_timing_synchronization. Not needed for FPGA */ - .is_counter_moving = optc1_is_counter_moving, - .get_position = optc1_get_position, - .get_frame_count = optc1_get_vblank_counter, - .get_scanoutpos = optc1_get_crtc_scanoutpos, - .get_otg_active_size = optc1_get_otg_active_size, - .set_early_control = optc1_set_early_control, - /* used by enable_timing_synchronization.
Not needed for FPGA */ - .wait_for_state = optc1_wait_for_state, - .set_blank_color = optc3_program_blank_color, - .did_triggered_reset_occur = optc1_did_triggered_reset_occur, - .triplebuffer_lock = optc3_triplebuffer_lock, - .triplebuffer_unlock = optc2_triplebuffer_unlock, - .enable_reset_trigger = optc1_enable_reset_trigger, - .enable_crtc_reset = optc1_enable_crtc_reset, - .disable_reset_trigger = optc1_disable_reset_trigger, - .lock = optc3_lock, - .unlock = optc1_unlock, - .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, - .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, - .enable_optc_clock = optc1_enable_optc_clock, - .set_drr = optc31_set_drr, - .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, - .set_vtotal_min_max = optc1_set_vtotal_min_max, - .set_static_screen_control = optc1_set_static_screen_control, - .program_stereo = optc1_program_stereo, - .is_stereo_left_eye = optc1_is_stereo_left_eye, - .tg_init = optc3_tg_init, - .is_tg_enabled = optc1_is_tg_enabled, - .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, - .clear_optc_underflow = optc1_clear_optc_underflow, - .setup_global_swap_lock = NULL, - .get_crc = optc1_get_crc, - .configure_crc = optc2_configure_crc, - .set_dsc_config = optc3_set_dsc_config, - .get_dsc_status = optc2_get_dsc_status, - .set_dwb_source = NULL, - .set_odm_bypass = optc3_set_odm_bypass, - .set_odm_combine = optc31_set_odm_combine, - .get_optc_source = optc2_get_optc_source, - .set_out_mux = optc3_set_out_mux, - .set_drr_trigger_window = optc3_set_drr_trigger_window, - .set_vtotal_change_limit = optc3_set_vtotal_change_limit, - .set_gsl = optc2_set_gsl, - .set_gsl_source_select = optc2_set_gsl_source_select, - .set_vtg_params = optc1_set_vtg_params, - .program_manual_trigger = optc2_program_manual_trigger, - .setup_manual_trigger = optc2_setup_manual_trigger, - .get_hw_timing = optc1_get_hw_timing, - .init_odm = optc3_init_odm, -}; - -void dcn31_timing_generator_init(struct optc *optc1) -{ - optc1->base.funcs = &dcn31_tg_funcs; - - optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; - optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; - - optc1->min_h_blank = 32; - optc1->min_v_blank = 3; - optc1->min_v_blank_interlace = 5; - optc1->min_h_sync_width = 4; - optc1->min_v_sync_width = 1; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h deleted file mode 100644 index 30b81a448c..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_OPTC_DCN31_H__ -#define __DC_OPTC_DCN31_H__ - -#include "dcn10/dcn10_optc.h" - -#define OPTC_COMMON_REG_LIST_DCN3_1(inst) \ - SRI(OTG_VSTARTUP_PARAM, OTG, inst),\ - SRI(OTG_VUPDATE_PARAM, OTG, inst),\ - SRI(OTG_VREADY_PARAM, OTG, inst),\ - SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL4, OTG, inst),\ - SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\ - SRI(OTG_H_TOTAL, OTG, inst),\ - SRI(OTG_H_BLANK_START_END, OTG, inst),\ - SRI(OTG_H_SYNC_A, OTG, inst),\ - SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\ - SRI(OTG_H_TIMING_CNTL, OTG, inst),\ - SRI(OTG_V_TOTAL, OTG, inst),\ - SRI(OTG_V_BLANK_START_END, OTG, inst),\ - SRI(OTG_V_SYNC_A, OTG, inst),\ - SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\ - SRI(OTG_CONTROL, OTG, inst),\ - SRI(OTG_STEREO_CONTROL, OTG, inst),\ - SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\ - SRI(OTG_STEREO_STATUS, OTG, inst),\ - SRI(OTG_V_TOTAL_MAX, OTG, inst),\ - SRI(OTG_V_TOTAL_MIN, OTG, inst),\ - SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\ - SRI(OTG_TRIGA_CNTL, OTG, inst),\ - SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ - SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ - SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\ - SRI(OTG_STATUS, OTG, inst),\ - SRI(OTG_STATUS_POSITION, OTG, inst),\ - SRI(OTG_NOM_VERT_POSITION, OTG, inst),\ - SRI(OTG_M_CONST_DTO0, OTG, inst),\ - SRI(OTG_M_CONST_DTO1, OTG, inst),\ - SRI(OTG_CLOCK_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ - SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ - SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\ - SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\ - SRI(CONTROL, VTG, inst),\ - SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\ - SRI(OTG_GSL_CONTROL, OTG, inst),\ - SRI(OTG_CRC_CNTL, OTG, inst),\ - SRI(OTG_CRC0_DATA_RG, OTG, inst),\ - SRI(OTG_CRC0_DATA_B, OTG, inst),\ - SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\ - SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\ - SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\ - SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\ - SR(GSL_SOURCE_SELECT),\ - SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ - SRI(OTG_GSL_WINDOW_X, OTG, inst),\ - SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ - SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ - SRI(OTG_DSC_START_POSITION, OTG, inst),\ - SRI(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\ - SRI(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\ - SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ - SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ - SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ - SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ - SRI(OTG_CRC_CNTL2, OTG, inst),\ - SR(DWB_SOURCE_SELECT),\ - SRI(OTG_DRR_CONTROL, OTG, inst) - -#define OPTC_COMMON_MASK_SH_LIST_DCN3_1(mask_sh)\ - SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ - SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\ - SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\ - SF(OTG0_OTG_VREADY_PARAM, 
VREADY_OFFSET, mask_sh),\ - SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\ - SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_END_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_END_Y, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_Y, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ - SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ - SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ - SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\ - SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\ - SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ - SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MIN_EN, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, 
OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\ - SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\ - SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\ - SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\ - SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\ - SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\ - SF(OTG0_OTG_M_CONST_DTO0, OTG_M_CONST_DTO_PHASE, mask_sh),\ - SF(OTG0_OTG_M_CONST_DTO1, OTG_M_CONST_DTO_MODULO, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ - SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ - SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ - SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ - SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ - SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, 
mask_sh),\ - SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ - SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG2_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG3_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ - SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ - SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ - SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\ - SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\ - SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\ - SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ - SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ - SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) - -void dcn31_timing_generator_init(struct optc *optc1); - -bool optc31_immediate_disable_crtc(struct timing_generator *optc); - -void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params); - -void optc3_init_odm(struct timing_generator *optc); - -#endif /* __DC_OPTC_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c deleted file mode 100644 index 79416cfb22..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ /dev/null @@ -1,2218 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - - -#include "dm_services.h" -#include "dc.h" - -#include "dcn31/dcn31_init.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn31_resource.h" - -#include "dcn20/dcn20_resource.h" -#include "dcn30/dcn30_resource.h" - -#include "dml/dcn30/dcn30_fpu.h" - -#include "dcn10/dcn10_ipp.h" -#include "dcn30/dcn30_hubbub.h" -#include "dcn31/dcn31_hubbub.h" -#include "dcn30/dcn30_mpc.h" -#include "dcn31/dcn31_hubp.h" -#include "irq/dcn31/irq_service_dcn31.h" -#include "dcn30/dcn30_dpp.h" -#include "dcn31/dcn31_optc.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn30/dcn30_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn30/dcn30_opp.h" -#include "dcn20/dcn20_dsc.h" -#include "dcn30/dcn30_vpg.h" -#include "dcn30/dcn30_afmt.h" -#include "dcn30/dcn30_dio_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_link_encoder.h" -#include "dcn31/dcn31_apg.h" -#include "dcn31/dcn31_dio_link_encoder.h" -#include "dcn31/dcn31_vpg.h" -#include "dcn31/dcn31_afmt.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" -#include "dce110/dce110_resource.h" -#include "dml/display_mode_vba.h" -#include "dml/dcn31/dcn31_fpu.h" -#include "dcn31/dcn31_dccg.h" -#include "dcn10/dcn10_resource.h" -#include "dcn31_panel_cntl.h" - -#include "dcn30/dcn30_dwb.h" -#include "dcn30/dcn30_mmhubbub.h" - -// TODO: change include headers /amd/include/asic_reg after upstream -#include "yellow_carp_offset.h" -#include "dcn/dcn_3_1_2_offset.h" -#include "dcn/dcn_3_1_2_sh_mask.h" -#include "nbio/nbio_7_2_0_offset.h" -#include "dpcs/dpcs_4_2_0_offset.h" -#include "dpcs/dpcs_4_2_0_sh_mask.h" -#include "mmhub/mmhub_2_3_0_offset.h" -#include "mmhub/mmhub_2_3_0_sh_mask.h" - - -#define regDCHUBBUB_DEBUG_CTRL_0 0x04d6 -#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2 -#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10 -#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L - -#include "reg_helper.h" -#include "dce/dmub_abm.h" -#include "dce/dmub_psr.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" -#include "dce/dmub_replay.h" - -#include "dml/dcn30/display_mode_vba_30.h" -#include "vm_helper.h" -#include "dcn20/dcn20_vmid.h" - -#include "link_enc_cfg.h" - -#define DC_LOGGER \ - dc->ctx->logger -#define DC_LOGGER_INIT(logger) - -enum 
dcn31_clk_src_array_id { - DCN31_CLK_SRC_PLL0, - DCN31_CLK_SRC_PLL1, - DCN31_CLK_SRC_PLL2, - DCN31_CLK_SRC_PLL3, - DCN31_CLK_SRC_PLL4, - DCN30_CLK_SRC_TOTAL -}; - -/* begin ********************* - * macros to expend register list macro defined in HW object header file - */ - -/* DCN */ -#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ - .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SRIR(var_name, reg_name, block, id)\ - .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_MPC_RMU(reg_name, block, id)\ - .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_DWB(reg_name, temp_name, block, id)\ - .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## temp_name - -#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define DCCG_SRII(reg_name, block, id)\ - .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - reg ## reg_name ## _ ## block ## id - -/* NBIO */ -#define NBIO_BASE_INNER(seg) \ - NBIO_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - .reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX1_ ## reg_name - -/* MMHUB */ -#define MMHUB_BASE_INNER(seg) \ - MMHUB_BASE__INST0_SEG ## seg - -#define MMHUB_BASE(seg) \ - MMHUB_BASE_INNER(seg) - -#define MMHUB_SR(reg_name)\ - .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -/* CLOCK */ -#define CLK_BASE_INNER(seg) \ - CLK_BASE__INST0_SEG ## seg - -#define CLK_BASE(seg) \ - CLK_BASE_INNER(seg) - -#define CLK_SRI(reg_name, block, inst)\ - .reg_name = CLK_BASE(reg ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## _ ## inst ## _ ## reg_name - - -static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_3), - NBIO_SR(BIOS_SCRATCH_6) -}; - -#define clk_src_regs(index, pllid)\ -[index] = {\ - CS_COMMON_REG_LIST_DCN3_0(index, pllid),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, C), - clk_src_regs(3, D), - clk_src_regs(4, E) -}; -/*pll_id being rempped in dmub, in driver it is logical instance*/ -static const struct dce110_clk_src_regs clk_src_regs_b0[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, F), - clk_src_regs(3, G), - clk_src_regs(4, E) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -#define abm_regs(id)\ -[id] = {\ - ABM_DCN302_REG_LIST(id)\ -} - -static const struct dce_abm_registers 
abm_regs[] = { - abm_regs(0), - abm_regs(1), - abm_regs(2), - abm_regs(3), -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN30(_MASK) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6) -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define vpg_regs(id)\ -[id] = {\ - VPG_DCN31_REG_LIST(id)\ -} - -static const struct dcn31_vpg_registers vpg_regs[] = { - vpg_regs(0), - vpg_regs(1), - vpg_regs(2), - vpg_regs(3), - vpg_regs(4), - vpg_regs(5), - vpg_regs(6), - vpg_regs(7), - vpg_regs(8), - vpg_regs(9), -}; - -static const struct dcn31_vpg_shift vpg_shift = { - DCN31_VPG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_vpg_mask vpg_mask = { - DCN31_VPG_MASK_SH_LIST(_MASK) -}; - -#define afmt_regs(id)\ -[id] = {\ - AFMT_DCN31_REG_LIST(id)\ -} - -static const struct dcn31_afmt_registers afmt_regs[] = { - afmt_regs(0), - afmt_regs(1), - afmt_regs(2), - afmt_regs(3), - afmt_regs(4), - afmt_regs(5) -}; - -static const struct dcn31_afmt_shift afmt_shift = { - DCN31_AFMT_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_afmt_mask afmt_mask = { - DCN31_AFMT_MASK_SH_LIST(_MASK) -}; - -#define apg_regs(id)\ -[id] = {\ - APG_DCN31_REG_LIST(id)\ -} - -static const struct dcn31_apg_registers apg_regs[] = { - apg_regs(0), - apg_regs(1), - apg_regs(2), - apg_regs(3) -}; - -static const struct dcn31_apg_shift apg_shift = { - DCN31_APG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_apg_mask apg_mask = { - DCN31_APG_MASK_SH_LIST(_MASK) -}; - -#define stream_enc_regs(id)\ -[id] = {\ - SE_DCN3_REG_LIST(id)\ -} - -/* Some encoders won't be initialized here - but they're logical, not physical. 
*/ -static const struct dcn10_stream_enc_registers stream_enc_regs[ENGINE_ID_COUNT] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), - stream_enc_regs(4) -}; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - - -#define aux_regs(id)\ -[id] = {\ - DCN2_AUX_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4) -}; - -#define link_regs(id, phyid)\ -[id] = {\ - LE_DCN31_REG_LIST(id), \ - UNIPHY_DCN2_REG_LIST(phyid), \ - DPCS_DCN31_REG_LIST(id), \ -} - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -static const struct dcn10_link_enc_registers link_enc_regs[] = { - link_regs(0, A), - link_regs(1, B), - link_regs(2, C), - link_regs(3, D), - link_regs(4, E) -}; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ - DPCS_DCN31_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ - DPCS_DCN31_MASK_SH_LIST(_MASK) -}; - -#define hpo_dp_stream_encoder_reg_list(id)\ -[id] = {\ - DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ -} - -static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { - hpo_dp_stream_encoder_reg_list(0), - hpo_dp_stream_encoder_reg_list(1), - hpo_dp_stream_encoder_reg_list(2), - hpo_dp_stream_encoder_reg_list(3), -}; - -static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) -}; - -#define hpo_dp_link_encoder_reg_list(id)\ -[id] = {\ - DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ - DCN3_1_RDPCSTX_REG_LIST(0),\ - DCN3_1_RDPCSTX_REG_LIST(1),\ - DCN3_1_RDPCSTX_REG_LIST(2),\ - DCN3_1_RDPCSTX_REG_LIST(3),\ - DCN3_1_RDPCSTX_REG_LIST(4)\ -} - -static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { - hpo_dp_link_encoder_reg_list(0), - hpo_dp_link_encoder_reg_list(1), -}; - -static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { - DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { - DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) -}; - -#define dpp_regs(id)\ -[id] = {\ - DPP_REG_LIST_DCN30(id),\ -} - -static const struct dcn3_dpp_registers dpp_regs[] = { - dpp_regs(0), - dpp_regs(1), - dpp_regs(2), - dpp_regs(3) -}; - -static const struct dcn3_dpp_shift tf_shift = { - DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) -}; - -static const struct dcn3_dpp_mask tf_mask = { - DPP_REG_LIST_SH_MASK_DCN30(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_REG_LIST_DCN30(id),\ -} - -static const struct dcn20_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3) -}; - -static const struct dcn20_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct 
dcn20_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN20(_MASK) -}; - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST0(id), \ - .AUXN_IMPCAL = 0, \ - .AUXP_IMPCAL = 0, \ - .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4) -}; - -#define dwbc_regs_dcn3(id)\ -[id] = {\ - DWBC_COMMON_REG_LIST_DCN30(id),\ -} - -static const struct dcn30_dwbc_registers dwbc30_regs[] = { - dwbc_regs_dcn3(0), -}; - -static const struct dcn30_dwbc_shift dwbc30_shift = { - DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_dwbc_mask dwbc30_mask = { - DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define mcif_wb_regs_dcn3(id)\ -[id] = {\ - MCIF_WB_COMMON_REG_LIST_DCN30(id),\ -} - -static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { - mcif_wb_regs_dcn3(0) -}; - -static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define dsc_regsDCN20(id)\ -[id] = {\ - DSC_REG_LIST_DCN20(id)\ -} - -static const struct dcn20_dsc_registers dsc_regs[] = { - dsc_regsDCN20(0), - dsc_regsDCN20(1), - dsc_regsDCN20(2) -}; - -static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) -}; - -static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN20(_MASK) -}; - -static const struct dcn30_mpc_registers mpc_regs = { - MPC_REG_LIST_DCN3_0(0), - MPC_REG_LIST_DCN3_0(1), - MPC_REG_LIST_DCN3_0(2), - MPC_REG_LIST_DCN3_0(3), - MPC_OUT_MUX_REG_LIST_DCN3_0(0), - MPC_OUT_MUX_REG_LIST_DCN3_0(1), - MPC_OUT_MUX_REG_LIST_DCN3_0(2), - MPC_OUT_MUX_REG_LIST_DCN3_0(3), - MPC_RMU_GLOBAL_REG_LIST_DCN3AG, - MPC_RMU_REG_LIST_DCN3AG(0), - MPC_RMU_REG_LIST_DCN3AG(1), - //MPC_RMU_REG_LIST_DCN3AG(2), - MPC_DWB_MUX_REG_LIST_DCN3_0(0), -}; - -static const struct dcn30_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define optc_regs(id)\ -[id] = {OPTC_COMMON_REG_LIST_DCN3_1(id)} - -static const struct dcn_optc_registers optc_regs[] = { - optc_regs(0), - optc_regs(1), - optc_regs(2), - optc_regs(3) -}; - -static const struct dcn_optc_shift optc_shift = { - OPTC_COMMON_MASK_SH_LIST_DCN3_1(__SHIFT) -}; - -static const struct dcn_optc_mask optc_mask = { - OPTC_COMMON_MASK_SH_LIST_DCN3_1(_MASK) -}; - -#define hubp_regs(id)\ -[id] = {\ - HUBP_REG_LIST_DCN30(id)\ -} - -static const struct dcn_hubp2_registers hubp_regs[] = { - hubp_regs(0), - hubp_regs(1), - hubp_regs(2), - hubp_regs(3) -}; - - -static const struct dcn_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN31(__SHIFT) -}; - -static const struct dcn_hubp2_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN31(_MASK) -}; -static const struct dcn_hubbub_registers hubbub_reg = { - HUBBUB_REG_LIST_DCN31(0) -}; - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN31(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN31(_MASK) -}; - -static const struct dccg_registers dccg_regs = { - DCCG_REG_LIST_DCN31() -}; - -static const struct dccg_shift dccg_shift = { - DCCG_MASK_SH_LIST_DCN31(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN31(_MASK) -}; - - -#define SRII2(reg_name_pre, 
reg_name_post, id)\ - .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ - ## id ## _ ## reg_name_post ## _BASE_IDX) + \ - reg ## reg_name_pre ## id ## _ ## reg_name_post - - -#define HWSEQ_DCN31_REG_LIST()\ - SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ - SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ - SR(DIO_MEM_PWR_CTRL), \ - SR(ODM_MEM_PWR_CTRL3), \ - SR(DMU_MEM_PWR_CNTL), \ - SR(MMHUBBUB_MEM_PWR_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL2), \ - SR(DCFCLK_CNTL),\ - SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ - SRII(PIXEL_RATE_CNTL, OTG, 0), \ - SRII(PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PIXEL_RATE_CNTL, OTG, 3),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ - SR(MICROSECOND_TIME_BASE_DIV), \ - SR(MILLISECOND_TIME_BASE_DIV), \ - SR(DISPCLK_FREQ_CHANGE_CNTL), \ - SR(RBBMIF_TIMEOUT_DIS), \ - SR(RBBMIF_TIMEOUT_DIS_2), \ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ - SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ - SR(MPC_CRC_CTRL), \ - SR(MPC_CRC_RESULT_GB), \ - SR(MPC_CRC_RESULT_C), \ - SR(MPC_CRC_RESULT_AR), \ - SR(DOMAIN0_PG_CONFIG), \ - SR(DOMAIN1_PG_CONFIG), \ - SR(DOMAIN2_PG_CONFIG), \ - SR(DOMAIN3_PG_CONFIG), \ - SR(DOMAIN16_PG_CONFIG), \ - SR(DOMAIN17_PG_CONFIG), \ - SR(DOMAIN18_PG_CONFIG), \ - SR(DOMAIN0_PG_STATUS), \ - SR(DOMAIN1_PG_STATUS), \ - SR(DOMAIN2_PG_STATUS), \ - SR(DOMAIN3_PG_STATUS), \ - SR(DOMAIN16_PG_STATUS), \ - SR(DOMAIN17_PG_STATUS), \ - SR(DOMAIN18_PG_STATUS), \ - SR(D1VGA_CONTROL), \ - SR(D2VGA_CONTROL), \ - SR(D3VGA_CONTROL), \ - SR(D4VGA_CONTROL), \ - SR(D5VGA_CONTROL), \ - SR(D6VGA_CONTROL), \ - SR(DC_IP_REQUEST_CNTL), \ - SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING), \ - SR(HPO_TOP_HW_CONTROL) - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCN31_REG_LIST() -}; - -#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\ - HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ - HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ - HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - 
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ - HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ - HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ - HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ - HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ - HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ - HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN31_MASK_SH_LIST(_MASK) -}; -#define vmid_regs(id)\ -[id] = {\ - DCN20_VMID_REG_LIST(id)\ -} - -static const struct dcn_vmid_registers vmid_regs[] = { - vmid_regs(0), - vmid_regs(1), - vmid_regs(2), - vmid_regs(3), - vmid_regs(4), - vmid_regs(5), - vmid_regs(6), - vmid_regs(7), - vmid_regs(8), - vmid_regs(9), - vmid_regs(10), - vmid_regs(11), - vmid_regs(12), - vmid_regs(13), - vmid_regs(14), - vmid_regs(15) -}; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -static const struct resource_caps res_cap_dcn31 = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 5, - .num_stream_encoder = 5, - .num_dig_link_enc = 5, - .num_hpo_dp_stream_encoder = 4, - .num_hpo_dp_link_encoder = 2, - .num_pll = 5, - .num_dwb = 1, - .num_ddc = 5, - .num_vmid = 16, - .num_mpc_3dlut = 2, - .num_dsc = 3, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true, - .ayuv = false, - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 16000 - }, - - // 6:1 downscaling ratio: 1000/6 = 166.666 - .max_downscale_factor = { - .argb8888 = 167, - .nv12 = 167, - .fp16 = 167 - }, - 64, - 64 -}; - -static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = false, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, - .performance_trace = false, - .max_downscale_src_width = 4096,/*upto true 4K*/ - .disable_pplib_wm_range = false, - .scl_reset_length10 = true, - .sanity_checks = true, - .underflow_assert_delay_us = 0xFFFFFFFF, - .dwb_fi_phase = -1, // -1 = disable, - .dmub_command_table = true, - .pstate_enabled = true, - .use_max_lb = true, - .enable_mem_low_power = { - .bits = { - .vga = true, - .i2c = true, - .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled - .dscl = true, - .cm = true, - .mpc = true, - .optc = true, - .vpg = true, - .afmt = true, - } - }, - .disable_z10 = true, - .enable_legacy_fast_update = true, - .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ - .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, - .using_dml2 = false, -}; - -static const struct dc_panel_config panel_config_defaults = { - .psr = { - .disable_psr = false, - .disallow_psrsu = false, - .disallow_replay = false, - }, - .ilr = { - .optimize_edp_link_rate = true, - }, -}; - 
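The deleted tables above all follow DC's shift/mask convention: each register-field list macro (SF, HWS_SF, the *_MASK_SH_LIST families) is expanded twice, once with a __SHIFT suffix and once with a _MASK suffix, producing two parallel structs that a generic read-modify-write helper can consume. The following is a minimal, self-contained sketch of that pattern; the demo_* names are invented for illustration, and this is not the driver's actual reg_helper code.

#include <stdint.h>
#include <stdio.h>

/* Field positions and widths, as a HW register header would define them. */
#define DEMO_CONTROL__ENABLE__SHIFT 0x0
#define DEMO_CONTROL__ENABLE_MASK   0x00000001L
#define DEMO_CONTROL__MODE__SHIFT   0x4
#define DEMO_CONTROL__MODE_MASK     0x00000030L

/* One field list, expanded twice via token pasting - the same trick the
 * *_MASK_SH_LIST(__SHIFT) / *_MASK_SH_LIST(_MASK) pairs above rely on. */
#define DEMO_FIELD_LIST(post) \
	.ENABLE = DEMO_CONTROL__ENABLE ## post, \
	.MODE   = DEMO_CONTROL__MODE ## post

struct demo_shift { uint8_t ENABLE; uint8_t MODE; };
struct demo_mask  { uint32_t ENABLE; uint32_t MODE; };

static const struct demo_shift demo_shift = { DEMO_FIELD_LIST(__SHIFT) };
static const struct demo_mask  demo_mask  = { DEMO_FIELD_LIST(_MASK) };

/* Generic read-modify-write over a (mask, shift) pair - the role that
 * REG_UPDATE() plays in the real code. */
static uint32_t field_update(uint32_t reg_val, uint32_t mask,
			     uint8_t shift, uint32_t value)
{
	return (reg_val & ~mask) | ((value << shift) & mask);
}

int main(void)
{
	uint32_t ctl = 0;

	ctl = field_update(ctl, demo_mask.ENABLE, demo_shift.ENABLE, 1);
	ctl = field_update(ctl, demo_mask.MODE,   demo_shift.MODE,   2);
	printf("CONTROL = 0x%08x\n", ctl); /* prints 0x00000021 */
	return 0;
}

Expanding a single field list once per suffix keeps the hardware-header names as the lone source of truth: adding a field to the list updates the shift struct, the mask struct, and every helper that consumes them in one place.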
-static void dcn31_dpp_destroy(struct dpp **dpp) -{ - kfree(TO_DCN20_DPP(*dpp)); - *dpp = NULL; -} - -static struct dpp *dcn31_dpp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn3_dpp *dpp = - kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); - - if (!dpp) - return NULL; - - if (dpp3_construct(dpp, ctx, inst, - &dpp_regs[inst], &tf_shift, &tf_mask)) - return &dpp->base; - - BREAK_TO_DEBUGGER(); - kfree(dpp); - return NULL; -} - -static struct output_pixel_processor *dcn31_opp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_opp *opp = - kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); - - if (!opp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn20_opp_construct(opp, ctx, inst, - &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -static struct dce_aux *dcn31_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5), -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static struct dce_i2c_hw *dcn31_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} -static struct mpc *dcn31_mpc_create( - struct dc_context *ctx, - int num_mpcc, - int num_rmu) -{ - struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), - GFP_KERNEL); - - if (!mpc30) - return NULL; - - dcn30_mpc_construct(mpc30, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - num_mpcc, - num_rmu); - - return &mpc30->base; -} - -static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) -{ - int i; - - struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), - GFP_KERNEL); - - if (!hubbub3) - return NULL; - - hubbub31_construct(hubbub3, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask, - dcn3_1_ip.det_buffer_size_kbytes, - dcn3_1_ip.pixel_chunk_size_kbytes, - dcn3_1_ip.config_return_buffer_size_in_kbytes); - - - for (i = 0; i < res_cap_dcn31.num_vmid; i++) { - struct dcn20_vmid *vmid = &hubbub3->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - - return &hubbub3->base; -} - -static struct timing_generator *dcn31_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &optc_regs[instance]; - tgn10->tg_shift = &optc_shift; - tgn10->tg_mask = &optc_mask; - - dcn31_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color 
= COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -static struct link_encoder *dcn31_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - - if (!enc20) - return NULL; - - dcn31_link_encoder_construct(enc20, - enc_init_data, - &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc20->enc10.base; -} - -/* Create a minimal link encoder object not associated with a particular - * physical connector. - * resource_funcs.link_enc_create_minimal - */ -static struct link_encoder *dcn31_link_enc_create_minimal( - struct dc_context *ctx, enum engine_id eng_id) -{ - struct dcn20_link_encoder *enc20; - - if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) - return NULL; - - enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) - return NULL; - - dcn31_link_encoder_construct_minimal( - enc20, - ctx, - &link_enc_feature, - &link_enc_regs[eng_id - ENGINE_ID_DIGA], - eng_id); - - return &enc20->enc10.base; -} - -static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dcn31_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dcn31_panel_cntl_construct(panel_cntl, init_data); - - return &panel_cntl->base; -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); - -} - -static struct audio *dcn31_create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct vpg *dcn31_vpg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); - - if (!vpg31) - return NULL; - - vpg31_construct(vpg31, ctx, inst, - &vpg_regs[inst], - &vpg_shift, - &vpg_mask); - - return &vpg31->base; -} - -static struct afmt *dcn31_afmt_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); - - if (!afmt31) - return NULL; - - afmt31_construct(afmt31, ctx, inst, - &afmt_regs[inst], - &afmt_shift, - &afmt_mask); - - // Light sleep by default, no need to power down here - - return &afmt31->base; -} - -static struct apg *dcn31_apg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); - - if (!apg31) - return NULL; - - apg31_construct(apg31, ctx, inst, - &apg_regs[inst], - &apg_shift, - &apg_mask); - - return &apg31->base; -} - -static struct stream_encoder *dcn31_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1; - struct vpg *vpg; - struct afmt *afmt; - int vpg_inst; - int afmt_inst; - - /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= 
ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else - return NULL; - - enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - vpg = dcn31_vpg_create(ctx, vpg_inst); - afmt = dcn31_afmt_create(ctx, afmt_inst); - - if (!enc1 || !vpg || !afmt) { - kfree(enc1); - kfree(vpg); - kfree(afmt); - return NULL; - } - - dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, - eng_id, vpg, afmt, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; - struct vpg *vpg; - struct apg *apg; - uint32_t hpo_dp_inst; - uint32_t vpg_inst; - uint32_t apg_inst; - - ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); - hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; - - /* Mapping of VPG register blocks to HPO DP block instance: - * VPG[6] -> HPO_DP[0] - * VPG[7] -> HPO_DP[1] - * VPG[8] -> HPO_DP[2] - * VPG[9] -> HPO_DP[3] - */ - vpg_inst = hpo_dp_inst + 6; - - /* Mapping of APG register blocks to HPO DP block instance: - * APG[0] -> HPO_DP[0] - * APG[1] -> HPO_DP[1] - * APG[2] -> HPO_DP[2] - * APG[3] -> HPO_DP[3] - */ - apg_inst = hpo_dp_inst; - - /* allocate HPO stream encoder and create VPG sub-block */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); - vpg = dcn31_vpg_create(ctx, vpg_inst); - apg = dcn31_apg_create(ctx, apg_inst); - - if (!hpo_dp_enc31 || !vpg || !apg) { - kfree(hpo_dp_enc31); - kfree(vpg); - kfree(apg); - return NULL; - } - - dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, - hpo_dp_inst, eng_id, vpg, apg, - &hpo_dp_stream_enc_regs[hpo_dp_inst], - &hpo_dp_se_shift, &hpo_dp_se_mask); - - return &hpo_dp_enc31->base; -} - -static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( - uint8_t inst, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; - - /* allocate HPO link encoder */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); - - hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, - &hpo_dp_link_enc_regs[inst], - &hpo_dp_le_shift, &hpo_dp_le_mask); - - return &hpo_dp_enc31->base; -} - -static struct dce_hwseq *dcn31_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn31_create_audio, - .create_stream_encoder = dcn31_stream_encoder_create, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn31_hwseq_create, -}; - -static void dcn31_resource_destruct(struct dcn31_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) { - if (pool->base.stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); - pool->base.stream_enc[i]->vpg = NULL; - } - if (pool->base.stream_enc[i]->afmt != NULL) { - kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); - pool->base.stream_enc[i]->afmt = NULL; - } - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = 
NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { - if (pool->base.hpo_dp_stream_enc[i] != NULL) { - if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); - pool->base.hpo_dp_stream_enc[i]->vpg = NULL; - } - if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { - kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); - pool->base.hpo_dp_stream_enc[i]->apg = NULL; - } - kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); - pool->base.hpo_dp_stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { - if (pool->base.hpo_dp_link_enc[i] != NULL) { - kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); - pool->base.hpo_dp_link_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - if (pool->base.dscs[i] != NULL) - dcn20_dsc_destroy(&pool->base.dscs[i]); - } - - if (pool->base.mpc != NULL) { - kfree(TO_DCN20_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.dpps[i] != NULL) - dcn31_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dwb; i++) { - if (pool->base.dwbc[i] != NULL) { - kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); - pool->base.dwbc[i] = NULL; - } - if (pool->base.mcif_wb[i] != NULL) { - kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); - pool->base.mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn20_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { - if (pool->base.mpc_lut[i] != NULL) { - dc_3dlut_func_release(pool->base.mpc_lut[i]); - pool->base.mpc_lut[i] = NULL; - } - if (pool->base.mpc_shaper[i] != NULL) { - dc_transfer_func_release(pool->base.mpc_shaper[i]); - pool->base.mpc_shaper[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn20_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.multiple_abms[i] 
!= NULL) - dce_abm_destroy(&pool->base.multiple_abms[i]); - } - - if (pool->base.psr != NULL) - dmub_psr_destroy(&pool->base.psr); - - if (pool->base.replay != NULL) - dmub_replay_destroy(&pool->base.replay); - - if (pool->base.dccg != NULL) - dcn_dccg_destroy(&pool->base.dccg); -} - -static struct hubp *dcn31_hubp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); - - if (!hubp2) - return NULL; - - if (hubp31_construct(hubp2, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp2->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp2); - return NULL; -} - -static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), - GFP_KERNEL); - - if (!dwbc30) { - dm_error("DC: failed to create dwbc30!\n"); - return false; - } - - dcn30_dwbc_construct(dwbc30, ctx, - &dwbc30_regs[i], - &dwbc30_shift, - &dwbc30_mask, - i); - - pool->dwbc[i] = &dwbc30->base; - } - return true; -} - -static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), - GFP_KERNEL); - - if (!mcif_wb30) { - dm_error("DC: failed to create mcif_wb30!\n"); - return false; - } - - dcn30_mmhubbub_construct(mcif_wb30, ctx, - &mcif_wb30_regs[i], - &mcif_wb30_shift, - &mcif_wb30_mask, - i); - - pool->mcif_wb[i] = &mcif_wb30->base; - } - return true; -} - -static struct display_stream_compressor *dcn31_dsc_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - return &dsc->base; -} - -static void dcn31_destroy_resource_pool(struct resource_pool **pool) -{ - struct dcn31_resource_pool *dcn31_pool = TO_DCN31_RES_POOL(*pool); - - dcn31_resource_destruct(dcn31_pool); - kfree(dcn31_pool); - *pool = NULL; -} - -static struct clock_source *dcn31_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn3_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static bool is_dual_plane(enum surface_pixel_format format) -{ - return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; -} - -int dcn31x_populate_dml_pipes_from_context(struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate) -{ - uint32_t pipe_cnt; - int i; - - dc_assert_fp_enabled(); - - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); - - for (i = 0; i < pipe_cnt; i++) { - pipes[i].pipe.src.gpuvm = 1; - if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE) { - //pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active; - pipes[i].pipe.src.hostvm = dc->vm_pa_config.is_hvm_enabled; - } else if 
(dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_FALSE) - pipes[i].pipe.src.hostvm = false; - else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_TRUE) - pipes[i].pipe.src.hostvm = true; - } - return pipe_cnt; -} - -int dcn31_populate_dml_pipes_from_context( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate) -{ - int i, pipe_cnt; - struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; - bool upscaled = false; - - DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); - DC_FP_END(); - - for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { - struct dc_crtc_timing *timing; - - if (!res_ctx->pipe_ctx[i].stream) - continue; - pipe = &res_ctx->pipe_ctx[i]; - timing = &pipe->stream->timing; - if (pipe->plane_state && - (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height || - pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width)) - upscaled = true; - - /* - * Immediate flip can be set dynamically after enabling the plane. - * We need to require support for immediate flip or underflow can be - * intermittently experienced depending on peak b/w requirements. - */ - pipes[pipe_cnt].pipe.src.immediate_flip = true; - pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; - pipes[pipe_cnt].pipe.src.gpuvm = true; - pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; - pipes[pipe_cnt].pipe.src.dcc_rate = 3; - pipes[pipe_cnt].dout.dsc_input_bpc = 0; - DC_FP_START(); - dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); - DC_FP_END(); - - - if (pipes[pipe_cnt].dout.dsc_enable) { - switch (timing->display_color_depth) { - case COLOR_DEPTH_888: - pipes[pipe_cnt].dout.dsc_input_bpc = 8; - break; - case COLOR_DEPTH_101010: - pipes[pipe_cnt].dout.dsc_input_bpc = 10; - break; - case COLOR_DEPTH_121212: - pipes[pipe_cnt].dout.dsc_input_bpc = 12; - break; - default: - ASSERT(0); - break; - } - } - - pipe_cnt++; - } - context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_1_DEFAULT_DET_SIZE; - dc->config.enable_4to1MPC = false; - if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { - if (is_dual_plane(pipe->plane_state->format) - && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { - dc->config.enable_4to1MPC = true; - } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) { - /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */ - context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; - pipes[0].pipe.src.unbounded_req_mode = true; - } - } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count - && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) { - context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64; - } else if (context->stream_count >= 3 && upscaled) { - context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; - } - - return pipe_cnt; -} - -void dcn31_calculate_wm_and_dlg( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt, - int vlevel) -{ - DC_FP_START(); - dcn31_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel); - DC_FP_END(); -} - -void -dcn31_populate_dml_writeback_from_context(struct dc *dc, - struct resource_context *res_ctx, - display_e2e_pipe_params_st *pipes) -{ - DC_FP_START(); - dcn30_populate_dml_writeback_from_context(dc, res_ctx, pipes); - DC_FP_END(); -} - -void 
-dcn31_set_mcif_arb_params(struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt) -{ - DC_FP_START(); - dcn30_set_mcif_arb_params(dc, context, pipes, pipe_cnt); - DC_FP_END(); -} - -bool dcn31_validate_bandwidth(struct dc *dc, - struct dc_state *context, - bool fast_validate) -{ - bool out = false; - - BW_VAL_TRACE_SETUP(); - - int vlevel = 0; - int pipe_cnt = 0; - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); - DC_LOGGER_INIT(dc->ctx->logger); - - BW_VAL_TRACE_COUNT(); - - DC_FP_START(); - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); - DC_FP_END(); - - // Disable fast_validate to set min dcfclk in alculate_wm_and_dlg - if (pipe_cnt == 0) - fast_validate = false; - - if (!out) - goto validate_fail; - - BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - - if (fast_validate) { - BW_VAL_TRACE_SKIP(fast); - goto validate_out; - } - if (dc->res_pool->funcs->calculate_wm_and_dlg) - dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); - - BW_VAL_TRACE_END_WATERMARKS(); - - goto validate_out; - -validate_fail: - DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", - dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); - - BW_VAL_TRACE_SKIP(fail); - out = false; - -validate_out: - kfree(pipes); - - BW_VAL_TRACE_FINISH(); - - return out; -} - -static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config) -{ - *panel_config = panel_config_defaults; -} - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap -}; - -static struct resource_funcs dcn31_res_pool_funcs = { - .destroy = dcn31_destroy_resource_pool, - .link_enc_create = dcn31_link_encoder_create, - .link_enc_create_minimal = dcn31_link_enc_create_minimal, - .link_encs_assign = link_enc_cfg_link_encs_assign, - .link_enc_unassign = link_enc_cfg_link_enc_unassign, - .panel_cntl_create = dcn31_panel_cntl_create, - .validate_bandwidth = dcn31_validate_bandwidth, - .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, - .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, - .populate_dml_pipes = dcn31_populate_dml_pipes_from_context, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, - .release_pipe = dcn20_release_pipe, - .add_stream_to_ctx = dcn30_add_stream_to_ctx, - .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context, - .set_mcif_arb_params = dcn31_set_mcif_arb_params, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, - .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, - .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, - .update_bw_bounding_box = dcn31_update_bw_bounding_box, - .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, - .get_panel_config_defaults = dcn31_get_panel_config_defaults, -}; - -static struct clock_source *dcn30_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn31_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, 
&cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - BREAK_TO_DEBUGGER(); - return NULL; -} - -static bool dcn31_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dcn31_resource_pool *pool) -{ - int i; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data init_data; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap_dcn31; - - pool->base.funcs = &dcn31_res_pool_funcs; - - /************************************************* - * Resource + asic cap harcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = pool->base.res_cap->num_timing_generator; - pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; - dc->caps.max_downscale_ratio = 600; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/ - dc->caps.max_cursor_size = 256; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dmdata_alloc_size = 2048; - - dc->caps.max_slave_planes = 2; - dc->caps.max_slave_yuv_planes = 2; - dc->caps.max_slave_rgb_planes = 2; - dc->caps.post_blend_color_processing = true; - dc->caps.force_dp_tps4_for_cp2520 = true; - if (dc->config.forceHBR2CP2520) - dc->caps.force_dp_tps4_for_cp2520 = false; - dc->caps.dp_hpo = true; - dc->caps.dp_hdmi21_pcon_support = true; - dc->caps.edp_dsc_support = true; - dc->caps.extended_aux_timeout_support = true; - dc->caps.dmcub_support = true; - dc->caps.is_apu = true; - dc->caps.zstate_support = true; - - /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 0; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; - dc->caps.color.dpp.dgam_rom_caps.pq = 1; - dc->caps.color.dpp.dgam_rom_caps.hlg = 1; - dc->caps.color.dpp.post_csc = 1; - dc->caps.color.dpp.gamma_corr = 1; - dc->caps.color.dpp.dgam_rom_for_yuv = 0; - - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; - // no OGAM ROM on DCN301 - dc->caps.color.dpp.ogam_rom_caps.srgb = 0; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 0; - - dc->caps.color.mpc.gamut_remap = 1; - dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 - dc->caps.color.mpc.ogam_ram = 1; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; - dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 1; - - dc->config.use_old_fixed_vs_sequence = true; - - /* Use pipe context based otg sync logic */ - dc->config.use_pipe_ctx_sync_logic = true; - - /* read VBIOS LTTPR caps */ - { - if (ctx->dc_bios->funcs->get_lttpr_caps) { - enum bp_result bp_query_result; - uint8_t is_vbios_lttpr_enable = 0; - - bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); - dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; - } - - /* interop bit is implicit */ - { - dc->caps.vbios_lttpr_aware = true; - } - } - - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) - dc->debug = 
debug_defaults_drv; - - // Init the vm_helper - if (dc->vm_helper) - vm_helper_init(dc->vm_helper, 16); - - /************************************************* - * Create resources * - *************************************************/ - - /* Clock Sources for Pixel Clock*/ - pool->base.clock_sources[DCN31_CLK_SRC_PLL0] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL1] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - /*move phypllx_pixclk_resync to dmub next*/ - if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { - pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs_b0[2], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs_b0[3], false); - } else { - pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - } - - pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - - pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL; - - /* todo: not reuse phy_pll registers */ - pool->base.dp_clock_source = - dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* TODO: DCCG */ - pool->base.dccg = dccg31_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->base.dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* TODO: IRQ */ - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dcn31_create(&init_data); - if (!pool->base.irqs) - goto create_fail; - - /* HUBBUB */ - pool->base.hubbub = dcn31_hubbub_create(ctx); - if (pool->base.hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - /* HUBPs, DPPs, OPPs and TGs */ - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.hubps[i] = dcn31_hubp_create(ctx, i); - if (pool->base.hubps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create hubps!\n"); - goto create_fail; - } - - pool->base.dpps[i] = dcn31_dpp_create(ctx, i); - if (pool->base.dpps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create dpps!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - pool->base.opps[i] = dcn31_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.timing_generators[i] = dcn31_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - pool->base.timing_generator_count = i; - - /* PSR */ - pool->base.psr 
= dmub_psr_create(ctx); - if (pool->base.psr == NULL) { - dm_error("DC: failed to create psr obj!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* Replay */ - pool->base.replay = dmub_replay_create(ctx); - if (pool->base.replay == NULL) { - dm_error("DC: failed to create replay obj!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* ABM */ - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.multiple_abms[i] = dmub_abm_create(ctx, - &abm_regs[i], - &abm_shift, - &abm_mask); - if (pool->base.multiple_abms[i] == NULL) { - dm_error("DC: failed to create abm for pipe %d!\n", i); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* MPC and DSC */ - pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); - if (pool->base.mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - pool->base.dscs[i] = dcn31_dsc_create(ctx, i); - if (pool->base.dscs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create display stream compressor %d!\n", i); - goto create_fail; - } - } - - /* DWB and MMHUBBUB */ - if (!dcn31_dwbc_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dwbc!\n"); - goto create_fail; - } - - if (!dcn31_mmhubbub_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mcif_wb!\n"); - goto create_fail; - } - - /* AUX and I2C */ - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto create_fail; - } - pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create hw i2c!!\n"); - goto create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && - dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && - !dc->debug.dpia_debug.bits.disable_dpia) { - /* YELLOW CARP B0 has 4 DPIA's */ - pool->base.usb4_dpia_count = 4; - } - - if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1) - pool->base.usb4_dpia_count = 4; - - /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto create_fail; - - /* HW Sequencer and Plane caps */ - dcn31_hw_sequencer_construct(dc); - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp; - - return true; - -create_fail: - dcn31_resource_destruct(pool); - - return false; -} - -struct resource_pool *dcn31_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc) -{ - struct dcn31_resource_pool *pool = - kzalloc(sizeof(struct dcn31_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dcn31_resource_construct(init_data->num_virtual_links, dc, pool)) - return &pool->base; - - BREAK_TO_DEBUGGER(); - kfree(pool); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h deleted file mode 100644 index 901436591e..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * 
Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef _DCN31_RESOURCE_H_ -#define _DCN31_RESOURCE_H_ - -#include "core_types.h" - -#define TO_DCN31_RES_POOL(pool)\ - container_of(pool, struct dcn31_resource_pool, base) - -extern struct _vcs_dpi_ip_params_st dcn3_1_ip; - -struct dcn31_resource_pool { - struct resource_pool base; -}; - -bool dcn31_validate_bandwidth(struct dc *dc, - struct dc_state *context, - bool fast_validate); -void dcn31_calculate_wm_and_dlg( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt, - int vlevel); -int dcn31_populate_dml_pipes_from_context( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate); -void -dcn31_populate_dml_writeback_from_context(struct dc *dc, - struct resource_context *res_ctx, - display_e2e_pipe_params_st *pipes); -void -dcn31_set_mcif_arb_params(struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt); - -struct resource_pool *dcn31_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); - -/*temp: B0 specific before switch to dcn313 headers*/ -#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL -#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e -#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1 -#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f -#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1 - -//PHYPLLF_PIXCLK_RESYNC_CNTL -#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 -#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1 -#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 -#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8 -#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 -#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L -#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L -#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L -#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L -#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L - -//PHYPLLG_PIXCLK_RESYNC_CNTL -#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 -#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 
-/*temp: B0 specific before switch to dcn313 headers*/
-#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
-#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
-#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1
-#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f
-#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1
-
-//PHYPLLF_PIXCLK_RESYNC_CNTL
-#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
-#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
-#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
-#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8
-#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
-#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
-#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
-#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
-#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L
-#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
-
-//PHYPLLG_PIXCLK_RESYNC_CNTL
-#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
-#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
-#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
-#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8
-#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
-#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
-#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
-#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
-#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L
-#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
-#endif
-#endif /* _DCN31_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
index 72456debb9..b134ab05aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
@@ -10,8 +10,7 @@
 #
 # Makefile for dcn314.
 
-DCN314 = dcn314_resource.o dcn314_init.o \
-	dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o
+DCN314 = dcn314_dio_stream_encoder.o dcn314_dccg.o
 
 AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
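The TO_DCN31_RES_POOL() macro in the header above is the kernel's container_of idiom: struct dcn31_resource_pool embeds the generic struct resource_pool as its base member, so a pointer to the base can be mapped back to the wrapper struct. A simplified standalone sketch of the idiom (my_container_of and to_dcn31 are illustrative names; the kernel's container_of additionally type-checks the member, which is omitted here):

#include <stddef.h>

struct resource_pool { int caps; };        /* the generic base, field illustrative */

struct dcn31_resource_pool {
	struct resource_pool base;         /* must be embedded, as in the header above */
	/* asic-specific state would follow here */
};

/* Same shape as the kernel macro: subtract the member's offset
 * from the member pointer to recover the containing struct. */
#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct dcn31_resource_pool *to_dcn31(struct resource_pool *pool)
{
	return my_container_of(pool, struct dcn31_resource_pool, base);
}

This is why core DC code can pass struct resource_pool pointers around while each generation's code recovers its own pool type at the boundary.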
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
deleted file mode 100644
index ccb7e317e8..0000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright 2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "dce110/dce110_hwseq.h"
-#include "dcn10/dcn10_hwseq.h"
-#include "dcn20/dcn20_hwseq.h"
-#include "dcn21/dcn21_hwseq.h"
-#include "dcn30/dcn30_hwseq.h"
-#include "dcn301/dcn301_hwseq.h"
-#include "dcn31/dcn31_hwseq.h"
-#include "dcn314/dcn314_hwseq.h"
-
-#include "dcn314_init.h"
-
-static const struct hw_sequencer_funcs dcn314_funcs = {
-	.program_gamut_remap = dcn30_program_gamut_remap,
-	.init_hw = dcn31_init_hw,
-	.power_down_on_boot = dcn10_power_down_on_boot,
-	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
-	.apply_ctx_for_surface = NULL,
-	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
-	.wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
-	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
-	.update_plane_addr = dcn20_update_plane_addr,
-	.update_dchub = dcn10_update_dchub,
-	.update_pending_status = dcn10_update_pending_status,
-	.program_output_csc = dcn20_program_output_csc,
-	.enable_accelerated_mode = dce110_enable_accelerated_mode,
-	.enable_timing_synchronization = dcn10_enable_timing_synchronization,
-	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
-	.update_info_frame = dcn31_update_info_frame,
-	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
-	.enable_stream = dcn20_enable_stream,
-	.disable_stream = dce110_disable_stream,
-	.unblank_stream = dcn20_unblank_stream,
-	.blank_stream = dce110_blank_stream,
-	.enable_audio_stream = dce110_enable_audio_stream,
-	.disable_audio_stream = dce110_disable_audio_stream,
-	.disable_plane = dcn20_disable_plane,
-	.disable_pixel_data = dcn20_disable_pixel_data,
-	.pipe_control_lock = dcn20_pipe_control_lock,
-	.interdependent_update_lock = dcn10_lock_all_pipes,
-	.cursor_lock = dcn10_cursor_lock,
-	.prepare_bandwidth = dcn20_prepare_bandwidth,
-	.optimize_bandwidth = dcn20_optimize_bandwidth,
-	.update_bandwidth = dcn20_update_bandwidth,
-	.set_drr = dcn10_set_drr,
-	.get_position = dcn10_get_position,
-	.set_static_screen_control = dcn30_set_static_screen_control,
-	.setup_stereo = dcn10_setup_stereo,
-	.set_avmute = dcn30_set_avmute,
-	.log_hw_state = dcn10_log_hw_state,
-	.get_hw_state = dcn10_get_hw_state,
-	.clear_status_bits = dcn10_clear_status_bits,
-	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
-	.edp_backlight_control = dce110_edp_backlight_control,
-	.edp_power_control = dce110_edp_power_control,
-	.edp_wait_for_T12 = dce110_edp_wait_for_T12,
-	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
-	.set_cursor_position = dcn10_set_cursor_position,
-	.set_cursor_attribute = dcn10_set_cursor_attribute,
-	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
-	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
-	.set_clock = dcn10_set_clock,
-	.get_clock = dcn10_get_clock,
-	.program_triplebuffer = dcn20_program_triple_buffer,
-	.enable_writeback = dcn30_enable_writeback,
-	.disable_writeback = dcn30_disable_writeback,
-	.update_writeback = dcn30_update_writeback,
-	.mmhubbub_warmup = dcn30_mmhubbub_warmup,
-	.dmdata_status_done = dcn20_dmdata_status_done,
-	.program_dmdata_engine = dcn30_program_dmdata_engine,
-	.set_dmdata_attributes = dcn20_set_dmdata_attributes,
-	.init_sys_ctx = dcn31_init_sys_ctx,
-	.init_vm_ctx = dcn20_init_vm_ctx,
-	.set_flip_control_gsl = dcn20_set_flip_control_gsl,
-	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
-	.calc_vupdate_position = dcn10_calc_vupdate_position,
-	.power_down = dce110_power_down,
-	.set_backlight_level = dcn21_set_backlight_level,
-	.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
-	.set_pipe = dcn21_set_pipe,
-	.enable_lvds_link_output = dce110_enable_lvds_link_output,
-	.enable_tmds_link_output = dce110_enable_tmds_link_output,
-	.enable_dp_link_output = dce110_enable_dp_link_output,
-	.disable_link_output = dcn314_disable_link_output,
-	.z10_restore = dcn31_z10_restore,
-	.z10_save_init = dcn31_z10_save_init,
-	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
-	.optimize_pwr_state = dcn21_optimize_pwr_state,
-	.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
-	.update_visual_confirm_color = dcn10_update_visual_confirm_color,
-};
-
-static const struct hwseq_private_funcs dcn314_private_funcs = {
-	.init_pipes = dcn10_init_pipes,
-	.update_plane_addr = dcn20_update_plane_addr,
-	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
-	.update_mpcc = dcn20_update_mpcc,
-	.set_input_transfer_func = dcn30_set_input_transfer_func,
-	.set_output_transfer_func = dcn30_set_output_transfer_func,
-	.power_down = dce110_power_down,
-	.enable_display_power_gating = dcn10_dummy_display_power_gating,
-	.blank_pixel_data = dcn20_blank_pixel_data,
-	.reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap,
-	.enable_stream_timing = dcn20_enable_stream_timing,
-	.edp_backlight_control = dce110_edp_backlight_control,
-	.disable_stream_gating = dcn20_disable_stream_gating,
-	.enable_stream_gating = dcn20_enable_stream_gating,
-	.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
-	.did_underflow_occur = dcn10_did_underflow_occur,
-	.init_blank = dcn20_init_blank,
-	.disable_vga = dcn20_disable_vga,
-	.bios_golden_init = dcn10_bios_golden_init,
-	.plane_atomic_disable = dcn20_plane_atomic_disable,
-	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
-	.enable_power_gating_plane = dcn314_enable_power_gating_plane,
-	.dpp_root_clock_control = dcn314_dpp_root_clock_control,
-	.hubp_pg_control = dcn31_hubp_pg_control,
-	.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
-	.update_odm = dcn314_update_odm,
-	.dsc_pg_control = dcn314_dsc_pg_control,
-	.set_hdr_multiplier = dcn10_set_hdr_multiplier,
-	.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
-	.wait_for_blank_complete = dcn20_wait_for_blank_complete,
-	.dccg_init = dcn20_dccg_init,
-	.set_blend_lut = dcn30_set_blend_lut,
-	.set_shaper_3dlut = dcn20_set_shaper_3dlut,
-	.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
-	.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
-	.set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
-	.resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio,
-};
-
-void dcn314_hw_sequencer_construct(struct dc *dc)
-{
-	dc->hwss = dcn314_funcs;
-	dc->hwseq->funcs = dcn314_private_funcs;
-
-}
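dcn314_hw_sequencer_construct() above is the wiring point of DC's hook-table pattern: a one-time copy of version-specific function-pointer tables into the dc context, after which generation-neutral core code dispatches through dc->hwss without knowing which DCN generation it is driving (note how the tables above mix dcn10/dcn20/dcn30/dcn31/dcn314 callbacks, reusing older implementations wherever the hardware did not change). A minimal standalone sketch of the pattern, assuming a single hook; the one-member table and the main() harness are illustrative, while the real tables carry dozens of hooks as shown above:

#include <stdio.h>

/* Stand-in for the DC hook table; the real struct hw_sequencer_funcs
 * in this patch has many more members. */
struct hw_sequencer_funcs {
	void (*init_hw)(void);
};

struct dc {
	struct hw_sequencer_funcs hwss;
};

static void dcn314_init_hw_sketch(void) { puts("dcn314 init_hw"); }

static const struct hw_sequencer_funcs dcn314_funcs_sketch = {
	.init_hw = dcn314_init_hw_sketch,
};

/* Construct-time wiring: core code later calls dc->hwss.init_hw()
 * through the table, never the dcn314 symbol directly. */
static void hw_sequencer_construct(struct dc *dc)
{
	dc->hwss = dcn314_funcs_sketch;
}

int main(void)
{
	struct dc dc;

	hw_sequencer_construct(&dc);
	dc.hwss.init_hw();
	return 0;
}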
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h
deleted file mode 100644
index 8f92e66577..0000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright 2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DC_DCN314_INIT_H__
-#define __DC_DCN314_INIT_H__
-
-struct dc;
-
-void dcn314_hw_sequencer_construct(struct dc *dc);
-
-#endif /* __DC_DCN314_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
deleted file mode 100644
index 0086cafb0f..0000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ /dev/null
@@ -1,273 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright 2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "dcn314_optc.h"
-
-#include "dcn30/dcn30_optc.h"
-#include "dcn31/dcn31_optc.h"
-#include "reg_helper.h"
-#include "dc.h"
-#include "dcn_calc_math.h"
-
-#define REG(reg)\
-	optc1->tg_regs->reg
-
-#define CTX \
-	optc1->base.ctx
-
-#undef FN
-#define FN(reg_name, field_name) \
-	optc1->tg_shift->field_name, optc1->tg_mask->field_name
-
-/*
- * Enable CRTC
- * Enable CRTC - call ASIC Control Object to enable Timing generator.
- */
-
-static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
-		struct dc_crtc_timing *timing)
-{
-	struct optc *optc1 = DCN10TG_FROM_TG(optc);
-	uint32_t memory_mask = 0;
-	int h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
-	int mpcc_hactive = h_active / opp_cnt;
-	/* Each memory instance is 2048x(314x2) bits to support half line of 4096 */
-	int odm_mem_count = (h_active + 2047) / 2048;
-
-	/*
-	 * display <= 4k : 2 memories + 2 pipes
-	 * 4k < display <= 8k : 4 memories + 2 pipes
-	 * 8k < display <= 12k : 6 memories + 4 pipes
-	 */
-	if (opp_cnt == 4) {
-		if (odm_mem_count <= 2)
-			memory_mask = 0x3;
-		else if (odm_mem_count <= 4)
-			memory_mask = 0xf;
-		else
-			memory_mask = 0x3f;
-	} else {
-		if (odm_mem_count <= 2)
-			memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2);
-		else if (odm_mem_count <= 4)
-			memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
-		else
-			memory_mask = 0x77;
-	}
-
-	REG_SET(OPTC_MEMORY_CONFIG, 0,
-		OPTC_MEM_SEL, memory_mask);
-
-	if (opp_cnt == 2) {
-		REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
-				OPTC_NUM_OF_INPUT_SEGMENT, 1,
-				OPTC_SEG0_SRC_SEL, opp_id[0],
-				OPTC_SEG1_SRC_SEL, opp_id[1]);
-	} else if (opp_cnt == 4) {
-		REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
-				OPTC_NUM_OF_INPUT_SEGMENT, 3,
-				OPTC_SEG0_SRC_SEL, opp_id[0],
-				OPTC_SEG1_SRC_SEL, opp_id[1],
-				OPTC_SEG2_SRC_SEL, opp_id[2],
-				OPTC_SEG3_SRC_SEL, opp_id[3]);
-	}
-
-	REG_UPDATE(OPTC_WIDTH_CONTROL,
-			OPTC_SEGMENT_WIDTH, mpcc_hactive);
-
-	REG_UPDATE(OTG_H_TIMING_CNTL,
-			OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
-	optc1->opp_count = opp_cnt;
-}
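To make the mask arithmetic in optc314_set_odm_combine() concrete, here is a standalone worked example for the two-OPP branch (purely illustrative, not driver code): a 3840-pixel-wide timing needs ceil(3840/2048) = 2 memory instances, so each OPP claims one bit of its 2-bit group in OPTC_MEM_SEL:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the odm_mem_count/memory_mask computation above for opp_cnt == 2. */
int main(void)
{
	int h_active = 3840;                          /* 4k-wide timing */
	int opp_id[2] = { 0, 1 };                     /* two OPPs feeding one OTG */
	int odm_mem_count = (h_active + 2047) / 2048; /* ceil(3840/2048) = 2 */
	uint32_t memory_mask;

	if (odm_mem_count <= 2)
		memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2);
	else
		memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);

	/* Prints 0x5: one memory instance for OPP0 (bit 0), one for OPP1 (bit 2). */
	printf("memory_mask = 0x%x\n", (unsigned)memory_mask);
	return 0;
}

A wider timing (say 5120 pixels) would need three instances, fall into the "<= 4" branch, and claim both bits of each OPP's group (mask 0xf for this opp_id pair).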
-
-static bool optc314_enable_crtc(struct timing_generator *optc)
-{
-	struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
-	/* opp instance for OTG, 1 to 1 mapping and odm will adjust */
-	REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
-			OPTC_SEG0_SRC_SEL, optc->inst);
-
-	/* VTG enable first is for HW workaround */
-	REG_UPDATE(CONTROL,
-			VTG0_ENABLE, 1);
-
-	REG_SEQ_START();
-
-	/* Enable CRTC */
-	REG_UPDATE_2(OTG_CONTROL,
-			OTG_DISABLE_POINT_CNTL, 2,
-			OTG_MASTER_EN, 1);
-
-	REG_SEQ_SUBMIT();
-	REG_SEQ_WAIT_DONE();
-
-	return true;
-}
-
-/* disable_crtc */
-static bool optc314_disable_crtc(struct timing_generator *optc)
-{
-	struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
-	/* disable otg request until end of the first line
-	 * in the vertical blank region
-	 */
-	REG_UPDATE(OTG_CONTROL,
-			OTG_MASTER_EN, 0);
-
-	REG_UPDATE(CONTROL,
-			VTG0_ENABLE, 0);
-
-	/* CRTC disabled, so disable clock. */
-	REG_WAIT(OTG_CLOCK_CONTROL,
-			OTG_BUSY, 0,
-			1, 100000);
-
-	return true;
-}
-
-static void optc314_phantom_crtc_post_enable(struct timing_generator *optc)
-{
-	struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
-	/* Disable immediately. */
-	REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 0, OTG_MASTER_EN, 0);
-
-	/* CRTC disabled, so disable clock. */
-	REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
-}
-
-static void optc314_set_odm_bypass(struct timing_generator *optc,
-		const struct dc_crtc_timing *dc_crtc_timing)
-{
-	struct optc *optc1 = DCN10TG_FROM_TG(optc);
-	enum h_timing_div_mode h_div = H_TIMING_NO_DIV;
-
-	REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
-			OPTC_NUM_OF_INPUT_SEGMENT, 0,
-			OPTC_SEG0_SRC_SEL, optc->inst,
-			OPTC_SEG1_SRC_SEL, 0xf,
-			OPTC_SEG2_SRC_SEL, 0xf,
-			OPTC_SEG3_SRC_SEL, 0xf
-			);
-
-	h_div = optc1_is_two_pixels_per_containter(dc_crtc_timing);
-	REG_UPDATE(OTG_H_TIMING_CNTL,
-			OTG_H_TIMING_DIV_MODE, h_div);
-
-	REG_SET(OPTC_MEMORY_CONFIG, 0,
-			OPTC_MEM_SEL, 0);
-	optc1->opp_count = 1;
-}
-
-static void optc314_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
-{
-	struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
-	REG_UPDATE(OTG_H_TIMING_CNTL,
-			OTG_H_TIMING_DIV_MODE_MANUAL, manual_mode ? 1 : 0);
-}
-
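The disable paths above lean on REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000), a bounded poll: re-read the field, compare against the expected value, delay, and give up after the retry budget runs out. A standalone sketch of that shape, with stub accessors so it compiles on its own (poll_field_eq, read_reg and delay_us are illustrative names; the real REG_WAIT is a reg_helper.h macro wired to the DC register I/O paths):

#include <stdbool.h>
#include <stdint.h>

/* Stub MMIO accessors so the sketch stands alone; in the driver the
 * macro performs the actual register read and microsecond delay. */
static volatile uint32_t fake_reg;
static uint32_t read_reg(void) { return fake_reg; }
static void delay_us(unsigned int us) { (void)us; }

/* Poll a masked/shifted field until it equals `want`, waiting `us`
 * microseconds between attempts, for at most `retries` attempts. */
static bool poll_field_eq(uint32_t mask, uint32_t shift, uint32_t want,
			  unsigned int us, unsigned int retries)
{
	unsigned int i;

	for (i = 0; i < retries; i++) {
		if (((read_reg() & mask) >> shift) == want)
			return true;
		delay_us(us);
	}
	return false; /* caller decides whether the timeout is fatal */
}

With the parameters used above (1 us delay, 100000 retries) the wait for OTG_BUSY to clear is bounded at roughly 100 ms rather than blocking forever on wedged hardware.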
-
-static struct timing_generator_funcs dcn314_tg_funcs = {
-	.validate_timing = optc1_validate_timing,
-	.program_timing = optc1_program_timing,
-	.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
-	.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
-	.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
-	.program_global_sync = optc1_program_global_sync,
-	.enable_crtc = optc314_enable_crtc,
-	.disable_crtc = optc314_disable_crtc,
-	.immediate_disable_crtc = optc31_immediate_disable_crtc,
-	.phantom_crtc_post_enable = optc314_phantom_crtc_post_enable,
-	/* used by enable_timing_synchronization. Not needed for FPGA */
-	.is_counter_moving = optc1_is_counter_moving,
-	.get_position = optc1_get_position,
-	.get_frame_count = optc1_get_vblank_counter,
-	.get_scanoutpos = optc1_get_crtc_scanoutpos,
-	.get_otg_active_size = optc1_get_otg_active_size,
-	.set_early_control = optc1_set_early_control,
-	/* used by enable_timing_synchronization. Not needed for FPGA */
-	.wait_for_state = optc1_wait_for_state,
-	.set_blank_color = optc3_program_blank_color,
-	.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
-	.triplebuffer_lock = optc3_triplebuffer_lock,
-	.triplebuffer_unlock = optc2_triplebuffer_unlock,
-	.enable_reset_trigger = optc1_enable_reset_trigger,
-	.enable_crtc_reset = optc1_enable_crtc_reset,
-	.disable_reset_trigger = optc1_disable_reset_trigger,
-	.lock = optc3_lock,
-	.unlock = optc1_unlock,
-	.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
-	.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
-	.enable_optc_clock = optc1_enable_optc_clock,
-	.set_drr = optc31_set_drr,
-	.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
-	.set_vtotal_min_max = optc1_set_vtotal_min_max,
-	.set_static_screen_control = optc1_set_static_screen_control,
-	.program_stereo = optc1_program_stereo,
-	.is_stereo_left_eye = optc1_is_stereo_left_eye,
-	.tg_init = optc3_tg_init,
-	.is_tg_enabled = optc1_is_tg_enabled,
-	.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
-	.clear_optc_underflow = optc1_clear_optc_underflow,
-	.setup_global_swap_lock = NULL,
-	.get_crc = optc1_get_crc,
-	.configure_crc = optc1_configure_crc,
-	.set_dsc_config = optc3_set_dsc_config,
-	.get_dsc_status = optc2_get_dsc_status,
-	.set_dwb_source = NULL,
-	.get_optc_source = optc2_get_optc_source,
-	.set_out_mux = optc3_set_out_mux,
-	.set_drr_trigger_window = optc3_set_drr_trigger_window,
-	.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
-	.set_gsl = optc2_set_gsl,
-	.set_gsl_source_select = optc2_set_gsl_source_select,
-	.set_vtg_params = optc1_set_vtg_params,
-	.program_manual_trigger = optc2_program_manual_trigger,
-	.setup_manual_trigger = optc2_setup_manual_trigger,
-	.get_hw_timing = optc1_get_hw_timing,
-	.init_odm = optc3_init_odm,
-	.set_odm_bypass = optc314_set_odm_bypass,
-	.set_odm_combine = optc314_set_odm_combine,
-	.set_h_timing_div_manual_mode = optc314_set_h_timing_div_manual_mode,
-};
-
-void dcn314_timing_generator_init(struct optc *optc1)
-{
-	optc1->base.funcs = &dcn314_tg_funcs;
-
-	optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
-	optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
-
-	optc1->min_h_blank = 32;
-	optc1->min_v_blank = 3;
-	optc1->min_v_blank_interlace = 5;
-	optc1->min_h_sync_width = 4;
-	optc1->min_v_sync_width = 1;
-}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h
deleted file mode 100644
index 99c098e761..0000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h
+++ /dev/null
@@ -1,255 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright 2022 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_OPTC_DCN314_H__ -#define __DC_OPTC_DCN314_H__ - -#include "dcn10/dcn10_optc.h" - -#define OPTC_COMMON_REG_LIST_DCN3_14(inst) \ - SRI(OTG_VSTARTUP_PARAM, OTG, inst),\ - SRI(OTG_VUPDATE_PARAM, OTG, inst),\ - SRI(OTG_VREADY_PARAM, OTG, inst),\ - SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL4, OTG, inst),\ - SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\ - SRI(OTG_H_TOTAL, OTG, inst),\ - SRI(OTG_H_BLANK_START_END, OTG, inst),\ - SRI(OTG_H_SYNC_A, OTG, inst),\ - SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\ - SRI(OTG_H_TIMING_CNTL, OTG, inst),\ - SRI(OTG_V_TOTAL, OTG, inst),\ - SRI(OTG_V_BLANK_START_END, OTG, inst),\ - SRI(OTG_V_SYNC_A, OTG, inst),\ - SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\ - SRI(OTG_CONTROL, OTG, inst),\ - SRI(OTG_STEREO_CONTROL, OTG, inst),\ - SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\ - SRI(OTG_STEREO_STATUS, OTG, inst),\ - SRI(OTG_V_TOTAL_MAX, OTG, inst),\ - SRI(OTG_V_TOTAL_MIN, OTG, inst),\ - SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\ - SRI(OTG_TRIGA_CNTL, OTG, inst),\ - SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ - SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ - SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\ - SRI(OTG_STATUS, OTG, inst),\ - SRI(OTG_STATUS_POSITION, OTG, inst),\ - SRI(OTG_NOM_VERT_POSITION, OTG, inst),\ - SRI(OTG_M_CONST_DTO0, OTG, inst),\ - SRI(OTG_M_CONST_DTO1, OTG, inst),\ - SRI(OTG_CLOCK_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ - SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ - SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ - SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\ - SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\ - SRI(CONTROL, VTG, inst),\ - SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\ - SRI(OTG_GSL_CONTROL, OTG, inst),\ - SRI(OTG_CRC_CNTL, OTG, inst),\ - SRI(OTG_CRC0_DATA_RG, OTG, inst),\ - SRI(OTG_CRC0_DATA_B, OTG, inst),\ - SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\ - SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\ - SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\ - SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\ - SR(GSL_SOURCE_SELECT),\ - SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ - SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ - SRI(OTG_GSL_WINDOW_X, OTG, inst),\ - SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ - SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ - SRI(OTG_DSC_START_POSITION, OTG, inst),\ - SRI(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\ - SRI(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\ - SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ - SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ - SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ - SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ - SRI(OTG_DRR_CONTROL, OTG, inst) - -#define OPTC_COMMON_MASK_SH_LIST_DCN3_14(mask_sh)\ - SF(OTG0_OTG_VSTARTUP_PARAM, 
VSTARTUP_START, mask_sh),\ - SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\ - SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\ - SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\ - SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\ - SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_END_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_END_Y, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_Y, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ - SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ - SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ - SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\ - SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\ - SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ - SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MIN_EN, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\ - 
SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\ - SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\ - SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\ - SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\ - SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\ - SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\ - SF(OTG0_OTG_M_CONST_DTO0, OTG_M_CONST_DTO_PHASE, mask_sh),\ - SF(OTG0_OTG_M_CONST_DTO1, OTG_M_CONST_DTO_MODULO, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ - SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ - SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ - SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ - SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ - SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, 
OTG_CRC0_WINDOWB_X_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\ - SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ - SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG2_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG3_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ - SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ - SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ - SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\ - SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ - SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ - SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ - SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) - -void dcn314_timing_generator_init(struct optc *optc1); - -#endif /* __DC_OPTC_DCN314_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c deleted file mode 100644 index c97391edb5..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ /dev/null @@ -1,2180 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright 2022 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - - -#include "dm_services.h" -#include "dc.h" - -#include "dcn31/dcn31_init.h" -#include "dcn314/dcn314_init.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn314_resource.h" - -#include "dcn20/dcn20_resource.h" -#include "dcn30/dcn30_resource.h" -#include "dcn31/dcn31_resource.h" - -#include "dcn10/dcn10_ipp.h" -#include "dcn30/dcn30_hubbub.h" -#include "dcn31/dcn31_hubbub.h" -#include "dcn30/dcn30_mpc.h" -#include "dcn31/dcn31_hubp.h" -#include "irq/dcn31/irq_service_dcn31.h" -#include "irq/dcn314/irq_service_dcn314.h" -#include "dcn30/dcn30_dpp.h" -#include "dcn314/dcn314_optc.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn30/dcn30_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn30/dcn30_opp.h" -#include "dcn20/dcn20_dsc.h" -#include "dcn30/dcn30_vpg.h" -#include "dcn30/dcn30_afmt.h" -#include "dcn31/dcn31_dio_link_encoder.h" -#include "dcn314/dcn314_dio_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_link_encoder.h" -#include "dcn31/dcn31_apg.h" -#include "dcn31/dcn31_vpg.h" -#include "dcn31/dcn31_afmt.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" -#include "dce110/dce110_resource.h" -#include "dml/display_mode_vba.h" -#include "dml/dcn31/dcn31_fpu.h" -#include "dml/dcn314/dcn314_fpu.h" -#include "dcn314/dcn314_dccg.h" -#include "dcn10/dcn10_resource.h" -#include "dcn31/dcn31_panel_cntl.h" -#include "dcn314/dcn314_hwseq.h" - -#include "dcn30/dcn30_dwb.h" -#include "dcn30/dcn30_mmhubbub.h" - -#include "dcn/dcn_3_1_4_offset.h" -#include "dcn/dcn_3_1_4_sh_mask.h" -#include "dpcs/dpcs_3_1_4_offset.h" -#include "dpcs/dpcs_3_1_4_sh_mask.h" - -#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10 -#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L - -#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0 -#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL - -#include "reg_helper.h" -#include "dce/dmub_abm.h" -#include "dce/dmub_psr.h" -#include "dce/dmub_replay.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" -#include "dml/dcn314/display_mode_vba_314.h" -#include "vm_helper.h" -#include "dcn20/dcn20_vmid.h" - -#include "link_enc_cfg.h" - -#define DCN_BASE__INST0_SEG1 0x000000C0 -#define DCN_BASE__INST0_SEG2 0x000034C0 -#define 
DCN_BASE__INST0_SEG3 0x00009000 - -#define NBIO_BASE__INST0_SEG1 0x00000014 - -#define MAX_INSTANCE 7 -#define MAX_SEGMENT 8 - -#define regBIF_BX2_BIOS_SCRATCH_2 0x003a -#define regBIF_BX2_BIOS_SCRATCH_2_BASE_IDX 1 -#define regBIF_BX2_BIOS_SCRATCH_3 0x003b -#define regBIF_BX2_BIOS_SCRATCH_3_BASE_IDX 1 -#define regBIF_BX2_BIOS_SCRATCH_6 0x003e -#define regBIF_BX2_BIOS_SCRATCH_6_BASE_IDX 1 - -#define DC_LOGGER \ - dc->ctx->logger -#define DC_LOGGER_INIT(logger) - -enum dcn31_clk_src_array_id { - DCN31_CLK_SRC_PLL0, - DCN31_CLK_SRC_PLL1, - DCN31_CLK_SRC_PLL2, - DCN31_CLK_SRC_PLL3, - DCN31_CLK_SRC_PLL4, - DCN30_CLK_SRC_TOTAL -}; - -/* begin ********************* - * macros to expend register list macro defined in HW object header file - */ - -/* DCN */ -/* TODO awful hack. fixup dcn20_dwb.h */ -#undef BASE_INNER -#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ - .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SRIR(var_name, reg_name, block, id)\ - .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_MPC_RMU(reg_name, block, id)\ - .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_DWB(reg_name, temp_name, block, id)\ - .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## temp_name - -#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define DCCG_SRII(reg_name, block, id)\ - .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - reg ## reg_name ## _ ## block ## id - -/* NBIO */ -#define NBIO_BASE_INNER(seg) \ - NBIO_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - .reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX2_ ## reg_name - -/* MMHUB */ -#define MMHUB_BASE_INNER(seg) \ - MMHUB_BASE__INST0_SEG ## seg - -#define MMHUB_BASE(seg) \ - MMHUB_BASE_INNER(seg) - -#define MMHUB_SR(reg_name)\ - .reg_name = MMHUB_BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -/* CLOCK */ -#define CLK_BASE_INNER(seg) \ - CLK_BASE__INST0_SEG ## seg - -#define CLK_BASE(seg) \ - CLK_BASE_INNER(seg) - -#define CLK_SRI(reg_name, block, inst)\ - .reg_name = CLK_BASE(reg ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## _ ## inst ## _ ## reg_name - - -static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_3), - NBIO_SR(BIOS_SCRATCH_6) -}; - -#define clk_src_regs(index, pllid)\ -[index] = {\ - CS_COMMON_REG_LIST_DCN3_0(index, pllid),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, C), - clk_src_regs(3, D), - clk_src_regs(4, E) -}; - -static const 
struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN3_1_4(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN3_1_4(_MASK) -}; - -#define abm_regs(id)\ -[id] = {\ - ABM_DCN302_REG_LIST(id)\ -} - -static const struct dce_abm_registers abm_regs[] = { - abm_regs(0), - abm_regs(1), - abm_regs(2), - abm_regs(3), -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN30(_MASK) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6) -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define vpg_regs(id)\ -[id] = {\ - VPG_DCN31_REG_LIST(id)\ -} - -static const struct dcn31_vpg_registers vpg_regs[] = { - vpg_regs(0), - vpg_regs(1), - vpg_regs(2), - vpg_regs(3), - vpg_regs(4), - vpg_regs(5), - vpg_regs(6), - vpg_regs(7), - vpg_regs(8), - vpg_regs(9), -}; - -static const struct dcn31_vpg_shift vpg_shift = { - DCN31_VPG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_vpg_mask vpg_mask = { - DCN31_VPG_MASK_SH_LIST(_MASK) -}; - -#define afmt_regs(id)\ -[id] = {\ - AFMT_DCN31_REG_LIST(id)\ -} - -static const struct dcn31_afmt_registers afmt_regs[] = { - afmt_regs(0), - afmt_regs(1), - afmt_regs(2), - afmt_regs(3), - afmt_regs(4), - afmt_regs(5) -}; - -static const struct dcn31_afmt_shift afmt_shift = { - DCN31_AFMT_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_afmt_mask afmt_mask = { - DCN31_AFMT_MASK_SH_LIST(_MASK) -}; - -#define apg_regs(id)\ -[id] = {\ - APG_DCN31_REG_LIST(id)\ -} - -static const struct dcn31_apg_registers apg_regs[] = { - apg_regs(0), - apg_regs(1), - apg_regs(2), - apg_regs(3) -}; - -static const struct dcn31_apg_shift apg_shift = { - DCN31_APG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_apg_mask apg_mask = { - DCN31_APG_MASK_SH_LIST(_MASK) -}; - -#define stream_enc_regs(id)\ -[id] = {\ - SE_DCN314_REG_LIST(id)\ -} - -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), - stream_enc_regs(4) -}; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN314(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN314(_MASK) -}; - - -#define aux_regs(id)\ -[id] = {\ - DCN2_AUX_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4) -}; - -#define link_regs(id, phyid)\ -[id] = {\ - LE_DCN31_REG_LIST(id), \ - UNIPHY_DCN2_REG_LIST(phyid), \ -} - -static const struct dce110_aux_registers_shift 
aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -static const struct dcn10_link_enc_registers link_enc_regs[] = { - link_regs(0, A), - link_regs(1, B), - link_regs(2, C), - link_regs(3, D), - link_regs(4, E) -}; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), - DPCS_DCN31_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), - DPCS_DCN31_MASK_SH_LIST(_MASK) -}; - -#define hpo_dp_stream_encoder_reg_list(id)\ -[id] = {\ - DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ -} - -static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { - hpo_dp_stream_encoder_reg_list(0), - hpo_dp_stream_encoder_reg_list(1), - hpo_dp_stream_encoder_reg_list(2), - hpo_dp_stream_encoder_reg_list(3) -}; - -static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) -}; - - -#define hpo_dp_link_encoder_reg_list(id)\ -[id] = {\ - DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ - DCN3_1_RDPCSTX_REG_LIST(0),\ - DCN3_1_RDPCSTX_REG_LIST(1),\ - DCN3_1_RDPCSTX_REG_LIST(2),\ -} - -static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { - hpo_dp_link_encoder_reg_list(0), - hpo_dp_link_encoder_reg_list(1), -}; - -static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { - DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { - DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) -}; - -#define dpp_regs(id)\ -[id] = {\ - DPP_REG_LIST_DCN30(id),\ -} - -static const struct dcn3_dpp_registers dpp_regs[] = { - dpp_regs(0), - dpp_regs(1), - dpp_regs(2), - dpp_regs(3) -}; - -static const struct dcn3_dpp_shift tf_shift = { - DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) -}; - -static const struct dcn3_dpp_mask tf_mask = { - DPP_REG_LIST_SH_MASK_DCN30(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_REG_LIST_DCN30(id),\ -} - -static const struct dcn20_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3) -}; - -static const struct dcn20_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn20_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN20(_MASK) -}; - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST0(id), \ - .AUXN_IMPCAL = 0, \ - .AUXP_IMPCAL = 0, \ - .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4) -}; - -#define dwbc_regs_dcn3(id)\ -[id] = {\ - DWBC_COMMON_REG_LIST_DCN30(id),\ -} - -static const struct dcn30_dwbc_registers dwbc30_regs[] = { - dwbc_regs_dcn3(0), -}; - -static const struct dcn30_dwbc_shift dwbc30_shift = { - DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_dwbc_mask dwbc30_mask = { - DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define mcif_wb_regs_dcn3(id)\ -[id] = {\ - MCIF_WB_COMMON_REG_LIST_DCN30(id),\ -} - -static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { - mcif_wb_regs_dcn3(0) -}; - -static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static 
const struct dcn30_mmhubbub_mask mcif_wb30_mask = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define dsc_regsDCN314(id)\ -[id] = {\ - DSC_REG_LIST_DCN20(id)\ -} - -static const struct dcn20_dsc_registers dsc_regs[] = { - dsc_regsDCN314(0), - dsc_regsDCN314(1), - dsc_regsDCN314(2), - dsc_regsDCN314(3) -}; - -static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) -}; - -static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN20(_MASK) -}; - -static const struct dcn30_mpc_registers mpc_regs = { - MPC_REG_LIST_DCN3_0(0), - MPC_REG_LIST_DCN3_0(1), - MPC_REG_LIST_DCN3_0(2), - MPC_REG_LIST_DCN3_0(3), - MPC_OUT_MUX_REG_LIST_DCN3_0(0), - MPC_OUT_MUX_REG_LIST_DCN3_0(1), - MPC_OUT_MUX_REG_LIST_DCN3_0(2), - MPC_OUT_MUX_REG_LIST_DCN3_0(3), - MPC_RMU_GLOBAL_REG_LIST_DCN3AG, - MPC_RMU_REG_LIST_DCN3AG(0), - MPC_RMU_REG_LIST_DCN3AG(1), - //MPC_RMU_REG_LIST_DCN3AG(2), - MPC_DWB_MUX_REG_LIST_DCN3_0(0), -}; - -static const struct dcn30_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define optc_regs(id)\ -[id] = {OPTC_COMMON_REG_LIST_DCN3_14(id)} - -static const struct dcn_optc_registers optc_regs[] = { - optc_regs(0), - optc_regs(1), - optc_regs(2), - optc_regs(3) -}; - -static const struct dcn_optc_shift optc_shift = { - OPTC_COMMON_MASK_SH_LIST_DCN3_14(__SHIFT) -}; - -static const struct dcn_optc_mask optc_mask = { - OPTC_COMMON_MASK_SH_LIST_DCN3_14(_MASK) -}; - -#define hubp_regs(id)\ -[id] = {\ - HUBP_REG_LIST_DCN30(id)\ -} - -static const struct dcn_hubp2_registers hubp_regs[] = { - hubp_regs(0), - hubp_regs(1), - hubp_regs(2), - hubp_regs(3) -}; - - -static const struct dcn_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN31(__SHIFT) -}; - -static const struct dcn_hubp2_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN31(_MASK) -}; -static const struct dcn_hubbub_registers hubbub_reg = { - HUBBUB_REG_LIST_DCN31(0) -}; - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN31(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN31(_MASK) -}; - -static const struct dccg_registers dccg_regs = { - DCCG_REG_LIST_DCN314() -}; - -static const struct dccg_shift dccg_shift = { - DCCG_MASK_SH_LIST_DCN314(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN314(_MASK) -}; - - -#define SRII2(reg_name_pre, reg_name_post, id)\ - .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ - ## id ## _ ## reg_name_post ## _BASE_IDX) + \ - reg ## reg_name_pre ## id ## _ ## reg_name_post - - -#define HWSEQ_DCN31_REG_LIST()\ - SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ - SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ - SR(DIO_MEM_PWR_CTRL), \ - SR(ODM_MEM_PWR_CTRL3), \ - SR(DMU_MEM_PWR_CNTL), \ - SR(MMHUBBUB_MEM_PWR_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL2), \ - SR(DCFCLK_CNTL),\ - SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ - SRII(PIXEL_RATE_CNTL, OTG, 0), \ - SRII(PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PIXEL_RATE_CNTL, OTG, 3),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ - SR(MICROSECOND_TIME_BASE_DIV), \ - SR(MILLISECOND_TIME_BASE_DIV), \ - SR(DISPCLK_FREQ_CHANGE_CNTL), \ - SR(RBBMIF_TIMEOUT_DIS), \ - SR(RBBMIF_TIMEOUT_DIS_2), \ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_CTRL), \ - 
SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ - SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ - SR(MPC_CRC_CTRL), \ - SR(MPC_CRC_RESULT_GB), \ - SR(MPC_CRC_RESULT_C), \ - SR(MPC_CRC_RESULT_AR), \ - SR(DOMAIN0_PG_CONFIG), \ - SR(DOMAIN1_PG_CONFIG), \ - SR(DOMAIN2_PG_CONFIG), \ - SR(DOMAIN3_PG_CONFIG), \ - SR(DOMAIN16_PG_CONFIG), \ - SR(DOMAIN17_PG_CONFIG), \ - SR(DOMAIN18_PG_CONFIG), \ - SR(DOMAIN19_PG_CONFIG), \ - SR(DOMAIN0_PG_STATUS), \ - SR(DOMAIN1_PG_STATUS), \ - SR(DOMAIN2_PG_STATUS), \ - SR(DOMAIN3_PG_STATUS), \ - SR(DOMAIN16_PG_STATUS), \ - SR(DOMAIN17_PG_STATUS), \ - SR(DOMAIN18_PG_STATUS), \ - SR(DOMAIN19_PG_STATUS), \ - SR(D1VGA_CONTROL), \ - SR(D2VGA_CONTROL), \ - SR(D3VGA_CONTROL), \ - SR(D4VGA_CONTROL), \ - SR(D5VGA_CONTROL), \ - SR(D6VGA_CONTROL), \ - SR(DC_IP_REQUEST_CNTL), \ - SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING), \ - SR(HPO_TOP_HW_CONTROL) - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCN31_REG_LIST() -}; - -#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\ - HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ - HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ - HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ - HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ - HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ - HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ - HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ - HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ - HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN31_MASK_SH_LIST(_MASK) -}; -#define vmid_regs(id)\ -[id] = {\ - 
DCN20_VMID_REG_LIST(id)\ -} - -static const struct dcn_vmid_registers vmid_regs[] = { - vmid_regs(0), - vmid_regs(1), - vmid_regs(2), - vmid_regs(3), - vmid_regs(4), - vmid_regs(5), - vmid_regs(6), - vmid_regs(7), - vmid_regs(8), - vmid_regs(9), - vmid_regs(10), - vmid_regs(11), - vmid_regs(12), - vmid_regs(13), - vmid_regs(14), - vmid_regs(15) -}; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -static const struct resource_caps res_cap_dcn314 = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 5, - .num_stream_encoder = 5, - .num_dig_link_enc = 5, - .num_hpo_dp_stream_encoder = 4, - .num_hpo_dp_link_encoder = 2, - .num_pll = 5, - .num_dwb = 1, - .num_ddc = 5, - .num_vmid = 16, - .num_mpc_3dlut = 2, - .num_dsc = 4, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true, - .ayuv = false, - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 16000 - }, - - // 6:1 downscaling ratio: 1000/6 = 166.666 - // 4:1 downscaling ratio for ARGB888 to prevent underflow during P010 playback: 1000/4 = 250 - .max_downscale_factor = { - .argb8888 = 250, - .nv12 = 167, - .fp16 = 167 - }, - 64, - 64 -}; - -static const struct dc_debug_options debug_defaults_drv = { - .disable_z10 = false, - .enable_z9_disable_interface = true, - .minimum_z8_residency_time = 2100, - .psr_skip_crtc_disable = true, - .replay_skip_crtc_disabled = true, - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_dpp_power_gate = false, - .disable_hubp_power_gate = false, - .disable_pplib_clock_request = false, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, - .performance_trace = false, - .max_downscale_src_width = 4096,/*upto true 4k*/ - .disable_pplib_wm_range = false, - .scl_reset_length10 = true, - .sanity_checks = true, - .underflow_assert_delay_us = 0xFFFFFFFF, - .dwb_fi_phase = -1, // -1 = disable, - .dmub_command_table = true, - .pstate_enabled = true, - .use_max_lb = true, - .enable_mem_low_power = { - .bits = { - .vga = true, - .i2c = true, - .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled - .dscl = true, - .cm = true, - .mpc = true, - .optc = true, - .vpg = true, - .afmt = true, - } - }, - - .root_clock_optimization = { - .bits = { - .dpp = true, - .dsc = true, - .hdmistream = true, - .hdmichar = true, - .dpstream = true, - .symclk32_se = false, - .symclk32_le = true, - .symclk_fe = true, - .physymclk = true, - .dpiasymclk = true, - } - }, - - .seamless_boot_odm_combine = true, - .using_dml2 = false, -}; - -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - -static const struct dc_panel_config panel_config_defaults = { - .psr = { - 
.disable_psr = false, - .disallow_psrsu = false, - .disallow_replay = false, - }, - .ilr = { - .optimize_edp_link_rate = true, - }, -}; - -static void dcn31_dpp_destroy(struct dpp **dpp) -{ - kfree(TO_DCN20_DPP(*dpp)); - *dpp = NULL; -} - -static struct dpp *dcn31_dpp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn3_dpp *dpp = - kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); - - if (!dpp) - return NULL; - - if (dpp3_construct(dpp, ctx, inst, - &dpp_regs[inst], &tf_shift, &tf_mask)) - return &dpp->base; - - BREAK_TO_DEBUGGER(); - kfree(dpp); - return NULL; -} - -static struct output_pixel_processor *dcn31_opp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_opp *opp = - kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); - - if (!opp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn20_opp_construct(opp, ctx, inst, - &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -static struct dce_aux *dcn31_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5), -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -/* ========================================================== */ - -/* - * DPIA index | Preferred Encoder | Host Router - * 0 | C | 0 - * 1 | First Available | 0 - * 2 | D | 1 - * 3 | First Available | 1 - */ -/* ========================================================== */ -static const enum engine_id dpia_to_preferred_enc_id_table[] = { - ENGINE_ID_DIGC, - ENGINE_ID_DIGC, - ENGINE_ID_DIGD, - ENGINE_ID_DIGD -}; - -static enum engine_id dcn314_get_preferred_eng_id_dpia(unsigned int dpia_index) -{ - return dpia_to_preferred_enc_id_table[dpia_index]; -} - -static struct dce_i2c_hw *dcn31_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} -static struct mpc *dcn31_mpc_create( - struct dc_context *ctx, - int num_mpcc, - int num_rmu) -{ - struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), - GFP_KERNEL); - - if (!mpc30) - return NULL; - - dcn30_mpc_construct(mpc30, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - num_mpcc, - num_rmu); - - return &mpc30->base; -} - -static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) -{ - int i; - - struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), - GFP_KERNEL); - - if (!hubbub3) - return NULL; - - hubbub31_construct(hubbub3, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask, - dcn3_14_ip.det_buffer_size_kbytes, - dcn3_14_ip.pixel_chunk_size_kbytes, - dcn3_14_ip.config_return_buffer_size_in_kbytes); - - - for (i = 0; i < res_cap_dcn314.num_vmid; i++) { - struct dcn20_vmid *vmid = 
&hubbub3->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - - return &hubbub3->base; -} - -static struct timing_generator *dcn31_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &optc_regs[instance]; - tgn10->tg_shift = &optc_shift; - tgn10->tg_mask = &optc_mask; - - dcn314_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -static struct link_encoder *dcn31_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - - if (!enc20) - return NULL; - - dcn31_link_encoder_construct(enc20, - enc_init_data, - &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc20->enc10.base; -} - -/* Create a minimal link encoder object not associated with a particular - * physical connector. - * resource_funcs.link_enc_create_minimal - */ -static struct link_encoder *dcn31_link_enc_create_minimal( - struct dc_context *ctx, enum engine_id eng_id) -{ - struct dcn20_link_encoder *enc20; - - if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) - return NULL; - - enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) - return NULL; - - dcn31_link_encoder_construct_minimal( - enc20, - ctx, - &link_enc_feature, - &link_enc_regs[eng_id - ENGINE_ID_DIGA], - eng_id); - - return &enc20->enc10.base; -} - -static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dcn31_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dcn31_panel_cntl_construct(panel_cntl, init_data); - - return &panel_cntl->base; -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); - -} - -static struct audio *dcn31_create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct vpg *dcn31_vpg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); - - if (!vpg31) - return NULL; - - vpg31_construct(vpg31, ctx, inst, - &vpg_regs[inst], - &vpg_shift, - &vpg_mask); - - return &vpg31->base; -} - -static struct afmt *dcn31_afmt_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); - - if (!afmt31) - return NULL; - - afmt31_construct(afmt31, ctx, inst, - &afmt_regs[inst], - &afmt_shift, - &afmt_mask); - - 
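/*
 * Illustrative sketch, not verbatim from the deleted file: every DIO
 * sub-block creator here (VPG, AFMT, APG, DPP, OPP, ...) follows the same
 * allocate-construct-return shape, e.g. for a hypothetical AFMT instance 0:
 *
 *	struct dcn31_afmt *a = kzalloc(sizeof(*a), GFP_KERNEL);
 *
 *	if (!a)
 *		return NULL;
 *	afmt31_construct(a, ctx, 0, &afmt_regs[0], &afmt_shift, &afmt_mask);
 *	return &a->base;
 *
 * A NULL return is the only failure signal; callers unwind on it.
 */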
// Light sleep by default, no need to power down here - - return &afmt31->base; -} - -static struct apg *dcn31_apg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); - - if (!apg31) - return NULL; - - apg31_construct(apg31, ctx, inst, - &apg_regs[inst], - &apg_shift, - &apg_mask); - - return &apg31->base; -} - -static struct stream_encoder *dcn314_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1; - struct vpg *vpg; - struct afmt *afmt; - int vpg_inst; - int afmt_inst; - - /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id < ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else - return NULL; - - enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - vpg = dcn31_vpg_create(ctx, vpg_inst); - afmt = dcn31_afmt_create(ctx, afmt_inst); - - if (!enc1 || !vpg || !afmt) { - kfree(enc1); - kfree(vpg); - kfree(afmt); - return NULL; - } - - dcn314_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, - eng_id, vpg, afmt, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; - struct vpg *vpg; - struct apg *apg; - uint32_t hpo_dp_inst; - uint32_t vpg_inst; - uint32_t apg_inst; - - ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); - hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; - - /* Mapping of VPG register blocks to HPO DP block instance: - * VPG[6] -> HPO_DP[0] - * VPG[7] -> HPO_DP[1] - * VPG[8] -> HPO_DP[2] - * VPG[9] -> HPO_DP[3] - */ - //Uses offset index 5-8, but actually maps to vpg_inst 6-9 - vpg_inst = hpo_dp_inst + 5; - - /* Mapping of APG register blocks to HPO DP block instance: - * APG[0] -> HPO_DP[0] - * APG[1] -> HPO_DP[1] - * APG[2] -> HPO_DP[2] - * APG[3] -> HPO_DP[3] - */ - apg_inst = hpo_dp_inst; - - /* allocate HPO stream encoder and create VPG sub-block */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); - vpg = dcn31_vpg_create(ctx, vpg_inst); - apg = dcn31_apg_create(ctx, apg_inst); - - if (!hpo_dp_enc31 || !vpg || !apg) { - kfree(hpo_dp_enc31); - kfree(vpg); - kfree(apg); - return NULL; - } - - dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, - hpo_dp_inst, eng_id, vpg, apg, - &hpo_dp_stream_enc_regs[hpo_dp_inst], - &hpo_dp_se_shift, &hpo_dp_se_mask); - - return &hpo_dp_enc31->base; -} - -static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( - uint8_t inst, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; - - /* allocate HPO link encoder */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); - - hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, - &hpo_dp_link_enc_regs[inst], - &hpo_dp_le_shift, &hpo_dp_le_mask); - - return &hpo_dp_enc31->base; -} - -static struct dce_hwseq *dcn314_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn31_create_audio, - .create_stream_encoder = dcn314_stream_encoder_create, 
- .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn314_hwseq_create, -}; - -static void dcn314_resource_destruct(struct dcn314_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) { - if (pool->base.stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); - pool->base.stream_enc[i]->vpg = NULL; - } - if (pool->base.stream_enc[i]->afmt != NULL) { - kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); - pool->base.stream_enc[i]->afmt = NULL; - } - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { - if (pool->base.hpo_dp_stream_enc[i] != NULL) { - if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); - pool->base.hpo_dp_stream_enc[i]->vpg = NULL; - } - if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { - kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); - pool->base.hpo_dp_stream_enc[i]->apg = NULL; - } - kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); - pool->base.hpo_dp_stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { - if (pool->base.hpo_dp_link_enc[i] != NULL) { - kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); - pool->base.hpo_dp_link_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - if (pool->base.dscs[i] != NULL) - dcn20_dsc_destroy(&pool->base.dscs[i]); - } - - if (pool->base.mpc != NULL) { - kfree(TO_DCN20_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.dpps[i] != NULL) - dcn31_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) - dal_irq_service_destroy(&pool->base.irqs); - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dwb; i++) { - if (pool->base.dwbc[i] != NULL) { - kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); - pool->base.dwbc[i] = NULL; - } - if (pool->base.mcif_wb[i] != NULL) { - kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); - pool->base.mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; 
i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn20_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { - if (pool->base.mpc_lut[i] != NULL) { - dc_3dlut_func_release(pool->base.mpc_lut[i]); - pool->base.mpc_lut[i] = NULL; - } - if (pool->base.mpc_shaper[i] != NULL) { - dc_transfer_func_release(pool->base.mpc_shaper[i]); - pool->base.mpc_shaper[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn20_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.multiple_abms[i] != NULL) - dce_abm_destroy(&pool->base.multiple_abms[i]); - } - - if (pool->base.psr != NULL) - dmub_psr_destroy(&pool->base.psr); - - if (pool->base.replay != NULL) - dmub_replay_destroy(&pool->base.replay); - - if (pool->base.dccg != NULL) - dcn_dccg_destroy(&pool->base.dccg); -} - -static struct hubp *dcn31_hubp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); - - if (!hubp2) - return NULL; - - if (hubp31_construct(hubp2, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp2->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp2); - return NULL; -} - -static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), - GFP_KERNEL); - - if (!dwbc30) { - dm_error("DC: failed to create dwbc30!\n"); - return false; - } - - dcn30_dwbc_construct(dwbc30, ctx, - &dwbc30_regs[i], - &dwbc30_shift, - &dwbc30_mask, - i); - - pool->dwbc[i] = &dwbc30->base; - } - return true; -} - -static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), - GFP_KERNEL); - - if (!mcif_wb30) { - dm_error("DC: failed to create mcif_wb30!\n"); - return false; - } - - dcn30_mmhubbub_construct(mcif_wb30, ctx, - &mcif_wb30_regs[i], - &mcif_wb30_shift, - &mcif_wb30_mask, - i); - - pool->mcif_wb[i] = &mcif_wb30->base; - } - return true; -} - -static struct display_stream_compressor *dcn314_dsc_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - return &dsc->base; -} - -static void dcn314_destroy_resource_pool(struct resource_pool **pool) -{ - struct dcn314_resource_pool *dcn314_pool = TO_DCN314_RES_POOL(*pool); - - dcn314_resource_destruct(dcn314_pool); - kfree(dcn314_pool); - *pool = NULL; -} - -static struct clock_source *dcn31_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn31_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - BREAK_TO_DEBUGGER(); - kfree(clk_src); 
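/*
 * Illustrative sketch, not verbatim from the deleted file: the destroy path
 * mirrors these create helpers. dcn314_destroy_resource_pool() recovers the
 * wrapper from its embedded base member with the container_of()-based
 * TO_DCN314_RES_POOL() macro (defined in dcn314_resource.h below) before
 * tearing it down:
 *
 *	struct dcn314_resource_pool *p = TO_DCN314_RES_POOL(*pool);
 *
 *	dcn314_resource_destruct(p);	// free sub-blocks first
 *	kfree(p);			// then the wrapper itself
 *	*pool = NULL;			// callers must not reuse the pointer
 */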
- return NULL; -} - -static int dcn314_populate_dml_pipes_from_context( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate) -{ - int pipe_cnt; - - DC_FP_START(); - pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate); - DC_FP_END(); - - return pipe_cnt; -} - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap -}; - -static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) -{ - DC_FP_START(); - dcn314_update_bw_bounding_box_fpu(dc, bw_params); - DC_FP_END(); -} - -static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_config) -{ - *panel_config = panel_config_defaults; -} - -static bool filter_modes_for_single_channel_workaround(struct dc *dc, - struct dc_state *context) -{ - // Filter 2K@240Hz+8K@24fps above combination timing if memory only has single dimm LPDDR - if (dc->clk_mgr->bw_params->vram_type == 34 && - dc->clk_mgr->bw_params->num_channels < 2 && - context->stream_count > 1) { - int total_phy_pix_clk = 0; - - for (int i = 0; i < context->stream_count; i++) - if (context->res_ctx.pipe_ctx[i].stream) - total_phy_pix_clk += context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; - - if (total_phy_pix_clk >= (1148928+826260)) //2K@240Hz+8K@24fps - return true; - } - return false; -} - -bool dcn314_validate_bandwidth(struct dc *dc, - struct dc_state *context, - bool fast_validate) -{ - bool out = false; - - BW_VAL_TRACE_SETUP(); - - int vlevel = 0; - int pipe_cnt = 0; - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); - DC_LOGGER_INIT(dc->ctx->logger); - - BW_VAL_TRACE_COUNT(); - - if (filter_modes_for_single_channel_workaround(dc, context)) - goto validate_fail; - - DC_FP_START(); - // do not support self refresh only - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false); - DC_FP_END(); - - // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg - if (pipe_cnt == 0) - fast_validate = false; - - if (!out) - goto validate_fail; - - BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - - if (fast_validate) { - BW_VAL_TRACE_SKIP(fast); - goto validate_out; - } - if (dc->res_pool->funcs->calculate_wm_and_dlg) - dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); - - BW_VAL_TRACE_END_WATERMARKS(); - - goto validate_out; - -validate_fail: - DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", - dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); - - BW_VAL_TRACE_SKIP(fail); - out = false; - -validate_out: - kfree(pipes); - - BW_VAL_TRACE_FINISH(); - - return out; -} - -static struct resource_funcs dcn314_res_pool_funcs = { - .destroy = dcn314_destroy_resource_pool, - .link_enc_create = dcn31_link_encoder_create, - .link_enc_create_minimal = dcn31_link_enc_create_minimal, - .link_encs_assign = link_enc_cfg_link_encs_assign, - .link_enc_unassign = link_enc_cfg_link_enc_unassign, - .panel_cntl_create = dcn31_panel_cntl_create, - .validate_bandwidth = dcn314_validate_bandwidth, - .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, - .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, - .populate_dml_pipes = dcn314_populate_dml_pipes_from_context, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, - .release_pipe = dcn20_release_pipe, - .add_stream_to_ctx = dcn30_add_stream_to_ctx, - 
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, - .set_mcif_arb_params = dcn30_set_mcif_arb_params, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, - .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, - .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, - .update_bw_bounding_box = dcn314_update_bw_bounding_box, - .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, - .get_panel_config_defaults = dcn314_get_panel_config_defaults, - .get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia, -}; - -static struct clock_source *dcn30_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn31_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - BREAK_TO_DEBUGGER(); - kfree(clk_src); - return NULL; -} - -static bool dcn314_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dcn314_resource_pool *pool) -{ - int i; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data init_data; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap_dcn314; - pool->base.funcs = &dcn314_res_pool_funcs; - - /************************************************* - * Resource + ASIC cap hardcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = pool->base.res_cap->num_timing_generator; - pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; - dc->caps.max_downscale_ratio = 400; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 100; - dc->caps.max_cursor_size = 256; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dmdata_alloc_size = 2048; - dc->caps.max_slave_planes = 2; - dc->caps.max_slave_yuv_planes = 2; - dc->caps.max_slave_rgb_planes = 2; - dc->caps.post_blend_color_processing = true; - dc->caps.force_dp_tps4_for_cp2520 = true; - if (dc->config.forceHBR2CP2520) - dc->caps.force_dp_tps4_for_cp2520 = false; - dc->caps.dp_hpo = true; - dc->caps.dp_hdmi21_pcon_support = true; - dc->caps.edp_dsc_support = true; - dc->caps.extended_aux_timeout_support = true; - dc->caps.dmcub_support = true; - dc->caps.is_apu = true; - dc->caps.seamless_odm = true; - - dc->caps.zstate_support = true; - - /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 0; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; - dc->caps.color.dpp.dgam_rom_caps.pq = 1; - dc->caps.color.dpp.dgam_rom_caps.hlg = 1; - dc->caps.color.dpp.post_csc = 1; - dc->caps.color.dpp.gamma_corr = 1; - dc->caps.color.dpp.dgam_rom_for_yuv = 0; - - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; - // no OGAM ROM on DCN314 - dc->caps.color.dpp.ogam_rom_caps.srgb = 0; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; -
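/*
 * Illustrative sketch, not verbatim from the deleted file: the
 * dcn314_res_pool_funcs table above is the per-ASIC vtable that keeps the
 * core generation-agnostic; DCN3.1.4 installs its own bandwidth/DML hooks
 * while reusing DCN1/2/3 helpers for the rest. Core code only dispatches
 * through it, e.g.:
 *
 *	const struct resource_funcs *f = dc->res_pool->funcs;
 *
 *	if (!f->validate_bandwidth(dc, context, fast_validate))
 *		return false;	// same call site for every DCN generation
 */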
dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 0; - - dc->caps.color.mpc.gamut_remap = 1; - dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 - dc->caps.color.mpc.ogam_ram = 1; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; - dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 1; - - dc->caps.max_disp_clock_khz_at_vmin = 650000; - - /* Use pipe context based otg sync logic */ - dc->config.use_pipe_ctx_sync_logic = true; - - /* read VBIOS LTTPR caps */ - { - if (ctx->dc_bios->funcs->get_lttpr_caps) { - enum bp_result bp_query_result; - uint8_t is_vbios_lttpr_enable = 0; - - bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); - dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; - } - - /* interop bit is implicit */ - { - dc->caps.vbios_lttpr_aware = true; - } - } - - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) - dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; - - /* Disable pipe power gating */ - dc->debug.disable_dpp_power_gate = true; - dc->debug.disable_hubp_power_gate = true; - - /* Disable root clock optimization */ - dc->debug.root_clock_optimization.u32All = 0; - - // Init the vm_helper - if (dc->vm_helper) - vm_helper_init(dc->vm_helper, 16); - - /************************************************* - * Create resources * - *************************************************/ - - /* Clock Sources for Pixel Clock*/ - pool->base.clock_sources[DCN31_CLK_SRC_PLL0] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL1] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = - dcn30_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - - pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL; - - /* todo: not reuse phy_pll registers */ - pool->base.dp_clock_source = - dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - pool->base.dccg = dccg314_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->base.dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dcn314_create(&init_data); - if (!pool->base.irqs) - goto create_fail; - - /* HUBBUB */ - pool->base.hubbub = dcn31_hubbub_create(ctx); - if (pool->base.hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - /* HUBPs, DPPs, OPPs and TGs */ - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.hubps[i] = 
dcn31_hubp_create(ctx, i); - if (pool->base.hubps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create hubps!\n"); - goto create_fail; - } - - pool->base.dpps[i] = dcn31_dpp_create(ctx, i); - if (pool->base.dpps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create dpps!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - pool->base.opps[i] = dcn31_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.timing_generators[i] = dcn31_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - pool->base.timing_generator_count = i; - - /* PSR */ - pool->base.psr = dmub_psr_create(ctx); - if (pool->base.psr == NULL) { - dm_error("DC: failed to create psr obj!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* Replay */ - pool->base.replay = dmub_replay_create(ctx); - if (pool->base.replay == NULL) { - dm_error("DC: failed to create replay obj!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* ABM */ - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.multiple_abms[i] = dmub_abm_create(ctx, - &abm_regs[i], - &abm_shift, - &abm_mask); - if (pool->base.multiple_abms[i] == NULL) { - dm_error("DC: failed to create abm for pipe %d!\n", i); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* MPC and DSC */ - pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); - if (pool->base.mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - pool->base.dscs[i] = dcn314_dsc_create(ctx, i); - if (pool->base.dscs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create display stream compressor %d!\n", i); - goto create_fail; - } - } - - /* DWB and MMHUBBUB */ - if (!dcn31_dwbc_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dwbc!\n"); - goto create_fail; - } - - if (!dcn31_mmhubbub_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mcif_wb!\n"); - goto create_fail; - } - - /* AUX and I2C */ - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto create_fail; - } - pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create hw i2c!!\n"); - goto create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - /* DCN314 has 4 DPIA */ - pool->base.usb4_dpia_count = 4; - - /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto create_fail; - - /* HW Sequencer and Plane caps */ - dcn314_hw_sequencer_construct(dc); - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - dc->dcn_ip->max_num_dpp = dcn3_14_ip.max_num_dpp; - - return true; - -create_fail: - - dcn314_resource_destruct(pool); 
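/*
 * Illustrative sketch, not verbatim from the deleted file: every allocation
 * in dcn314_resource_construct() funnels failure to this single create_fail
 * label, and dcn314_resource_destruct() NULL-checks each pool member, so one
 * unwind path covers any partially built pool:
 *
 *	pool->base.hubbub = dcn31_hubbub_create(ctx);
 *	if (pool->base.hubbub == NULL)
 *		goto create_fail;	// destruct skips members still NULL
 */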
- - return false; -} - -struct resource_pool *dcn314_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc) -{ - struct dcn314_resource_pool *pool = - kzalloc(sizeof(struct dcn314_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dcn314_resource_construct(init_data->num_virtual_links, dc, pool)) - return &pool->base; - - BREAK_TO_DEBUGGER(); - kfree(pool); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h deleted file mode 100644 index 49ffe71018..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h +++ /dev/null @@ -1,50 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2022 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef _DCN314_RESOURCE_H_ -#define _DCN314_RESOURCE_H_ - -#include "core_types.h" - -extern struct _vcs_dpi_ip_params_st dcn3_14_ip; -extern struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc; - -#define TO_DCN314_RES_POOL(pool)\ - container_of(pool, struct dcn314_resource_pool, base) - -struct dcn314_resource_pool { - struct resource_pool base; -}; - -bool dcn314_validate_bandwidth(struct dc *dc, - struct dc_state *context, - bool fast_validate); - -struct resource_pool *dcn314_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); - -#endif /* _DCN314_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile b/drivers/gpu/drm/amd/display/dc/dcn315/Makefile deleted file mode 100644 index 59381d2480..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -# -# Copyright © 2021 Advanced Micro Devices, Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# Authors: AMD -# -# Makefile for dcn315. - -DCN315 = dcn315_resource.o - -AMD_DAL_DCN315 = $(addprefix $(AMDDALPATH)/dc/dcn315/,$(DCN315)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCN315) diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c deleted file mode 100644 index cb8024eee8..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ /dev/null @@ -1,2151 +0,0 @@ -/* - * Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - - -#include "dm_services.h" -#include "dc.h" - -#include "dcn31/dcn31_init.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn315_resource.h" - -#include "dcn20/dcn20_resource.h" -#include "dcn30/dcn30_resource.h" -#include "dcn31/dcn31_resource.h" - -#include "dcn10/dcn10_ipp.h" -#include "dcn30/dcn30_hubbub.h" -#include "dcn31/dcn31_hubbub.h" -#include "dcn30/dcn30_mpc.h" -#include "dcn31/dcn31_hubp.h" -#include "irq/dcn315/irq_service_dcn315.h" -#include "dcn30/dcn30_dpp.h" -#include "dcn31/dcn31_optc.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn30/dcn30_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn30/dcn30_opp.h" -#include "dcn20/dcn20_dsc.h" -#include "dcn30/dcn30_vpg.h" -#include "dcn30/dcn30_afmt.h" -#include "dcn30/dcn30_dio_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_link_encoder.h" -#include "dcn31/dcn31_apg.h" -#include "dcn31/dcn31_dio_link_encoder.h" -#include "dcn31/dcn31_vpg.h" -#include "dcn31/dcn31_afmt.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" -#include "dce110/dce110_resource.h" -#include "dml/display_mode_vba.h" -#include "dml/dcn31/dcn31_fpu.h" -#include "dcn31/dcn31_dccg.h" -#include "dcn10/dcn10_resource.h" -#include "dcn31/dcn31_panel_cntl.h" - -#include "dcn30/dcn30_dwb.h" -#include "dcn30/dcn30_mmhubbub.h" - -#include "dcn/dcn_3_1_5_offset.h" -#include "dcn/dcn_3_1_5_sh_mask.h" -#include "dpcs/dpcs_4_2_2_offset.h" -#include "dpcs/dpcs_4_2_2_sh_mask.h" - -#define NBIO_BASE__INST0_SEG0 0x00000000 -#define NBIO_BASE__INST0_SEG1 0x00000014 -#define NBIO_BASE__INST0_SEG2 0x00000D20 -#define NBIO_BASE__INST0_SEG3 0x00010400 -#define NBIO_BASE__INST0_SEG4 0x0241B000 -#define NBIO_BASE__INST0_SEG5 0x04040000 - -#define DPCS_BASE__INST0_SEG0 0x00000012 -#define DPCS_BASE__INST0_SEG1 0x000000C0 -#define DPCS_BASE__INST0_SEG2 0x000034C0 -#define DPCS_BASE__INST0_SEG3 0x00009000 -#define DPCS_BASE__INST0_SEG4 0x02403C00 -#define DPCS_BASE__INST0_SEG5 0 - -#define DCN_BASE__INST0_SEG0 0x00000012 -#define DCN_BASE__INST0_SEG1 0x000000C0 -#define DCN_BASE__INST0_SEG2 0x000034C0 -#define DCN_BASE__INST0_SEG3 0x00009000 -#define DCN_BASE__INST0_SEG4 0x02403C00 -#define DCN_BASE__INST0_SEG5 0 - -#define regBIF_BX_PF2_RSMU_INDEX 0x0000 -#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1 -#define regBIF_BX_PF2_RSMU_DATA 0x0001 -#define regBIF_BX_PF2_RSMU_DATA_BASE_IDX 1 -#define regBIF_BX2_BIOS_SCRATCH_6 0x003e -#define regBIF_BX2_BIOS_SCRATCH_6_BASE_IDX 1 -#define BIF_BX2_BIOS_SCRATCH_6__BIOS_SCRATCH_6__SHIFT 0x0 -#define BIF_BX2_BIOS_SCRATCH_6__BIOS_SCRATCH_6_MASK 0xFFFFFFFFL -#define regBIF_BX2_BIOS_SCRATCH_2 0x003a -#define regBIF_BX2_BIOS_SCRATCH_2_BASE_IDX 1 -#define BIF_BX2_BIOS_SCRATCH_2__BIOS_SCRATCH_2__SHIFT 0x0 -#define BIF_BX2_BIOS_SCRATCH_2__BIOS_SCRATCH_2_MASK 0xFFFFFFFFL -#define regBIF_BX2_BIOS_SCRATCH_3 0x003b -#define regBIF_BX2_BIOS_SCRATCH_3_BASE_IDX 1 -#define BIF_BX2_BIOS_SCRATCH_3__BIOS_SCRATCH_3__SHIFT 0x0 -#define BIF_BX2_BIOS_SCRATCH_3__BIOS_SCRATCH_3_MASK 0xFFFFFFFFL - -#define regDCHUBBUB_DEBUG_CTRL_0 0x04d6 -#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2 -#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10 -#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L - -#include "reg_helper.h" -#include "dce/dmub_abm.h" -#include "dce/dmub_psr.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" - -#include 
"dml/dcn30/display_mode_vba_30.h" -#include "vm_helper.h" -#include "dcn20/dcn20_vmid.h" - -#include "link_enc_cfg.h" - -#define DCN3_15_MAX_DET_SIZE 384 -#define DCN3_15_CRB_SEGMENT_SIZE_KB 64 -#define DCN3_15_MAX_DET_SEGS (DCN3_15_MAX_DET_SIZE / DCN3_15_CRB_SEGMENT_SIZE_KB) -/* Minimum 3 extra segments need to be in compbuf and claimable to guarantee seamless mpo transitions */ -#define MIN_RESERVED_DET_SEGS 3 - -enum dcn31_clk_src_array_id { - DCN31_CLK_SRC_PLL0, - DCN31_CLK_SRC_PLL1, - DCN31_CLK_SRC_PLL2, - DCN31_CLK_SRC_PLL3, - DCN31_CLK_SRC_PLL4, - DCN30_CLK_SRC_TOTAL -}; - -/* begin ********************* - * macros to expend register list macro defined in HW object header file - */ - -/* DCN */ -#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ - .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SRIR(var_name, reg_name, block, id)\ - .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_MPC_RMU(reg_name, block, id)\ - .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_DWB(reg_name, temp_name, block, id)\ - .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## temp_name - -#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define DCCG_SRII(reg_name, block, id)\ - .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - reg ## reg_name ## _ ## block ## id - -/* NBIO */ -#define NBIO_BASE_INNER(seg) \ - NBIO_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - .reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX2_ ## reg_name - -static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_3), - NBIO_SR(BIOS_SCRATCH_6) -}; - -#define clk_src_regs(index, pllid)\ -[index] = {\ - CS_COMMON_REG_LIST_DCN3_0(index, pllid),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, C), - clk_src_regs(3, D), - clk_src_regs(4, E) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -#define abm_regs(id)\ -[id] = {\ - ABM_DCN302_REG_LIST(id)\ -} - -static const struct dce_abm_registers abm_regs[] = { - abm_regs(0), - abm_regs(1), - abm_regs(2), - abm_regs(3), -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN30(_MASK) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct 
dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6) -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define vpg_regs(id)\ -[id] = {\ - VPG_DCN31_REG_LIST(id)\ -} - -static const struct dcn31_vpg_registers vpg_regs[] = { - vpg_regs(0), - vpg_regs(1), - vpg_regs(2), - vpg_regs(3), - vpg_regs(4), - vpg_regs(5), - vpg_regs(6), - vpg_regs(7), - vpg_regs(8), - vpg_regs(9), -}; - -static const struct dcn31_vpg_shift vpg_shift = { - DCN31_VPG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_vpg_mask vpg_mask = { - DCN31_VPG_MASK_SH_LIST(_MASK) -}; - -#define afmt_regs(id)\ -[id] = {\ - AFMT_DCN31_REG_LIST(id)\ -} - -static const struct dcn31_afmt_registers afmt_regs[] = { - afmt_regs(0), - afmt_regs(1), - afmt_regs(2), - afmt_regs(3), - afmt_regs(4), - afmt_regs(5) -}; - -static const struct dcn31_afmt_shift afmt_shift = { - DCN31_AFMT_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_afmt_mask afmt_mask = { - DCN31_AFMT_MASK_SH_LIST(_MASK) -}; - -#define apg_regs(id)\ -[id] = {\ - APG_DCN31_REG_LIST(id)\ -} - -static const struct dcn31_apg_registers apg_regs[] = { - apg_regs(0), - apg_regs(1), - apg_regs(2), - apg_regs(3) -}; - -static const struct dcn31_apg_shift apg_shift = { - DCN31_APG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_apg_mask apg_mask = { - DCN31_APG_MASK_SH_LIST(_MASK) -}; - -#define stream_enc_regs(id)\ -[id] = {\ - SE_DCN3_REG_LIST(id)\ -} - -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), - stream_enc_regs(4) -}; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - - -#define aux_regs(id)\ -[id] = {\ - DCN2_AUX_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4) -}; - -#define link_regs(id, phyid)\ -[id] = {\ - LE_DCN31_REG_LIST(id), \ - UNIPHY_DCN2_REG_LIST(phyid), \ - DPCS_DCN31_REG_LIST(id), \ -} - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -static const struct dcn10_link_enc_registers link_enc_regs[] = { - link_regs(0, A), - link_regs(1, B), - link_regs(2, C), - link_regs(3, D), - link_regs(4, E) -}; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ - DPCS_DCN31_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ - DPCS_DCN31_MASK_SH_LIST(_MASK) -}; - -#define 
hpo_dp_stream_encoder_reg_list(id)\ -[id] = {\ - DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ -} - -static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { - hpo_dp_stream_encoder_reg_list(0), - hpo_dp_stream_encoder_reg_list(1), - hpo_dp_stream_encoder_reg_list(2), - hpo_dp_stream_encoder_reg_list(3), -}; - -static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) -}; - - -#define hpo_dp_link_encoder_reg_list(id)\ -[id] = {\ - DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ - DCN3_1_RDPCSTX_REG_LIST(0),\ - DCN3_1_RDPCSTX_REG_LIST(1),\ - DCN3_1_RDPCSTX_REG_LIST(2),\ - DCN3_1_RDPCSTX_REG_LIST(3),\ - DCN3_1_RDPCSTX_REG_LIST(4)\ -} - -static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { - hpo_dp_link_encoder_reg_list(0), - hpo_dp_link_encoder_reg_list(1), -}; - -static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { - DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { - DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) -}; - -#define dpp_regs(id)\ -[id] = {\ - DPP_REG_LIST_DCN30(id),\ -} - -static const struct dcn3_dpp_registers dpp_regs[] = { - dpp_regs(0), - dpp_regs(1), - dpp_regs(2), - dpp_regs(3) -}; - -static const struct dcn3_dpp_shift tf_shift = { - DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) -}; - -static const struct dcn3_dpp_mask tf_mask = { - DPP_REG_LIST_SH_MASK_DCN30(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_REG_LIST_DCN30(id),\ -} - -static const struct dcn20_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3) -}; - -static const struct dcn20_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn20_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN20(_MASK) -}; - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST0(id), \ - .AUXN_IMPCAL = 0, \ - .AUXP_IMPCAL = 0, \ - .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4) -}; - -#define dwbc_regs_dcn3(id)\ -[id] = {\ - DWBC_COMMON_REG_LIST_DCN30(id),\ -} - -static const struct dcn30_dwbc_registers dwbc30_regs[] = { - dwbc_regs_dcn3(0), -}; - -static const struct dcn30_dwbc_shift dwbc30_shift = { - DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_dwbc_mask dwbc30_mask = { - DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define mcif_wb_regs_dcn3(id)\ -[id] = {\ - MCIF_WB_COMMON_REG_LIST_DCN30(id),\ -} - -static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { - mcif_wb_regs_dcn3(0) -}; - -static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define dsc_regsDCN20(id)\ -[id] = {\ - DSC_REG_LIST_DCN20(id)\ -} - -static const struct dcn20_dsc_registers dsc_regs[] = { - dsc_regsDCN20(0), - dsc_regsDCN20(1), - dsc_regsDCN20(2) -}; - -static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) -}; - -static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN20(_MASK) -}; - -static const struct dcn30_mpc_registers mpc_regs = { - 
MPC_REG_LIST_DCN3_0(0), - MPC_REG_LIST_DCN3_0(1), - MPC_REG_LIST_DCN3_0(2), - MPC_REG_LIST_DCN3_0(3), - MPC_OUT_MUX_REG_LIST_DCN3_0(0), - MPC_OUT_MUX_REG_LIST_DCN3_0(1), - MPC_OUT_MUX_REG_LIST_DCN3_0(2), - MPC_OUT_MUX_REG_LIST_DCN3_0(3), - MPC_DWB_MUX_REG_LIST_DCN3_0(0), -}; - -static const struct dcn30_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define optc_regs(id)\ -[id] = {OPTC_COMMON_REG_LIST_DCN3_1(id)} - -static const struct dcn_optc_registers optc_regs[] = { - optc_regs(0), - optc_regs(1), - optc_regs(2), - optc_regs(3) -}; - -static const struct dcn_optc_shift optc_shift = { - OPTC_COMMON_MASK_SH_LIST_DCN3_1(__SHIFT) -}; - -static const struct dcn_optc_mask optc_mask = { - OPTC_COMMON_MASK_SH_LIST_DCN3_1(_MASK) -}; - -#define hubp_regs(id)\ -[id] = {\ - HUBP_REG_LIST_DCN30(id)\ -} - -static const struct dcn_hubp2_registers hubp_regs[] = { - hubp_regs(0), - hubp_regs(1), - hubp_regs(2), - hubp_regs(3) -}; - - -static const struct dcn_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN31(__SHIFT) -}; - -static const struct dcn_hubp2_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN31(_MASK) -}; -static const struct dcn_hubbub_registers hubbub_reg = { - HUBBUB_REG_LIST_DCN31(0) -}; - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN31(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN31(_MASK) -}; - -static const struct dccg_registers dccg_regs = { - DCCG_REG_LIST_DCN31() -}; - -static const struct dccg_shift dccg_shift = { - DCCG_MASK_SH_LIST_DCN31(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN31(_MASK) -}; - - -#define SRII2(reg_name_pre, reg_name_post, id)\ - .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ - ## id ## _ ## reg_name_post ## _BASE_IDX) + \ - reg ## reg_name_pre ## id ## _ ## reg_name_post - - -#define HWSEQ_DCN31_REG_LIST()\ - SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ - SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ - SR(DIO_MEM_PWR_CTRL), \ - SR(ODM_MEM_PWR_CTRL3), \ - SR(DMU_MEM_PWR_CNTL), \ - SR(MMHUBBUB_MEM_PWR_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL2), \ - SR(DCFCLK_CNTL),\ - SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ - SRII(PIXEL_RATE_CNTL, OTG, 0), \ - SRII(PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PIXEL_RATE_CNTL, OTG, 3),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ - SR(MICROSECOND_TIME_BASE_DIV), \ - SR(MILLISECOND_TIME_BASE_DIV), \ - SR(DISPCLK_FREQ_CHANGE_CNTL), \ - SR(RBBMIF_TIMEOUT_DIS), \ - SR(RBBMIF_TIMEOUT_DIS_2), \ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ - SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ - SR(MPC_CRC_CTRL), \ - SR(MPC_CRC_RESULT_GB), \ - SR(MPC_CRC_RESULT_C), \ - SR(MPC_CRC_RESULT_AR), \ - SR(DOMAIN0_PG_CONFIG), \ - SR(DOMAIN1_PG_CONFIG), \ - SR(DOMAIN2_PG_CONFIG), \ - SR(DOMAIN3_PG_CONFIG), \ - SR(DOMAIN16_PG_CONFIG), \ - SR(DOMAIN17_PG_CONFIG), \ - SR(DOMAIN18_PG_CONFIG), \ - SR(DOMAIN0_PG_STATUS), \ - SR(DOMAIN1_PG_STATUS), \ - SR(DOMAIN2_PG_STATUS), \ - SR(DOMAIN3_PG_STATUS), \ - SR(DOMAIN16_PG_STATUS), \ - SR(DOMAIN17_PG_STATUS), \ - SR(DOMAIN18_PG_STATUS), \ - SR(D1VGA_CONTROL), \ - SR(D2VGA_CONTROL), \ - SR(D3VGA_CONTROL), \ - SR(D4VGA_CONTROL), \ - SR(D5VGA_CONTROL), \ - SR(D6VGA_CONTROL), \ - 
SR(DC_IP_REQUEST_CNTL), \ - SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING), \ - SR(HPO_TOP_HW_CONTROL) - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCN31_REG_LIST() -}; - -#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\ - HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ - HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ - HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ - HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ - HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ - HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ - HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ - HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ - HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN31_MASK_SH_LIST(_MASK) -}; -#define vmid_regs(id)\ -[id] = {\ - DCN20_VMID_REG_LIST(id)\ -} - -static const struct dcn_vmid_registers vmid_regs[] = { - vmid_regs(0), - vmid_regs(1), - vmid_regs(2), - vmid_regs(3), - vmid_regs(4), - vmid_regs(5), - vmid_regs(6), - vmid_regs(7), - vmid_regs(8), - vmid_regs(9), - vmid_regs(10), - vmid_regs(11), - vmid_regs(12), - vmid_regs(13), - vmid_regs(14), - vmid_regs(15) -}; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -static const struct resource_caps res_cap_dcn31 = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 5, - .num_stream_encoder = 5, - .num_dig_link_enc = 5, - .num_hpo_dp_stream_encoder = 4, - .num_hpo_dp_link_encoder = 2, - .num_pll = 5, - .num_dwb = 1, - .num_ddc = 5, - .num_vmid = 16, - .num_mpc_3dlut = 2, - .num_dsc = 3, -}; - -static const struct dc_plane_cap 
plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true, - .ayuv = false, - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 16000 - }, - - // 6:1 downscaling ratio: 1000/6 = 166.666 - .max_downscale_factor = { - .argb8888 = 167, - .nv12 = 167, - .fp16 = 167 - }, - 64, - 64 -}; - -static const struct dc_debug_options debug_defaults_drv = { - .disable_z10 = true, /* HW does not support it */ - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = false, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, - .performance_trace = false, - .max_downscale_src_width = 4096, /* up to true 4K */ - .disable_pplib_wm_range = false, - .scl_reset_length10 = true, - .sanity_checks = false, - .underflow_assert_delay_us = 0xFFFFFFFF, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .pstate_enabled = true, - .use_max_lb = true, - .enable_mem_low_power = { - .bits = { - .vga = true, - .i2c = true, - .dmcu = false, // previously known to cause a hang on S3 cycles if enabled - .dscl = true, - .cm = true, - .mpc = true, - .optc = true, - .vpg = true, - .afmt = true, - } - }, - .enable_legacy_fast_update = true, - .psr_power_use_phy_fsm = 0, - .using_dml2 = false, -}; - -static const struct dc_panel_config panel_config_defaults = { - .psr = { - .disable_psr = false, - .disallow_psrsu = false, - .disallow_replay = false, - }, - .ilr = { - .optimize_edp_link_rate = true, - }, -}; - -static void dcn31_dpp_destroy(struct dpp **dpp) -{ - kfree(TO_DCN20_DPP(*dpp)); - *dpp = NULL; -} - -static struct dpp *dcn31_dpp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn3_dpp *dpp = - kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); - - if (!dpp) - return NULL; - - if (dpp3_construct(dpp, ctx, inst, - &dpp_regs[inst], &tf_shift, &tf_mask)) - return &dpp->base; - - BREAK_TO_DEBUGGER(); - kfree(dpp); - return NULL; -} - -static struct output_pixel_processor *dcn31_opp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_opp *opp = - kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); - - if (!opp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dcn20_opp_construct(opp, ctx, inst, - &opp_regs[inst], &opp_shift, &opp_mask); - return &opp->base; -} - -static struct dce_aux *dcn31_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) } - -static const struct dce_i2c_registers i2c_hw_regs[] = { - i2c_inst_regs(1), - i2c_inst_regs(2), - i2c_inst_regs(3), - i2c_inst_regs(4), - i2c_inst_regs(5), -}; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static struct dce_i2c_hw *dcn31_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct 
dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} -static struct mpc *dcn31_mpc_create( - struct dc_context *ctx, - int num_mpcc, - int num_rmu) -{ - struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), - GFP_KERNEL); - - if (!mpc30) - return NULL; - - dcn30_mpc_construct(mpc30, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - num_mpcc, - num_rmu); - - return &mpc30->base; -} - -static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) -{ - int i; - - struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), - GFP_KERNEL); - - if (!hubbub3) - return NULL; - - hubbub31_construct(hubbub3, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask, - dcn3_15_ip.det_buffer_size_kbytes, - dcn3_15_ip.pixel_chunk_size_kbytes, - dcn3_15_ip.config_return_buffer_size_in_kbytes); - - - for (i = 0; i < res_cap_dcn31.num_vmid; i++) { - struct dcn20_vmid *vmid = &hubbub3->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - - return &hubbub3->base; -} - -static struct timing_generator *dcn31_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &optc_regs[instance]; - tgn10->tg_shift = &optc_shift; - tgn10->tg_mask = &optc_mask; - - dcn31_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -static struct link_encoder *dcn31_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - - if (!enc20) - return NULL; - - dcn31_link_encoder_construct(enc20, - enc_init_data, - &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc20->enc10.base; -} - -/* Create a minimal link encoder object not associated with a particular - * physical connector. 
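 * Unlike dcn31_link_encoder_create() above, no AUX channel or HPD source
 * register sets are attached here; the minimal encoder is identified purely
 * by its engine ID and picks up the matching link_enc_regs[] entry.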
- * resource_funcs.link_enc_create_minimal - */ -static struct link_encoder *dcn31_link_enc_create_minimal( - struct dc_context *ctx, enum engine_id eng_id) -{ - struct dcn20_link_encoder *enc20; - - if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) - return NULL; - - enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) - return NULL; - - dcn31_link_encoder_construct_minimal( - enc20, - ctx, - &link_enc_feature, - &link_enc_regs[eng_id - ENGINE_ID_DIGA], - eng_id); - - return &enc20->enc10.base; -} - -static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dcn31_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dcn31_panel_cntl_construct(panel_cntl, init_data); - - return &panel_cntl->base; -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); - -} - -static struct audio *dcn31_create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct vpg *dcn31_vpg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); - - if (!vpg31) - return NULL; - - vpg31_construct(vpg31, ctx, inst, - &vpg_regs[inst], - &vpg_shift, - &vpg_mask); - - return &vpg31->base; -} - -static struct afmt *dcn31_afmt_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); - - if (!afmt31) - return NULL; - - afmt31_construct(afmt31, ctx, inst, - &afmt_regs[inst], - &afmt_shift, - &afmt_mask); - - // Light sleep by default, no need to power down here - - return &afmt31->base; -} - -static struct apg *dcn31_apg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); - - if (!apg31) - return NULL; - - apg31_construct(apg31, ctx, inst, - &apg_regs[inst], - &apg_shift, - &apg_mask); - - return &apg31->base; -} - -static struct stream_encoder *dcn315_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1; - struct vpg *vpg; - struct afmt *afmt; - int vpg_inst; - int afmt_inst; - - /* PHYB is wired off in HW; allow the front end to remap engines, otherwise more changes would be needed. */ - - /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else - return NULL; - - enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - vpg = dcn31_vpg_create(ctx, vpg_inst); - afmt = dcn31_afmt_create(ctx, afmt_inst); - - if (!enc1 || !vpg || !afmt) { - kfree(enc1); - kfree(vpg); - kfree(afmt); - return NULL; - } - - dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, - eng_id, vpg, afmt, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; - struct vpg *vpg; - struct apg *apg; - uint32_t hpo_dp_inst; - uint32_t vpg_inst; - uint32_t apg_inst; - - ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= 
ENGINE_ID_HPO_DP_3)); - hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; - - /* Mapping of VPG register blocks to HPO DP block instance: - * VPG[6] -> HPO_DP[0] - * VPG[7] -> HPO_DP[1] - * VPG[8] -> HPO_DP[2] - * VPG[9] -> HPO_DP[3] - */ - vpg_inst = hpo_dp_inst + 6; - - /* Mapping of APG register blocks to HPO DP block instance: - * APG[0] -> HPO_DP[0] - * APG[1] -> HPO_DP[1] - * APG[2] -> HPO_DP[2] - * APG[3] -> HPO_DP[3] - */ - apg_inst = hpo_dp_inst; - - /* allocate HPO stream encoder and create VPG sub-block */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); - vpg = dcn31_vpg_create(ctx, vpg_inst); - apg = dcn31_apg_create(ctx, apg_inst); - - if (!hpo_dp_enc31 || !vpg || !apg) { - kfree(hpo_dp_enc31); - kfree(vpg); - kfree(apg); - return NULL; - } - - dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, - hpo_dp_inst, eng_id, vpg, apg, - &hpo_dp_stream_enc_regs[hpo_dp_inst], - &hpo_dp_se_shift, &hpo_dp_se_mask); - - return &hpo_dp_enc31->base; -} - -static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( - uint8_t inst, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; - - /* allocate HPO link encoder */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); - - hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, - &hpo_dp_link_enc_regs[inst], - &hpo_dp_le_shift, &hpo_dp_le_mask); - - return &hpo_dp_enc31->base; -} - -static struct dce_hwseq *dcn31_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn31_create_audio, - .create_stream_encoder = dcn315_stream_encoder_create, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn31_hwseq_create, -}; - -static void dcn315_resource_destruct(struct dcn315_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) { - if (pool->base.stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); - pool->base.stream_enc[i]->vpg = NULL; - } - if (pool->base.stream_enc[i]->afmt != NULL) { - kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); - pool->base.stream_enc[i]->afmt = NULL; - } - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { - if (pool->base.hpo_dp_stream_enc[i] != NULL) { - if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); - pool->base.hpo_dp_stream_enc[i]->vpg = NULL; - } - if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { - kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); - pool->base.hpo_dp_stream_enc[i]->apg = NULL; - } - kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); - pool->base.hpo_dp_stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { - if (pool->base.hpo_dp_link_enc[i] != NULL) { - kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); - pool->base.hpo_dp_link_enc[i] = NULL; - } 
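	/* Every teardown block in this function follows the same idiom:
	 * downcast the interface pointer to its containing implementation
	 * struct, free that, and NULL the pool slot so a repeated destruct
	 * is harmless. A minimal sketch of the pattern (FOO and TO_FOO() are
	 * placeholders, not real types in this file):
	 *
	 *	for (i = 0; i < pool->base.res_cap->num_foo; i++) {
	 *		if (pool->base.foos[i] != NULL) {
	 *			kfree(TO_FOO(pool->base.foos[i]));
	 *			pool->base.foos[i] = NULL;
	 *		}
	 *	}
	 */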
- } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - if (pool->base.dscs[i] != NULL) - dcn20_dsc_destroy(&pool->base.dscs[i]); - } - - if (pool->base.mpc != NULL) { - kfree(TO_DCN20_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.dpps[i] != NULL) - dcn31_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dwb; i++) { - if (pool->base.dwbc[i] != NULL) { - kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); - pool->base.dwbc[i] = NULL; - } - if (pool->base.mcif_wb[i] != NULL) { - kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); - pool->base.mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn20_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { - if (pool->base.mpc_lut[i] != NULL) { - dc_3dlut_func_release(pool->base.mpc_lut[i]); - pool->base.mpc_lut[i] = NULL; - } - if (pool->base.mpc_shaper[i] != NULL) { - dc_transfer_func_release(pool->base.mpc_shaper[i]); - pool->base.mpc_shaper[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn20_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.multiple_abms[i] != NULL) - dce_abm_destroy(&pool->base.multiple_abms[i]); - } - - if (pool->base.psr != NULL) - dmub_psr_destroy(&pool->base.psr); - - if (pool->base.dccg != NULL) - dcn_dccg_destroy(&pool->base.dccg); -} - -static struct hubp *dcn31_hubp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); - - if (!hubp2) - return NULL; - - if (hubp31_construct(hubp2, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp2->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp2); - return NULL; -} - -static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), - GFP_KERNEL); - - 
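		/* No local unwind is needed on failure: the caller bails out
		 * through its create_fail label, where dcn315_resource_destruct()
		 * reclaims everything allocated so far. */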
if (!dwbc30) { - dm_error("DC: failed to create dwbc30!\n"); - return false; - } - - dcn30_dwbc_construct(dwbc30, ctx, - &dwbc30_regs[i], - &dwbc30_shift, - &dwbc30_mask, - i); - - pool->dwbc[i] = &dwbc30->base; - } - return true; -} - -static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), - GFP_KERNEL); - - if (!mcif_wb30) { - dm_error("DC: failed to create mcif_wb30!\n"); - return false; - } - - dcn30_mmhubbub_construct(mcif_wb30, ctx, - &mcif_wb30_regs[i], - &mcif_wb30_shift, - &mcif_wb30_mask, - i); - - pool->mcif_wb[i] = &mcif_wb30->base; - } - return true; -} - -static struct display_stream_compressor *dcn31_dsc_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - return &dsc->base; -} - -static void dcn315_destroy_resource_pool(struct resource_pool **pool) -{ - struct dcn315_resource_pool *dcn31_pool = TO_DCN315_RES_POOL(*pool); - - dcn315_resource_destruct(dcn31_pool); - kfree(dcn31_pool); - *pool = NULL; -} - -static struct clock_source *dcn31_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn31_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static bool is_dual_plane(enum surface_pixel_format format) -{ - return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; -} - -static int source_format_to_bpp (enum source_format_class SourcePixelFormat) -{ - if (SourcePixelFormat == dm_444_64) - return 8; - else if (SourcePixelFormat == dm_444_16) - return 2; - else if (SourcePixelFormat == dm_444_8) - return 1; - else if (SourcePixelFormat == dm_rgbe_alpha) - return 5; - else if (SourcePixelFormat == dm_420_8) - return 3; - else if (SourcePixelFormat == dm_420_12) - return 6; - else - return 4; -} - -static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) -{ - int i; - struct resource_context *res_ctx = &context->res_ctx; - - /*Don't apply for single stream*/ - if (context->stream_count < 2) - return false; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (!res_ctx->pipe_ctx[i].stream) - continue; - - /*Don't apply if scaling*/ - if (res_ctx->pipe_ctx[i].stream->src.width != res_ctx->pipe_ctx[i].stream->dst.width || - res_ctx->pipe_ctx[i].stream->src.height != res_ctx->pipe_ctx[i].stream->dst.height || - (res_ctx->pipe_ctx[i].plane_state && (res_ctx->pipe_ctx[i].plane_state->src_rect.width - != res_ctx->pipe_ctx[i].plane_state->dst_rect.width || - res_ctx->pipe_ctx[i].plane_state->src_rect.height - != res_ctx->pipe_ctx[i].plane_state->dst_rect.height))) - return false; - /*Don't apply if MPO to avoid transition issues*/ - if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state) - return false; - } - return true; -} - -static int 
dcn315_populate_dml_pipes_from_context( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate) -{ - int i, pipe_cnt, crb_idx, crb_pipes; - struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe = NULL; - const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB; - int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB; - bool pixel_rate_crb = allow_pixel_rate_crb(dc, context); - - DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); - DC_FP_END(); - - for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) { - struct dc_crtc_timing *timing; - - if (!res_ctx->pipe_ctx[i].stream) - continue; - pipe = &res_ctx->pipe_ctx[i]; - timing = &pipe->stream->timing; - - /* - * Immediate flip can be set dynamically after enabling the plane. - * We need to require support for immediate flip or underflow can be - * intermittently experienced depending on peak b/w requirements. - */ - pipes[pipe_cnt].pipe.src.immediate_flip = true; - - pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; - pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; - pipes[pipe_cnt].pipe.src.dcc_rate = 3; - pipes[pipe_cnt].dout.dsc_input_bpc = 0; - DC_FP_START(); - dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); - if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) { - int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format); - /* Ceil to crb segment size */ - int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate( - &context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB); - - if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) { - bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS; - split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc); - split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); - - /* Minimum 2 segments to allow mpc/odm combine if it's used later */ - if (approx_det_segs_required_for_pstate < 2) - approx_det_segs_required_for_pstate = 2; - if (split_required) - approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2; - pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate; - remaining_det_segs -= approx_det_segs_required_for_pstate; - } else - remaining_det_segs = -1; - crb_pipes++; - } - DC_FP_END(); - - if (pipes[pipe_cnt].dout.dsc_enable) { - switch (timing->display_color_depth) { - case COLOR_DEPTH_888: - pipes[pipe_cnt].dout.dsc_input_bpc = 8; - break; - case COLOR_DEPTH_101010: - pipes[pipe_cnt].dout.dsc_input_bpc = 10; - break; - case COLOR_DEPTH_121212: - pipes[pipe_cnt].dout.dsc_input_bpc = 12; - break; - default: - ASSERT(0); - break; - } - } - pipe_cnt++; - } - - /* Spread remaining unreserved crb evenly among all pipes */ - if (pixel_rate_crb) { - for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) { - pipe = &res_ctx->pipe_ctx[i]; - if (!pipe->stream) - continue; - - /* Do not use asymmetric crb if not enough for pstate support */ - if (remaining_det_segs < 0) { - pipes[pipe_cnt].pipe.src.det_size_override = 0; - pipe_cnt++; - continue; - } - - if (!pipe->top_pipe && !pipe->prev_odm_pipe) { - bool split_required = pipe->stream->timing.pix_clk_100hz >= 
dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) - || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); - - if (remaining_det_segs > MIN_RESERVED_DET_SEGS) - pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + - (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0); - if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { - /* Clamp to 2 pipe split max det segments */ - remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); - pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; - } - if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { - /* If we are splitting we must have an even number of segments */ - remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; - pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; - } - /* Convert segments into size for DML use */ - pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; - - crb_idx++; - } - pipe_cnt++; - } - } - - if (pipe_cnt) - context->bw_ctx.dml.ip.det_buffer_size_kbytes = - (max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB; - if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE) - context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE; - - dc->config.enable_4to1MPC = false; - if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { - if (is_dual_plane(pipe->plane_state->format) - && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { - dc->config.enable_4to1MPC = true; - context->bw_ctx.dml.ip.det_buffer_size_kbytes = - (max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / 4) * DCN3_15_CRB_SEGMENT_SIZE_KB; - } else if (!is_dual_plane(pipe->plane_state->format) - && pipe->plane_state->src_rect.width <= 5120 - && pipe->stream->timing.pix_clk_100hz < dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)) { - /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */ - context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; - pipes[0].pipe.src.unbounded_req_mode = true; - } - } - - return pipe_cnt; -} - -static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_config) -{ - *panel_config = panel_config_defaults; -} - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap -}; - -static struct resource_funcs dcn315_res_pool_funcs = { - .destroy = dcn315_destroy_resource_pool, - .link_enc_create = dcn31_link_encoder_create, - .link_enc_create_minimal = dcn31_link_enc_create_minimal, - .link_encs_assign = link_enc_cfg_link_encs_assign, - .link_enc_unassign = link_enc_cfg_link_enc_unassign, - .panel_cntl_create = dcn31_panel_cntl_create, - .validate_bandwidth = dcn31_validate_bandwidth, - .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, - .update_soc_for_wm_a = dcn315_update_soc_for_wm_a, - .populate_dml_pipes = dcn315_populate_dml_pipes_from_context, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, - .release_pipe = dcn20_release_pipe, - .add_stream_to_ctx = dcn30_add_stream_to_ctx, - .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context, - .set_mcif_arb_params = 
dcn31_set_mcif_arb_params, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, - .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, - .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, - .update_bw_bounding_box = dcn315_update_bw_bounding_box, - .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, - .get_panel_config_defaults = dcn315_get_panel_config_defaults, -}; - -static bool dcn315_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dcn315_resource_pool *pool) -{ - int i; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data init_data; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap_dcn31; - - pool->base.funcs = &dcn315_res_pool_funcs; - - /************************************************* - * Resource + asic cap hardcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = pool->base.res_cap->num_timing_generator; - pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; - dc->caps.max_downscale_ratio = 600; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 100; - dc->caps.max_cursor_size = 256; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dmdata_alloc_size = 2048; - dc->caps.max_slave_planes = 2; - dc->caps.max_slave_yuv_planes = 2; - dc->caps.max_slave_rgb_planes = 2; - dc->caps.post_blend_color_processing = true; - dc->caps.force_dp_tps4_for_cp2520 = true; - if (dc->config.forceHBR2CP2520) - dc->caps.force_dp_tps4_for_cp2520 = false; - dc->caps.dp_hpo = true; - dc->caps.dp_hdmi21_pcon_support = true; - dc->caps.edp_dsc_support = true; - dc->caps.extended_aux_timeout_support = true; - dc->caps.dmcub_support = true; - dc->caps.is_apu = true; - - /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 0; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; - dc->caps.color.dpp.dgam_rom_caps.pq = 1; - dc->caps.color.dpp.dgam_rom_caps.hlg = 1; - dc->caps.color.dpp.post_csc = 1; - dc->caps.color.dpp.gamma_corr = 1; - dc->caps.color.dpp.dgam_rom_for_yuv = 0; - - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; - // no OGAM ROM on DCN301 - dc->caps.color.dpp.ogam_rom_caps.srgb = 0; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 0; - - dc->caps.color.mpc.gamut_remap = 1; - dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 - dc->caps.color.mpc.ogam_ram = 1; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; - dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 1; - - /* read VBIOS LTTPR caps */ - { - if (ctx->dc_bios->funcs->get_lttpr_caps) { - enum bp_result bp_query_result; - uint8_t is_vbios_lttpr_enable = 0; - - bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); - dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; - } - - /* interop bit is implicit */ - { - dc->caps.vbios_lttpr_aware 
= true; - } - } - - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) - dc->debug = debug_defaults_drv; - - // Init the vm_helper - if (dc->vm_helper) - vm_helper_init(dc->vm_helper, 16); - - /************************************************* - * Create resources * - *************************************************/ - - /* Clock Sources for Pixel Clock*/ - pool->base.clock_sources[DCN31_CLK_SRC_PLL0] = - dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL1] = - dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = - dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = - dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = - dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - - pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL; - - /* todo: not reuse phy_pll registers */ - pool->base.dp_clock_source = - dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* TODO: DCCG */ - pool->base.dccg = dccg31_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->base.dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* TODO: IRQ */ - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dcn315_create(&init_data); - if (!pool->base.irqs) - goto create_fail; - - /* HUBBUB */ - pool->base.hubbub = dcn31_hubbub_create(ctx); - if (pool->base.hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - /* HUBPs, DPPs, OPPs and TGs */ - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.hubps[i] = dcn31_hubp_create(ctx, i); - if (pool->base.hubps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create hubps!\n"); - goto create_fail; - } - - pool->base.dpps[i] = dcn31_dpp_create(ctx, i); - if (pool->base.dpps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create dpps!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - pool->base.opps[i] = dcn31_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.timing_generators[i] = dcn31_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - pool->base.timing_generator_count = i; - - /* PSR */ - pool->base.psr = dmub_psr_create(ctx); - if (pool->base.psr == NULL) { - dm_error("DC: failed to create psr obj!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* ABM */ - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.multiple_abms[i] = dmub_abm_create(ctx, - &abm_regs[i], - &abm_shift, - &abm_mask); - if 
(pool->base.multiple_abms[i] == NULL) { - dm_error("DC: failed to create abm for pipe %d!\n", i); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* MPC and DSC */ - pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); - if (pool->base.mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - pool->base.dscs[i] = dcn31_dsc_create(ctx, i); - if (pool->base.dscs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create display stream compressor %d!\n", i); - goto create_fail; - } - } - - /* DWB and MMHUBBUB */ - if (!dcn31_dwbc_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dwbc!\n"); - goto create_fail; - } - - if (!dcn31_mmhubbub_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mcif_wb!\n"); - goto create_fail; - } - - /* AUX and I2C */ - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto create_fail; - } - pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create hw i2c!!\n"); - goto create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto create_fail; - - /* HW Sequencer and Plane caps */ - dcn31_hw_sequencer_construct(dc); - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - dc->dcn_ip->max_num_dpp = dcn3_15_ip.max_num_dpp; - - return true; - -create_fail: - - dcn315_resource_destruct(pool); - - return false; -} - -struct resource_pool *dcn315_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc) -{ - struct dcn315_resource_pool *pool = - kzalloc(sizeof(struct dcn315_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dcn315_resource_construct(init_data->num_virtual_links, dc, pool)) - return &pool->base; - - BREAK_TO_DEBUGGER(); - kfree(pool); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h deleted file mode 100644 index 22849eaa6f..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef _DCN315_RESOURCE_H_ -#define _DCN315_RESOURCE_H_ - -#include "core_types.h" - -#define TO_DCN315_RES_POOL(pool)\ - container_of(pool, struct dcn315_resource_pool, base) - -extern struct _vcs_dpi_ip_params_st dcn3_15_ip; - -struct dcn315_resource_pool { - struct resource_pool base; -}; - -struct resource_pool *dcn315_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); - -#endif /* _DCN315_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile b/drivers/gpu/drm/amd/display/dc/dcn316/Makefile deleted file mode 100644 index 819d44a943..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -# -# Copyright 2021 Advanced Micro Devices, Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# Authors: AMD -# -# Makefile for dcn316. - -DCN316 = dcn316_resource.o - -AMD_DAL_DCN316 = $(addprefix $(AMDDALPATH)/dc/dcn316/,$(DCN316)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DCN316) diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c deleted file mode 100644 index b9753d4606..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ /dev/null @@ -1,2038 +0,0 @@ -/* - * Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - - -#include "dm_services.h" -#include "dc.h" - -#include "dcn31/dcn31_init.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn316_resource.h" - -#include "dcn20/dcn20_resource.h" -#include "dcn30/dcn30_resource.h" -#include "dcn31/dcn31_resource.h" - -#include "dcn10/dcn10_ipp.h" -#include "dcn30/dcn30_hubbub.h" -#include "dcn31/dcn31_hubbub.h" -#include "dcn30/dcn30_mpc.h" -#include "dcn31/dcn31_hubp.h" -#include "irq/dcn31/irq_service_dcn31.h" -#include "dcn30/dcn30_dpp.h" -#include "dcn31/dcn31_optc.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn30/dcn30_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn30/dcn30_opp.h" -#include "dcn20/dcn20_dsc.h" -#include "dcn30/dcn30_vpg.h" -#include "dcn30/dcn30_afmt.h" -#include "dcn30/dcn30_dio_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_link_encoder.h" -#include "dcn31/dcn31_apg.h" -#include "dcn31/dcn31_dio_link_encoder.h" -#include "dcn31/dcn31_vpg.h" -#include "dcn31/dcn31_afmt.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" -#include "dce110/dce110_resource.h" -#include "dml/display_mode_vba.h" -#include "dml/dcn31/dcn31_fpu.h" -#include "dcn31/dcn31_dccg.h" -#include "dcn10/dcn10_resource.h" -#include "dcn31/dcn31_panel_cntl.h" - -#include "dcn30/dcn30_dwb.h" -#include "dcn30/dcn30_mmhubbub.h" - -#include "dcn/dcn_3_1_6_offset.h" -#include "dcn/dcn_3_1_6_sh_mask.h" -#include "dpcs/dpcs_4_2_3_offset.h" -#include "dpcs/dpcs_4_2_3_sh_mask.h" - -#define regBIF_BX1_BIOS_SCRATCH_2 0x003a -#define regBIF_BX1_BIOS_SCRATCH_2_BASE_IDX 1 -#define regBIF_BX1_BIOS_SCRATCH_3 0x003b -#define regBIF_BX1_BIOS_SCRATCH_3_BASE_IDX 1 -#define regBIF_BX1_BIOS_SCRATCH_6 0x003e -#define regBIF_BX1_BIOS_SCRATCH_6_BASE_IDX 1 - -#define regDCHUBBUB_DEBUG_CTRL_0 0x04d6 -#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2 -#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10 -#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L - -#define DCN_BASE__INST0_SEG0 0x00000012 -#define DCN_BASE__INST0_SEG1 0x000000C0 -#define DCN_BASE__INST0_SEG2 0x000034C0 -#define DCN_BASE__INST0_SEG3 0x00009000 -#define DCN_BASE__INST0_SEG4 0x02403C00 -#define DCN_BASE__INST0_SEG5 0 - -#define DPCS_BASE__INST0_SEG0 0x00000012 -#define DPCS_BASE__INST0_SEG1 0x000000C0 -#define DPCS_BASE__INST0_SEG2 0x000034C0 -#define DPCS_BASE__INST0_SEG3 0x00009000 -#define DPCS_BASE__INST0_SEG4 0x02403C00 -#define DPCS_BASE__INST0_SEG5 0 - -#define NBIO_BASE__INST0_SEG0 0x00000000 -#define NBIO_BASE__INST0_SEG1 0x00000014 -#define NBIO_BASE__INST0_SEG2 0x00000D20 -#define NBIO_BASE__INST0_SEG3 0x00010400 -#define NBIO_BASE__INST0_SEG4 0x0241B000 -#define NBIO_BASE__INST0_SEG5 0x04040000 - -#include "reg_helper.h" -#include "dce/dmub_abm.h" -#include "dce/dmub_psr.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" - -#include "dml/dcn30/display_mode_vba_30.h" -#include 
"vm_helper.h" -#include "dcn20/dcn20_vmid.h" - -#include "link_enc_cfg.h" - -#define DCN3_16_MAX_DET_SIZE 384 -#define DCN3_16_MIN_COMPBUF_SIZE_KB 128 -#define DCN3_16_CRB_SEGMENT_SIZE_KB 64 - -enum dcn31_clk_src_array_id { - DCN31_CLK_SRC_PLL0, - DCN31_CLK_SRC_PLL1, - DCN31_CLK_SRC_PLL2, - DCN31_CLK_SRC_PLL3, - DCN31_CLK_SRC_PLL4, - DCN30_CLK_SRC_TOTAL -}; - -/* begin ********************* - * macros to expend register list macro defined in HW object header file - */ - -/* DCN */ -#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ - .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SRIR(var_name, reg_name, block, id)\ - .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII(reg_name, block, id)\ - .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_MPC_RMU(reg_name, block, id)\ - .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_DWB(reg_name, temp_name, block, id)\ - .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## temp_name - -#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define DCCG_SRII(reg_name, block, id)\ - .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - reg ## reg_name ## _ ## block ## id - -/* NBIO */ -#define NBIO_BASE_INNER(seg) \ - NBIO_BASE__INST0_SEG ## seg - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - .reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX1_ ## reg_name - -static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_3), - NBIO_SR(BIOS_SCRATCH_6) -}; - -#define clk_src_regs(index, pllid)\ -[index] = {\ - CS_COMMON_REG_LIST_DCN3_0(index, pllid),\ -} - -static const struct dce110_clk_src_regs clk_src_regs[] = { - clk_src_regs(0, A), - clk_src_regs(1, B), - clk_src_regs(2, C), - clk_src_regs(3, D), - clk_src_regs(4, E) -}; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) -}; - -#define abm_regs(id)\ -[id] = {\ - ABM_DCN302_REG_LIST(id)\ -} - -static const struct dce_abm_registers abm_regs[] = { - abm_regs(0), - abm_regs(1), - abm_regs(2), - abm_regs(3), -}; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN30(_MASK) -}; - -#define audio_regs(id)\ -[id] = {\ - AUD_COMMON_REG_LIST(id)\ -} - -static const struct dce_audio_registers audio_regs[] = { - audio_regs(0), - audio_regs(1), - audio_regs(2), - audio_regs(3), - audio_regs(4), - audio_regs(5), - audio_regs(6) -}; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - 
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define vpg_regs(id)\ -[id] = {\ - VPG_DCN31_REG_LIST(id)\ -} - -static const struct dcn31_vpg_registers vpg_regs[] = { - vpg_regs(0), - vpg_regs(1), - vpg_regs(2), - vpg_regs(3), - vpg_regs(4), - vpg_regs(5), - vpg_regs(6), - vpg_regs(7), - vpg_regs(8), - vpg_regs(9), -}; - -static const struct dcn31_vpg_shift vpg_shift = { - DCN31_VPG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_vpg_mask vpg_mask = { - DCN31_VPG_MASK_SH_LIST(_MASK) -}; - -#define afmt_regs(id)\ -[id] = {\ - AFMT_DCN31_REG_LIST(id)\ -} - -static const struct dcn31_afmt_registers afmt_regs[] = { - afmt_regs(0), - afmt_regs(1), - afmt_regs(2), - afmt_regs(3), - afmt_regs(4), - afmt_regs(5) -}; - -static const struct dcn31_afmt_shift afmt_shift = { - DCN31_AFMT_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_afmt_mask afmt_mask = { - DCN31_AFMT_MASK_SH_LIST(_MASK) -}; - - -#define apg_regs(id)\ -[id] = {\ - APG_DCN31_REG_LIST(id)\ -} - -static const struct dcn31_apg_registers apg_regs[] = { - apg_regs(0), - apg_regs(1), - apg_regs(2), - apg_regs(3) -}; - -static const struct dcn31_apg_shift apg_shift = { - DCN31_APG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_apg_mask apg_mask = { - DCN31_APG_MASK_SH_LIST(_MASK) -}; - - -#define stream_enc_regs(id)\ -[id] = {\ - SE_DCN3_REG_LIST(id)\ -} - -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { - stream_enc_regs(0), - stream_enc_regs(1), - stream_enc_regs(2), - stream_enc_regs(3), - stream_enc_regs(4) -}; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - - -#define aux_regs(id)\ -[id] = {\ - DCN2_AUX_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4) -}; - -#define hpd_regs(id)\ -[id] = {\ - HPD_REG_LIST(id)\ -} - -static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { - hpd_regs(0), - hpd_regs(1), - hpd_regs(2), - hpd_regs(3), - hpd_regs(4) -}; - -#define link_regs(id, phyid)\ -[id] = {\ - LE_DCN31_REG_LIST(id), \ - UNIPHY_DCN2_REG_LIST(phyid), \ - DPCS_DCN31_REG_LIST(id), \ -} - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -static const struct dcn10_link_enc_registers link_enc_regs[] = { - link_regs(0, A), - link_regs(1, B), - link_regs(2, C), - link_regs(3, D), - link_regs(4, E) -}; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ - DPCS_DCN31_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ - DPCS_DCN31_MASK_SH_LIST(_MASK) -}; - - - -#define hpo_dp_stream_encoder_reg_list(id)\ -[id] = {\ - DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ -} - -static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { - hpo_dp_stream_encoder_reg_list(0), - 
hpo_dp_stream_encoder_reg_list(1), - hpo_dp_stream_encoder_reg_list(2), - hpo_dp_stream_encoder_reg_list(3), -}; - -static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) -}; - - -#define hpo_dp_link_encoder_reg_list(id)\ -[id] = {\ - DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ - DCN3_1_RDPCSTX_REG_LIST(0),\ - DCN3_1_RDPCSTX_REG_LIST(1),\ - DCN3_1_RDPCSTX_REG_LIST(2),\ - DCN3_1_RDPCSTX_REG_LIST(3),\ - DCN3_1_RDPCSTX_REG_LIST(4)\ -} - -static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { - hpo_dp_link_encoder_reg_list(0), - hpo_dp_link_encoder_reg_list(1), -}; - -static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { - DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { - DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) -}; - - -#define dpp_regs(id)\ -[id] = {\ - DPP_REG_LIST_DCN30(id),\ -} - -static const struct dcn3_dpp_registers dpp_regs[] = { - dpp_regs(0), - dpp_regs(1), - dpp_regs(2), - dpp_regs(3) -}; - -static const struct dcn3_dpp_shift tf_shift = { - DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) -}; - -static const struct dcn3_dpp_mask tf_mask = { - DPP_REG_LIST_SH_MASK_DCN30(_MASK) -}; - -#define opp_regs(id)\ -[id] = {\ - OPP_REG_LIST_DCN30(id),\ -} - -static const struct dcn20_opp_registers opp_regs[] = { - opp_regs(0), - opp_regs(1), - opp_regs(2), - opp_regs(3) -}; - -static const struct dcn20_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn20_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN20(_MASK) -}; - -#define aux_engine_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST0(id), \ - .AUXN_IMPCAL = 0, \ - .AUXP_IMPCAL = 0, \ - .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ -} - -static const struct dce110_aux_registers aux_engine_regs[] = { - aux_engine_regs(0), - aux_engine_regs(1), - aux_engine_regs(2), - aux_engine_regs(3), - aux_engine_regs(4) -}; - -#define dwbc_regs_dcn3(id)\ -[id] = {\ - DWBC_COMMON_REG_LIST_DCN30(id),\ -} - -static const struct dcn30_dwbc_registers dwbc30_regs[] = { - dwbc_regs_dcn3(0), -}; - -static const struct dcn30_dwbc_shift dwbc30_shift = { - DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_dwbc_mask dwbc30_mask = { - DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define mcif_wb_regs_dcn3(id)\ -[id] = {\ - MCIF_WB_COMMON_REG_LIST_DCN30(id),\ -} - -static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { - mcif_wb_regs_dcn3(0) -}; - -static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define dsc_regsDCN20(id)\ -[id] = {\ - DSC_REG_LIST_DCN20(id)\ -} - -static const struct dcn20_dsc_registers dsc_regs[] = { - dsc_regsDCN20(0), - dsc_regsDCN20(1), - dsc_regsDCN20(2) -}; - -static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) -}; - -static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN20(_MASK) -}; - -static const struct dcn30_mpc_registers mpc_regs = { - MPC_REG_LIST_DCN3_0(0), - MPC_REG_LIST_DCN3_0(1), - MPC_REG_LIST_DCN3_0(2), - MPC_REG_LIST_DCN3_0(3), - MPC_OUT_MUX_REG_LIST_DCN3_0(0), - MPC_OUT_MUX_REG_LIST_DCN3_0(1), - MPC_OUT_MUX_REG_LIST_DCN3_0(2), - 
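	/* The MPC_RMU_* entries below map the MPC resource management units
	 * (post-blend shaper/3D LUT blocks). Only instances 0 and 1 are wired
	 * up, matching res_cap num_mpc_3dlut = 2; instance 2 is deliberately
	 * left commented out. */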
MPC_OUT_MUX_REG_LIST_DCN3_0(3), - MPC_RMU_GLOBAL_REG_LIST_DCN3AG, - MPC_RMU_REG_LIST_DCN3AG(0), - MPC_RMU_REG_LIST_DCN3AG(1), - //MPC_RMU_REG_LIST_DCN3AG(2), - MPC_DWB_MUX_REG_LIST_DCN3_0(0), -}; - -static const struct dcn30_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define optc_regs(id)\ -[id] = {OPTC_COMMON_REG_LIST_DCN3_1(id)} - -static const struct dcn_optc_registers optc_regs[] = { - optc_regs(0), - optc_regs(1), - optc_regs(2), - optc_regs(3) -}; - -static const struct dcn_optc_shift optc_shift = { - OPTC_COMMON_MASK_SH_LIST_DCN3_1(__SHIFT) -}; - -static const struct dcn_optc_mask optc_mask = { - OPTC_COMMON_MASK_SH_LIST_DCN3_1(_MASK) -}; - -#define hubp_regs(id)\ -[id] = {\ - HUBP_REG_LIST_DCN30(id)\ -} - -static const struct dcn_hubp2_registers hubp_regs[] = { - hubp_regs(0), - hubp_regs(1), - hubp_regs(2), - hubp_regs(3) -}; - - -static const struct dcn_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN31(__SHIFT) -}; - -static const struct dcn_hubp2_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN31(_MASK) -}; -static const struct dcn_hubbub_registers hubbub_reg = { - HUBBUB_REG_LIST_DCN31(0) -}; - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN31(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN31(_MASK) -}; - -static const struct dccg_registers dccg_regs = { - DCCG_REG_LIST_DCN31() -}; - -static const struct dccg_shift dccg_shift = { - DCCG_MASK_SH_LIST_DCN31(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN31(_MASK) -}; - - -#define SRII2(reg_name_pre, reg_name_post, id)\ - .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ - ## id ## _ ## reg_name_post ## _BASE_IDX) + \ - reg ## reg_name_pre ## id ## _ ## reg_name_post - - -#define HWSEQ_DCN31_REG_LIST()\ - SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ - SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ - SR(DIO_MEM_PWR_CTRL), \ - SR(ODM_MEM_PWR_CTRL3), \ - SR(DMU_MEM_PWR_CNTL), \ - SR(MMHUBBUB_MEM_PWR_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL2), \ - SR(DCFCLK_CNTL),\ - SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ - SRII(PIXEL_RATE_CNTL, OTG, 0), \ - SRII(PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PIXEL_RATE_CNTL, OTG, 3),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ - SR(MICROSECOND_TIME_BASE_DIV), \ - SR(MILLISECOND_TIME_BASE_DIV), \ - SR(DISPCLK_FREQ_CHANGE_CNTL), \ - SR(RBBMIF_TIMEOUT_DIS), \ - SR(RBBMIF_TIMEOUT_DIS_2), \ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ - SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ - SR(MPC_CRC_CTRL), \ - SR(MPC_CRC_RESULT_GB), \ - SR(MPC_CRC_RESULT_C), \ - SR(MPC_CRC_RESULT_AR), \ - SR(DOMAIN0_PG_CONFIG), \ - SR(DOMAIN1_PG_CONFIG), \ - SR(DOMAIN2_PG_CONFIG), \ - SR(DOMAIN3_PG_CONFIG), \ - SR(DOMAIN16_PG_CONFIG), \ - SR(DOMAIN17_PG_CONFIG), \ - SR(DOMAIN18_PG_CONFIG), \ - SR(DOMAIN0_PG_STATUS), \ - SR(DOMAIN1_PG_STATUS), \ - SR(DOMAIN2_PG_STATUS), \ - SR(DOMAIN3_PG_STATUS), \ - SR(DOMAIN16_PG_STATUS), \ - SR(DOMAIN17_PG_STATUS), \ - SR(DOMAIN18_PG_STATUS), \ - SR(D1VGA_CONTROL), \ - SR(D2VGA_CONTROL), \ - SR(D3VGA_CONTROL), \ - SR(D4VGA_CONTROL), \ - SR(D5VGA_CONTROL), \ - SR(D6VGA_CONTROL), \ - SR(DC_IP_REQUEST_CNTL), \ - SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING), \ - 
SR(HPO_TOP_HW_CONTROL) - -static const struct dce_hwseq_registers hwseq_reg = { - HWSEQ_DCN31_REG_LIST() -}; - -#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\ - HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ - HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ - HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ - HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ - HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ - HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ - HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ - HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ - HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN31_MASK_SH_LIST(_MASK) -}; -#define vmid_regs(id)\ -[id] = {\ - DCN20_VMID_REG_LIST(id)\ -} - -static const struct dcn_vmid_registers vmid_regs[] = { - vmid_regs(0), - vmid_regs(1), - vmid_regs(2), - vmid_regs(3), - vmid_regs(4), - vmid_regs(5), - vmid_regs(6), - vmid_regs(7), - vmid_regs(8), - vmid_regs(9), - vmid_regs(10), - vmid_regs(11), - vmid_regs(12), - vmid_regs(13), - vmid_regs(14), - vmid_regs(15) -}; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -static const struct resource_caps res_cap_dcn31 = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 5, - .num_stream_encoder = 5, - .num_dig_link_enc = 5, - .num_hpo_dp_stream_encoder = 4, - .num_hpo_dp_link_encoder = 2, - .num_pll = 5, - .num_dwb = 1, - .num_ddc = 5, - .num_vmid = 16, - .num_mpc_3dlut = 2, - .num_dsc = 3, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - 
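- /*
-  * The scale factors below are in units of 1/1000 of the source size:
-  * 16000 means up to 16.0x upscale, and 167 means the output may be no
-  * smaller than ~1/6 of the source (the 6:1 ratio noted below).
-  */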
.pixel_format_support = {
- .argb8888 = true,
- .nv12 = true,
- .fp16 = true,
- .p010 = true,
- .ayuv = false,
- },
-
- .max_upscale_factor = {
- .argb8888 = 16000,
- .nv12 = 16000,
- .fp16 = 16000
- },
-
- // 6:1 downscaling ratio: 1000/6 = 166.666
- .max_downscale_factor = {
- .argb8888 = 167,
- .nv12 = 167,
- .fp16 = 167
- },
- 64,
- 64
-};
-
-static const struct dc_debug_options debug_defaults_drv = {
- .disable_z10 = true, /* HW does not support it */
- .disable_dmcu = true,
- .force_abm_enable = false,
- .timing_trace = false,
- .clock_trace = true,
- .disable_pplib_clock_request = false,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
- .force_single_disp_pipe_split = false,
- .disable_dcc = DCC_ENABLE,
- .vsr_support = true,
- .performance_trace = false,
- .max_downscale_src_width = 4096, /* up to true 4K */
- .disable_pplib_wm_range = false,
- .scl_reset_length10 = true,
- .sanity_checks = false,
- .underflow_assert_delay_us = 0xFFFFFFFF,
- .dwb_fi_phase = -1, // -1 = disable
- .dmub_command_table = true,
- .pstate_enabled = true,
- .use_max_lb = true,
- .enable_mem_low_power = {
- .bits = {
- .vga = true,
- .i2c = true,
- .dmcu = false, // previously known to cause a hang on S3 cycles if enabled
- .dscl = true,
- .cm = true,
- .mpc = true,
- .optc = true,
- .vpg = true,
- .afmt = true,
- }
- },
- .enable_legacy_fast_update = true,
- .using_dml2 = false,
-};
-
-static const struct dc_panel_config panel_config_defaults = {
- .psr = {
- .disable_psr = false,
- .disallow_psrsu = false,
- .disallow_replay = false,
- },
- .ilr = {
- .optimize_edp_link_rate = true,
- },
-};
-
-static void dcn31_dpp_destroy(struct dpp **dpp)
-{
- kfree(TO_DCN20_DPP(*dpp));
- *dpp = NULL;
-}
-
-static struct dpp *dcn31_dpp_create(
- struct dc_context *ctx,
- uint32_t inst)
-{
- struct dcn3_dpp *dpp =
- kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
-
- if (!dpp)
- return NULL;
-
- if (dpp3_construct(dpp, ctx, inst,
- &dpp_regs[inst], &tf_shift, &tf_mask))
- return &dpp->base;
-
- BREAK_TO_DEBUGGER();
- kfree(dpp);
- return NULL;
-}
-
-static struct output_pixel_processor *dcn31_opp_create(
- struct dc_context *ctx, uint32_t inst)
-{
- struct dcn20_opp *opp =
- kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
-
- if (!opp) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- dcn20_opp_construct(opp, ctx, inst,
- &opp_regs[inst], &opp_shift, &opp_mask);
- return &opp->base;
-}
-
-static struct dce_aux *dcn31_aux_engine_create(
- struct dc_context *ctx,
- uint32_t inst)
-{
- struct aux_engine_dce110 *aux_engine =
- kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
-
- if (!aux_engine)
- return NULL;
-
- dce110_aux_engine_construct(aux_engine, ctx, inst,
- SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst],
- &aux_mask,
- &aux_shift,
- ctx->dc->caps.extended_aux_timeout_support);
-
- return &aux_engine->base;
-}
-#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
-
-static const struct dce_i2c_registers i2c_hw_regs[] = {
- i2c_inst_regs(1),
- i2c_inst_regs(2),
- i2c_inst_regs(3),
- i2c_inst_regs(4),
- i2c_inst_regs(5),
-};
-
-static const struct dce_i2c_shift i2c_shifts = {
- I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
-};
-
-static const struct dce_i2c_mask i2c_masks = {
- I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
-};
-
-static struct dce_i2c_hw *dcn31_i2c_hw_create(
- struct dc_context *ctx,
- uint32_t inst)
-{
- struct dce_i2c_hw *dce_i2c_hw =
- kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
-
- if (!dce_i2c_hw)
- return NULL;
-
- dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
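- /*
-  * i2c_hw_regs[] is zero-based but built from HW instances 1..5 via
-  * i2c_inst_regs() above, so index 'inst' selects HW engine inst + 1.
-  */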
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} -static struct mpc *dcn31_mpc_create( - struct dc_context *ctx, - int num_mpcc, - int num_rmu) -{ - struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), - GFP_KERNEL); - - if (!mpc30) - return NULL; - - dcn30_mpc_construct(mpc30, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - num_mpcc, - num_rmu); - - return &mpc30->base; -} - -static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) -{ - int i; - - struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), - GFP_KERNEL); - - if (!hubbub3) - return NULL; - - hubbub31_construct(hubbub3, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask, - dcn3_16_ip.det_buffer_size_kbytes, - dcn3_16_ip.pixel_chunk_size_kbytes, - dcn3_16_ip.config_return_buffer_size_in_kbytes); - - - for (i = 0; i < res_cap_dcn31.num_vmid; i++) { - struct dcn20_vmid *vmid = &hubbub3->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - - return &hubbub3->base; -} - -static struct timing_generator *dcn31_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &optc_regs[instance]; - tgn10->tg_shift = &optc_shift; - tgn10->tg_mask = &optc_mask; - - dcn31_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -static struct link_encoder *dcn31_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - - if (!enc20) - return NULL; - - dcn31_link_encoder_construct(enc20, - enc_init_data, - &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc20->enc10.base; -} - -/* Create a minimal link encoder object not associated with a particular - * physical connector. 
- * resource_funcs.link_enc_create_minimal - */ -static struct link_encoder *dcn31_link_enc_create_minimal( - struct dc_context *ctx, enum engine_id eng_id) -{ - struct dcn20_link_encoder *enc20; - - if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) - return NULL; - - enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) - return NULL; - - dcn31_link_encoder_construct_minimal( - enc20, - ctx, - &link_enc_feature, - &link_enc_regs[eng_id - ENGINE_ID_DIGA], - eng_id); - - return &enc20->enc10.base; -} - -static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dcn31_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dcn31_panel_cntl_construct(panel_cntl, init_data); - - return &panel_cntl->base; -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); - -} - -static struct audio *dcn31_create_audio( - struct dc_context *ctx, unsigned int inst) -{ - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct vpg *dcn31_vpg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); - - if (!vpg31) - return NULL; - - vpg31_construct(vpg31, ctx, inst, - &vpg_regs[inst], - &vpg_shift, - &vpg_mask); - - return &vpg31->base; -} - -static struct afmt *dcn31_afmt_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); - - if (!afmt31) - return NULL; - - afmt31_construct(afmt31, ctx, inst, - &afmt_regs[inst], - &afmt_shift, - &afmt_mask); - - // Light sleep by default, no need to power down here - - return &afmt31->base; -} - - -static struct apg *dcn31_apg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); - - if (!apg31) - return NULL; - - apg31_construct(apg31, ctx, inst, - &apg_regs[inst], - &apg_shift, - &apg_mask); - - return &apg31->base; -} - - -static struct stream_encoder *dcn316_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1; - struct vpg *vpg; - struct afmt *afmt; - int vpg_inst; - int afmt_inst; - - /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else - return NULL; - - enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - vpg = dcn31_vpg_create(ctx, vpg_inst); - afmt = dcn31_afmt_create(ctx, afmt_inst); - - if (!enc1 || !vpg || !afmt) { - kfree(enc1); - kfree(vpg); - kfree(afmt); - return NULL; - } - - dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, - eng_id, vpg, afmt, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - - -static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; - struct vpg *vpg; - struct apg *apg; - uint32_t hpo_dp_inst; - uint32_t vpg_inst; - uint32_t apg_inst; - - ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); - hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; - - /* Mapping of VPG register 
blocks to HPO DP block instance: - * VPG[6] -> HPO_DP[0] - * VPG[7] -> HPO_DP[1] - * VPG[8] -> HPO_DP[2] - * VPG[9] -> HPO_DP[3] - */ - vpg_inst = hpo_dp_inst + 6; - - /* Mapping of APG register blocks to HPO DP block instance: - * APG[0] -> HPO_DP[0] - * APG[1] -> HPO_DP[1] - * APG[2] -> HPO_DP[2] - * APG[3] -> HPO_DP[3] - */ - apg_inst = hpo_dp_inst; - - /* allocate HPO stream encoder and create VPG sub-block */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); - vpg = dcn31_vpg_create(ctx, vpg_inst); - apg = dcn31_apg_create(ctx, apg_inst); - - if (!hpo_dp_enc31 || !vpg || !apg) { - kfree(hpo_dp_enc31); - kfree(vpg); - kfree(apg); - return NULL; - } - - dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, - hpo_dp_inst, eng_id, vpg, apg, - &hpo_dp_stream_enc_regs[hpo_dp_inst], - &hpo_dp_se_shift, &hpo_dp_se_mask); - - return &hpo_dp_enc31->base; -} - -static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( - uint8_t inst, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; - - /* allocate HPO link encoder */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); - - hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, - &hpo_dp_link_enc_regs[inst], - &hpo_dp_le_shift, &hpo_dp_le_mask); - - return &hpo_dp_enc31->base; -} - - -static struct dce_hwseq *dcn31_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn31_create_audio, - .create_stream_encoder = dcn316_stream_encoder_create, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn31_hwseq_create, -}; - -static void dcn316_resource_destruct(struct dcn316_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) { - if (pool->base.stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); - pool->base.stream_enc[i]->vpg = NULL; - } - if (pool->base.stream_enc[i]->afmt != NULL) { - kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); - pool->base.stream_enc[i]->afmt = NULL; - } - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { - if (pool->base.hpo_dp_stream_enc[i] != NULL) { - if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); - pool->base.hpo_dp_stream_enc[i]->vpg = NULL; - } - if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { - kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); - pool->base.hpo_dp_stream_enc[i]->apg = NULL; - } - kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); - pool->base.hpo_dp_stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { - if (pool->base.hpo_dp_link_enc[i] != NULL) { - kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); - pool->base.hpo_dp_link_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - if (pool->base.dscs[i] != NULL) - 
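- /* dcn20_dsc_destroy() frees the DSC container and NULLs the entry */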
dcn20_dsc_destroy(&pool->base.dscs[i]); - } - - if (pool->base.mpc != NULL) { - kfree(TO_DCN20_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.dpps[i] != NULL) - dcn31_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dwb; i++) { - if (pool->base.dwbc[i] != NULL) { - kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); - pool->base.dwbc[i] = NULL; - } - if (pool->base.mcif_wb[i] != NULL) { - kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); - pool->base.mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn20_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { - if (pool->base.mpc_lut[i] != NULL) { - dc_3dlut_func_release(pool->base.mpc_lut[i]); - pool->base.mpc_lut[i] = NULL; - } - if (pool->base.mpc_shaper[i] != NULL) { - dc_transfer_func_release(pool->base.mpc_shaper[i]); - pool->base.mpc_shaper[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn20_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.multiple_abms[i] != NULL) - dce_abm_destroy(&pool->base.multiple_abms[i]); - } - - if (pool->base.psr != NULL) - dmub_psr_destroy(&pool->base.psr); - - if (pool->base.dccg != NULL) - dcn_dccg_destroy(&pool->base.dccg); -} - -static struct hubp *dcn31_hubp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); - - if (!hubp2) - return NULL; - - if (hubp31_construct(hubp2, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp2->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp2); - return NULL; -} - -static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), - GFP_KERNEL); - - if (!dwbc30) { - dm_error("DC: failed to create dwbc30!\n"); - return false; - } - - 
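- /* bind the per-instance DWB register set, then publish it to the pool */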
dcn30_dwbc_construct(dwbc30, ctx, - &dwbc30_regs[i], - &dwbc30_shift, - &dwbc30_mask, - i); - - pool->dwbc[i] = &dwbc30->base; - } - return true; -} - -static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), - GFP_KERNEL); - - if (!mcif_wb30) { - dm_error("DC: failed to create mcif_wb30!\n"); - return false; - } - - dcn30_mmhubbub_construct(mcif_wb30, ctx, - &mcif_wb30_regs[i], - &mcif_wb30_shift, - &mcif_wb30_mask, - i); - - pool->mcif_wb[i] = &mcif_wb30->base; - } - return true; -} - -static struct display_stream_compressor *dcn31_dsc_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - return &dsc->base; -} - -static void dcn316_destroy_resource_pool(struct resource_pool **pool) -{ - struct dcn316_resource_pool *dcn31_pool = TO_DCN316_RES_POOL(*pool); - - dcn316_resource_destruct(dcn31_pool); - kfree(dcn31_pool); - *pool = NULL; -} - -static struct clock_source *dcn31_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn31_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - - BREAK_TO_DEBUGGER(); - return NULL; -} - -static bool is_dual_plane(enum surface_pixel_format format) -{ - return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; -} - -static int dcn316_populate_dml_pipes_from_context( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate) -{ - int i, pipe_cnt; - struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; - const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB; - - DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); - DC_FP_END(); - - for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { - struct dc_crtc_timing *timing; - - if (!res_ctx->pipe_ctx[i].stream) - continue; - pipe = &res_ctx->pipe_ctx[i]; - timing = &pipe->stream->timing; - - /* - * Immediate flip can be set dynamically after enabling the plane. - * We need to require support for immediate flip or underflow can be - * intermittently experienced depending on peak b/w requirements. 
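- *
- * Further down, the detile buffer (DET) is carved out of the usable
- * CRB in whole segments per active pipe:
- *   det = (max_usable_det / SEG_KB / pipe_cnt) * SEG_KB
- * Illustrative arithmetic only (the real values come from the DCN3_16
- * defines): with max_usable_det = 960 KB, SEG_KB = 64 and 4 pipes,
- * det = (960 / 64 / 4) * 64 = 192 KB per pipe; the integer division
- * rounds down to a whole segment count before clamping to
- * DCN3_16_MAX_DET_SIZE.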
- */ - pipes[pipe_cnt].pipe.src.immediate_flip = true; - - pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; - pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; - pipes[pipe_cnt].pipe.src.dcc_rate = 3; - pipes[pipe_cnt].dout.dsc_input_bpc = 0; - DC_FP_START(); - dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); - DC_FP_END(); - - if (pipes[pipe_cnt].dout.dsc_enable) { - switch (timing->display_color_depth) { - case COLOR_DEPTH_888: - pipes[pipe_cnt].dout.dsc_input_bpc = 8; - break; - case COLOR_DEPTH_101010: - pipes[pipe_cnt].dout.dsc_input_bpc = 10; - break; - case COLOR_DEPTH_121212: - pipes[pipe_cnt].dout.dsc_input_bpc = 12; - break; - default: - ASSERT(0); - break; - } - } - - pipe_cnt++; - } - - if (pipe_cnt) - context->bw_ctx.dml.ip.det_buffer_size_kbytes = - (max_usable_det / DCN3_16_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_16_CRB_SEGMENT_SIZE_KB; - if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_16_MAX_DET_SIZE) - context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_16_MAX_DET_SIZE; - ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_16_DEFAULT_DET_SIZE); - dc->config.enable_4to1MPC = false; - if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { - if (is_dual_plane(pipe->plane_state->format) - && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { - dc->config.enable_4to1MPC = true; - context->bw_ctx.dml.ip.det_buffer_size_kbytes = - (max_usable_det / DCN3_16_CRB_SEGMENT_SIZE_KB / 4) * DCN3_16_CRB_SEGMENT_SIZE_KB; - } else if (!is_dual_plane(pipe->plane_state->format)) { - context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; - pipes[0].pipe.src.unbounded_req_mode = true; - } - } - - return pipe_cnt; -} - -static void dcn316_get_panel_config_defaults(struct dc_panel_config *panel_config) -{ - *panel_config = panel_config_defaults; -} - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap -}; - -static struct resource_funcs dcn316_res_pool_funcs = { - .destroy = dcn316_destroy_resource_pool, - .link_enc_create = dcn31_link_encoder_create, - .link_enc_create_minimal = dcn31_link_enc_create_minimal, - .link_encs_assign = link_enc_cfg_link_encs_assign, - .link_enc_unassign = link_enc_cfg_link_enc_unassign, - .panel_cntl_create = dcn31_panel_cntl_create, - .validate_bandwidth = dcn31_validate_bandwidth, - .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, - .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, - .populate_dml_pipes = dcn316_populate_dml_pipes_from_context, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, - .release_pipe = dcn20_release_pipe, - .add_stream_to_ctx = dcn30_add_stream_to_ctx, - .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context, - .set_mcif_arb_params = dcn31_set_mcif_arb_params, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, - .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, - .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, - .update_bw_bounding_box = dcn316_update_bw_bounding_box, - .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, - .get_panel_config_defaults = dcn316_get_panel_config_defaults, -}; - -static bool dcn316_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dcn316_resource_pool *pool) -{ - int i; - struct dc_context *ctx = 
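- /*
-  * Construction order below: hardcoded caps first, then clock sources,
-  * DCCG, IRQs, HUBBUB, per-pipe HUBP/DPP/OPP/TG, PSR and ABM, MPC and
-  * DSC, DWB/MMHUBBUB, and AUX/I2C; any failure unwinds through
-  * create_fail, which runs dcn316_resource_destruct() on the partially
-  * built pool.
-  */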
dc->ctx;
- struct irq_service_init_data init_data;
-
- ctx->dc_bios->regs = &bios_regs;
-
- pool->base.res_cap = &res_cap_dcn31;
-
- pool->base.funcs = &dcn316_res_pool_funcs;
-
- /*************************************************
- * Resource + asic cap hardcoding *
- *************************************************/
- pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
- pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
- pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
- dc->caps.max_downscale_ratio = 600;
- dc->caps.i2c_speed_in_khz = 100;
- dc->caps.i2c_speed_in_khz_hdcp = 5; /* 1.5 w/a applied by default */
- dc->caps.max_cursor_size = 256;
- dc->caps.min_horizontal_blanking_period = 80;
- dc->caps.dmdata_alloc_size = 2048;
- dc->caps.max_slave_planes = 2;
- dc->caps.max_slave_yuv_planes = 2;
- dc->caps.max_slave_rgb_planes = 2;
- dc->caps.post_blend_color_processing = true;
- dc->caps.force_dp_tps4_for_cp2520 = true;
- if (dc->config.forceHBR2CP2520)
- dc->caps.force_dp_tps4_for_cp2520 = false;
- dc->caps.dp_hpo = true;
- dc->caps.dp_hdmi21_pcon_support = true;
- dc->caps.edp_dsc_support = true;
- dc->caps.extended_aux_timeout_support = true;
- dc->caps.dmcub_support = true;
- dc->caps.is_apu = true;
-
- /* Color pipeline capabilities */
- dc->caps.color.dpp.dcn_arch = 1;
- dc->caps.color.dpp.input_lut_shared = 0;
- dc->caps.color.dpp.icsc = 1;
- dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
- dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
- dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
- dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
- dc->caps.color.dpp.dgam_rom_caps.pq = 1;
- dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
- dc->caps.color.dpp.post_csc = 1;
- dc->caps.color.dpp.gamma_corr = 1;
- dc->caps.color.dpp.dgam_rom_for_yuv = 0;
-
- dc->caps.color.dpp.hw_3d_lut = 1;
- dc->caps.color.dpp.ogam_ram = 1;
- // no OGAM ROM on DCN316
- dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
- dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
- dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
- dc->caps.color.dpp.ogam_rom_caps.pq = 0;
- dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
- dc->caps.color.dpp.ocsc = 0;
-
- dc->caps.color.mpc.gamut_remap = 1;
- dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2
- dc->caps.color.mpc.ogam_ram = 1;
- dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
- dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
- dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
- dc->caps.color.mpc.ogam_rom_caps.pq = 0;
- dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
- dc->caps.color.mpc.ocsc = 1;
-
- /* read VBIOS LTTPR caps */
- {
- if (ctx->dc_bios->funcs->get_lttpr_caps) {
- enum bp_result bp_query_result;
- uint8_t is_vbios_lttpr_enable = 0;
-
- bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
- dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
- }
-
- /* interop bit is implicit */
- {
- dc->caps.vbios_lttpr_aware = true;
- }
- }
-
- if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
- dc->debug = debug_defaults_drv;
-
- // Init the vm_helper
- if (dc->vm_helper)
- vm_helper_init(dc->vm_helper, 16);
-
- /*************************************************
- * Create resources *
- *************************************************/
-
- /* Clock Sources for Pixel Clock */
- pool->base.clock_sources[DCN31_CLK_SRC_PLL0] =
- dcn31_clock_source_create(ctx, ctx->dc_bios,
- CLOCK_SOURCE_COMBO_PHY_PLL0,
- &clk_src_regs[0], false);
- pool->base.clock_sources[DCN31_CLK_SRC_PLL1] =
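- /*
-  * One pixel-clock source per PHY PLL (0..4); the DP DTO source created
-  * below reuses PLL0's register block (see the todo about not reusing
-  * phy_pll registers).
-  */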
dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = - dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = - dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = - dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - - pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL; - - /* todo: not reuse phy_pll registers */ - pool->base.dp_clock_source = - dcn31_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* TODO: DCCG */ - pool->base.dccg = dccg31_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->base.dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* TODO: IRQ */ - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dcn31_create(&init_data); - if (!pool->base.irqs) - goto create_fail; - - /* HUBBUB */ - pool->base.hubbub = dcn31_hubbub_create(ctx); - if (pool->base.hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - /* HUBPs, DPPs, OPPs and TGs */ - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.hubps[i] = dcn31_hubp_create(ctx, i); - if (pool->base.hubps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create hubps!\n"); - goto create_fail; - } - - pool->base.dpps[i] = dcn31_dpp_create(ctx, i); - if (pool->base.dpps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create dpps!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - pool->base.opps[i] = dcn31_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.timing_generators[i] = dcn31_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - pool->base.timing_generator_count = i; - - /* PSR */ - pool->base.psr = dmub_psr_create(ctx); - if (pool->base.psr == NULL) { - dm_error("DC: failed to create psr obj!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* ABM */ - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.multiple_abms[i] = dmub_abm_create(ctx, - &abm_regs[i], - &abm_shift, - &abm_mask); - if (pool->base.multiple_abms[i] == NULL) { - dm_error("DC: failed to create abm for pipe %d!\n", i); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* MPC and DSC */ - pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); - if (pool->base.mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - pool->base.dscs[i] = dcn31_dsc_create(ctx, i); - if (pool->base.dscs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to 
create display stream compressor %d!\n", i); - goto create_fail; - } - } - - /* DWB and MMHUBBUB */ - if (!dcn31_dwbc_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dwbc!\n"); - goto create_fail; - } - - if (!dcn31_mmhubbub_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mcif_wb!\n"); - goto create_fail; - } - - /* AUX and I2C */ - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto create_fail; - } - pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create hw i2c!!\n"); - goto create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto create_fail; - - /* HW Sequencer and Plane caps */ - dcn31_hw_sequencer_construct(dc); - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - dc->dcn_ip->max_num_dpp = dcn3_16_ip.max_num_dpp; - - return true; - -create_fail: - - dcn316_resource_destruct(pool); - - return false; -} - -struct resource_pool *dcn316_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc) -{ - struct dcn316_resource_pool *pool = - kzalloc(sizeof(struct dcn316_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dcn316_resource_construct(init_data->num_virtual_links, dc, pool)) - return &pool->base; - - BREAK_TO_DEBUGGER(); - kfree(pool); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h deleted file mode 100644 index aba6d63413..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef _DCN316_RESOURCE_H_ -#define _DCN316_RESOURCE_H_ - -#include "core_types.h" - -#define TO_DCN316_RES_POOL(pool)\ - container_of(pool, struct dcn316_resource_pool, base) - -extern struct _vcs_dpi_ip_params_st dcn3_16_ip; - -struct dcn316_resource_pool { - struct resource_pool base; -}; - -struct resource_pool *dcn316_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); - -#endif /* _DCN316_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile index 8bb2513072..5314770fff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile @@ -10,10 +10,10 @@ # # Makefile for dcn32. -DCN32 = dcn32_resource.o dcn32_hubbub.o dcn32_init.o dcn32_dccg.o \ - dcn32_dccg.o dcn32_optc.o dcn32_mmhubbub.o dcn32_hubp.o dcn32_dpp.o \ - dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_hpo_dp_link_encoder.o \ - dcn32_resource_helpers.o dcn32_mpc.o +DCN32 = dcn32_hubbub.o dcn32_dccg.o \ + dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \ + dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \ + dcn32_hpo_dp_link_encoder.o AMD_DAL_DCN32 = $(addprefix $(AMDDALPATH)/dc/dcn32/,$(DCN32)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index d761b0df28..8a0460e863 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -34,6 +34,7 @@ #include "dc_bios_types.h" #include "link_enc_cfg.h" +#include "dc_dmub_srv.h" #include "gpio_service_interface.h" #ifndef MIN @@ -61,6 +62,38 @@ #define AUX_REG_WRITE(reg_name, val) \ dm_write_reg(CTX, AUX_REG(reg_name), val) +static uint8_t phy_id_from_transmitter(enum transmitter t) +{ + uint8_t phy_id; + + switch (t) { + case TRANSMITTER_UNIPHY_A: + phy_id = 0; + break; + case TRANSMITTER_UNIPHY_B: + phy_id = 1; + break; + case TRANSMITTER_UNIPHY_C: + phy_id = 2; + break; + case TRANSMITTER_UNIPHY_D: + phy_id = 3; + break; + case TRANSMITTER_UNIPHY_E: + phy_id = 4; + break; + case TRANSMITTER_UNIPHY_F: + phy_id = 5; + break; + case TRANSMITTER_UNIPHY_G: + phy_id = 6; + break; + default: + phy_id = 0; + break; + } + return phy_id; +} void enc32_hw_init(struct link_encoder *enc) { @@ -117,38 +150,50 @@ void dcn32_link_encoder_enable_dp_output( } } -static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc) +static bool query_dp_alt_from_dmub(struct link_encoder *enc, + union dmub_rb_cmd *cmd) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - uint32_t dp_alt_mode_disable = 0; - bool is_usb_c_alt_mode = false; - if (enc->features.flags.bits.DP_IS_USB_C) { - /* if value == 1 alt mode is disabled, otherwise it is enabled */ - REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); - is_usb_c_alt_mode = (dp_alt_mode_disable == 0); - } + memset(cmd, 0, sizeof(*cmd)); + cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS; + cmd->query_dp_alt.header.sub_type = + DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; + cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); + cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); + + if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) + return false; - return is_usb_c_alt_mode; + return true; } -static void 
dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, +bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc) +{ + union dmub_rb_cmd cmd; + + if (!query_dp_alt_from_dmub(enc, &cmd)) + return false; + + return (cmd.query_dp_alt.data.is_dp_alt_disable == 0); +} + +void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings) { - struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - uint32_t is_in_usb_c_dp4_mode = 0; + union dmub_rb_cmd cmd; dcn10_link_encoder_get_max_link_cap(enc, link_settings); - /* in usb c dp2 mode, max lane count is 2 */ - if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) { - REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); - if (!is_in_usb_c_dp4_mode) - link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); - } + if (!query_dp_alt_from_dmub(enc, &cmd)) + return; + if (cmd.query_dp_alt.data.is_usb && + cmd.query_dp_alt.data.is_dp4 == 0) + link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); } + static const struct link_encoder_funcs dcn32_link_enc_funcs = { .read_state = link_enc2_read_state, .validate_output_with_stream = @@ -203,12 +248,12 @@ void dcn32_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; + if (enc10->base.connector.id == CONNECTOR_ID_USBC) + enc10->base.features.flags.bits.DP_IS_USB_C = 1; enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; enc10->base.features = *enc_features; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; enc10->base.transmitter = init_data->transmitter; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h index bbcfce06be..2d5f25290e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h @@ -53,4 +53,9 @@ void dcn32_link_encoder_enable_dp_output( const struct dc_link_settings *link_settings, enum clock_source_id clock_source); +bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc); + +void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings); + #endif /* __DC_LINK_ENCODER__DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c deleted file mode 100644 index 427cfc8c24..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright 2022 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dce110/dce110_hwseq.h" -#include "dcn10/dcn10_hwseq.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn21/dcn21_hwseq.h" -#include "dcn30/dcn30_hwseq.h" -#include "dcn31/dcn31_hwseq.h" -#include "dcn32/dcn32_hwseq.h" -#include "dcn32_init.h" - -static const struct hw_sequencer_funcs dcn32_funcs = { - .program_gamut_remap = dcn30_program_gamut_remap, - .init_hw = dcn32_init_hw, - .apply_ctx_to_hw = dce110_apply_ctx_to_hw, - .apply_ctx_for_surface = NULL, - .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, - .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, - .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, - .update_plane_addr = dcn20_update_plane_addr, - .update_dchub = dcn10_update_dchub, - .update_pending_status = dcn10_update_pending_status, - .program_output_csc = dcn20_program_output_csc, - .enable_accelerated_mode = dce110_enable_accelerated_mode, - .enable_timing_synchronization = dcn10_enable_timing_synchronization, - .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, - .update_info_frame = dcn31_update_info_frame, - .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, - .enable_stream = dcn20_enable_stream, - .disable_stream = dce110_disable_stream, - .unblank_stream = dcn32_unblank_stream, - .blank_stream = dce110_blank_stream, - .enable_audio_stream = dce110_enable_audio_stream, - .disable_audio_stream = dce110_disable_audio_stream, - .disable_plane = dcn20_disable_plane, - .disable_pixel_data = dcn20_disable_pixel_data, - .pipe_control_lock = dcn20_pipe_control_lock, - .interdependent_update_lock = dcn10_lock_all_pipes, - .cursor_lock = dcn10_cursor_lock, - .prepare_bandwidth = dcn32_prepare_bandwidth, - .optimize_bandwidth = dcn20_optimize_bandwidth, - .update_bandwidth = dcn20_update_bandwidth, - .set_drr = dcn10_set_drr, - .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, - .setup_stereo = dcn10_setup_stereo, - .set_avmute = dcn30_set_avmute, - .log_hw_state = dcn10_log_hw_state, - .get_hw_state = dcn10_get_hw_state, - .clear_status_bits = dcn10_clear_status_bits, - .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, - .edp_power_control = dce110_edp_power_control, - .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, - .edp_wait_for_T12 = dce110_edp_wait_for_T12, - .set_cursor_position = dcn10_set_cursor_position, - .set_cursor_attribute = dcn10_set_cursor_attribute, - .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .set_clock = dcn10_set_clock, - .get_clock = dcn10_get_clock, - .program_triplebuffer = dcn20_program_triple_buffer, - .enable_writeback = dcn30_enable_writeback, - .disable_writeback = dcn30_disable_writeback, - .update_writeback = dcn30_update_writeback, - .mmhubbub_warmup = dcn30_mmhubbub_warmup, - .dmdata_status_done = dcn20_dmdata_status_done, - .program_dmdata_engine = dcn30_program_dmdata_engine, - .set_dmdata_attributes = dcn20_set_dmdata_attributes, - .init_sys_ctx = dcn20_init_sys_ctx, - .init_vm_ctx = dcn20_init_vm_ctx, - .set_flip_control_gsl = 
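- /*
-  * Each hook points at the newest DCN generation that changed its
-  * behavior; entries left at dcn10/dcn20/dcn21/dcn30 are inherited
-  * unchanged from those generations.
-  */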
dcn20_set_flip_control_gsl, - .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, - .calc_vupdate_position = dcn10_calc_vupdate_position, - .apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations, - .does_plane_fit_in_mall = NULL, - .set_backlight_level = dcn21_set_backlight_level, - .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, - .hardware_release = dcn30_hardware_release, - .set_pipe = dcn21_set_pipe, - .enable_lvds_link_output = dce110_enable_lvds_link_output, - .enable_tmds_link_output = dce110_enable_tmds_link_output, - .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dcn32_disable_link_output, - .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, - .get_dcc_en_bits = dcn10_get_dcc_en_bits, - .commit_subvp_config = dcn32_commit_subvp_config, - .enable_phantom_streams = dcn32_enable_phantom_streams, - .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, - .update_visual_confirm_color = dcn10_update_visual_confirm_color, - .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast, - .update_phantom_vp_position = dcn32_update_phantom_vp_position, - .update_dsc_pg = dcn32_update_dsc_pg, - .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, - .blank_phantom = dcn32_blank_phantom, - .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless, -}; - -static const struct hwseq_private_funcs dcn32_private_funcs = { - .init_pipes = dcn10_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, - .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, - .update_mpcc = dcn20_update_mpcc, - .set_input_transfer_func = dcn32_set_input_transfer_func, - .set_output_transfer_func = dcn32_set_output_transfer_func, - .power_down = dce110_power_down, - .enable_display_power_gating = dcn10_dummy_display_power_gating, - .blank_pixel_data = dcn20_blank_pixel_data, - .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, - .enable_stream_timing = dcn20_enable_stream_timing, - .edp_backlight_control = dce110_edp_backlight_control, - .disable_stream_gating = dcn20_disable_stream_gating, - .enable_stream_gating = dcn20_enable_stream_gating, - .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, - .did_underflow_occur = dcn10_did_underflow_occur, - .init_blank = dcn32_init_blank, - .disable_vga = dcn20_disable_vga, - .bios_golden_init = dcn10_bios_golden_init, - .plane_atomic_disable = dcn20_plane_atomic_disable, - .plane_atomic_power_down = dcn10_plane_atomic_power_down, - .enable_power_gating_plane = dcn32_enable_power_gating_plane, - .hubp_pg_control = dcn32_hubp_pg_control, - .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, - .update_odm = dcn32_update_odm, - .dsc_pg_control = dcn32_dsc_pg_control, - .dsc_pg_status = dcn32_dsc_pg_status, - .set_hdr_multiplier = dcn10_set_hdr_multiplier, - .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, - .wait_for_blank_complete = dcn20_wait_for_blank_complete, - .dccg_init = dcn20_dccg_init, - .set_mcm_luts = dcn32_set_mcm_luts, - .program_mall_pipe_config = dcn32_program_mall_pipe_config, - .update_force_pstate = dcn32_update_force_pstate, - .update_mall_sel = dcn32_update_mall_sel, - .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, - .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, - .resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio, - .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, -}; - -void 
dcn32_hw_sequencer_init_functions(struct dc *dc) -{ - dc->hwss = dcn32_funcs; - dc->hwseq->funcs = dcn32_private_funcs; - -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h deleted file mode 100644 index 89a591eb2c..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2022 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_DCN32_INIT_H__ -#define __DC_DCN32_INIT_H__ - -struct dc; - -void dcn32_hw_sequencer_init_functions(struct dc *dc); - -#endif /* __DC_DCN32_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c index 3279b61022..e408e859b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c @@ -71,12 +71,13 @@ void mpc32_power_on_blnd_lut( { struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0, MPCC_MCM_1DLUT_MEM_PWR_DIS, power_on); + if (mpc->ctx->dc->debug.enable_mem_low_power.bits.cm) { if (power_on) { REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0); REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5); } else if (!mpc->ctx->dc->debug.disable_mem_low_power) { - ASSERT(false); /* TODO: change to mpc * dpp_base->ctx->dc->optimized_required = true; * dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c deleted file mode 100644 index 8234935433..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c +++ /dev/null @@ -1,374 +0,0 @@ -/* - * Copyright 2022 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dcn32_optc.h" - -#include "dcn30/dcn30_optc.h" -#include "dcn31/dcn31_optc.h" -#include "reg_helper.h" -#include "dc.h" -#include "dcn_calc_math.h" -#include "dc_dmub_srv.h" - -#define REG(reg)\ - optc1->tg_regs->reg - -#define CTX \ - optc1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - optc1->tg_shift->field_name, optc1->tg_mask->field_name - -static void optc32_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, - struct dc_crtc_timing *timing) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t memory_mask = 0; - int h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; - int mpcc_hactive = h_active / opp_cnt; - /* Each memory instance is 2048x(32x2) bits to support half line of 4096 */ - int odm_mem_count = (h_active + 2047) / 2048; - - /* - * display <= 4k : 2 memories + 2 pipes - * 4k < display <= 8k : 4 memories + 2 pipes - * 8k < display <= 12k : 6 memories + 4 pipes - */ - if (opp_cnt == 4) { - if (odm_mem_count <= 2) - memory_mask = 0x3; - else if (odm_mem_count <= 4) - memory_mask = 0xf; - else - memory_mask = 0x3f; - } else { - if (odm_mem_count <= 2) - memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2); - else if (odm_mem_count <= 4) - memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); - else - memory_mask = 0x77; - } - - REG_SET(OPTC_MEMORY_CONFIG, 0, - OPTC_MEM_SEL, memory_mask); - - if (opp_cnt == 2) { - REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 1, - OPTC_SEG0_SRC_SEL, opp_id[0], - OPTC_SEG1_SRC_SEL, opp_id[1]); - } else if (opp_cnt == 4) { - REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 3, - OPTC_SEG0_SRC_SEL, opp_id[0], - OPTC_SEG1_SRC_SEL, opp_id[1], - OPTC_SEG2_SRC_SEL, opp_id[2], - OPTC_SEG3_SRC_SEL, opp_id[3]); - } - - REG_UPDATE(OPTC_WIDTH_CONTROL, - OPTC_SEGMENT_WIDTH, mpcc_hactive); - - REG_UPDATE(OTG_H_TIMING_CNTL, - OTG_H_TIMING_DIV_MODE, opp_cnt - 1); - optc1->opp_count = opp_cnt; -} - -void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments) -{ - struct optc *optc1 = DCN10TG_FROM_TG(tg); - int segments; - - REG_GET(OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, &segments); - - switch (segments) { - case 0: - *odm_combine_segments = 1; - break; - case 1: - *odm_combine_segments = 2; - break; - case 3: - *odm_combine_segments = 4; - break; - /* 2 is reserved */ - case 2: - default: - *odm_combine_segments = -1; - } -} - -void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE(OTG_H_TIMING_CNTL, - OTG_H_TIMING_DIV_MODE_MANUAL, manual_mode ? 1 : 0); -} -/** - * optc32_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator. - * - * @optc: timing_generator instance. - * - * Return: If CRTC is enabled, return true. 
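- *
- * The sequence below has no failure path, so this always returns true
- * once the enable writes are submitted.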
- */ -static bool optc32_enable_crtc(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - /* opp instance for OTG, 1 to 1 mapping and odm will adjust */ - REG_UPDATE(OPTC_DATA_SOURCE_SELECT, - OPTC_SEG0_SRC_SEL, optc->inst); - - /* VTG enable first is for HW workaround */ - REG_UPDATE(CONTROL, - VTG0_ENABLE, 1); - - REG_SEQ_START(); - - /* Enable CRTC */ - REG_UPDATE_2(OTG_CONTROL, - OTG_DISABLE_POINT_CNTL, 2, - OTG_MASTER_EN, 1); - - REG_SEQ_SUBMIT(); - REG_SEQ_WAIT_DONE(); - - return true; -} - -/* disable_crtc */ -static bool optc32_disable_crtc(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT, - OPTC_SEG0_SRC_SEL, 0xf, - OPTC_SEG1_SRC_SEL, 0xf, - OPTC_SEG2_SRC_SEL, 0xf, - OPTC_SEG3_SRC_SEL, 0xf, - OPTC_NUM_OF_INPUT_SEGMENT, 0); - - REG_UPDATE(OPTC_MEMORY_CONFIG, - OPTC_MEM_SEL, 0); - - /* disable otg request until end of the first line - * in the vertical blank region - */ - REG_UPDATE(OTG_CONTROL, - OTG_MASTER_EN, 0); - - REG_UPDATE(CONTROL, - VTG0_ENABLE, 0); - - /* CRTC disabled, so disable clock. */ - REG_WAIT(OTG_CLOCK_CONTROL, - OTG_BUSY, 0, - 1, 150000); - - return true; -} - -static void optc32_phantom_crtc_post_enable(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - /* Disable immediately. */ - REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 0, OTG_MASTER_EN, 0); - - /* CRTC disabled, so disable clock. */ - REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000); -} - -static void optc32_disable_phantom_otg(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT, - OPTC_SEG0_SRC_SEL, 0xf, - OPTC_SEG1_SRC_SEL, 0xf, - OPTC_SEG2_SRC_SEL, 0xf, - OPTC_SEG3_SRC_SEL, 0xf, - OPTC_NUM_OF_INPUT_SEGMENT, 0); - - REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0); -} - -void optc32_set_odm_bypass(struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - enum h_timing_div_mode h_div = H_TIMING_NO_DIV; - - REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 0, - OPTC_SEG0_SRC_SEL, optc->inst, - OPTC_SEG1_SRC_SEL, 0xf, - OPTC_SEG2_SRC_SEL, 0xf, - OPTC_SEG3_SRC_SEL, 0xf - ); - - h_div = optc1_is_two_pixels_per_containter(dc_crtc_timing); - REG_UPDATE(OTG_H_TIMING_CNTL, - OTG_H_TIMING_DIV_MODE, h_div); - - REG_SET(OPTC_MEMORY_CONFIG, 0, - OPTC_MEM_SEL, 0); - optc1->opp_count = 1; -} - -static void optc32_setup_manual_trigger(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - struct dc *dc = optc->ctx->dc; - - if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams) - dc_dmub_srv_set_drr_manual_trigger_cmd(dc, optc->inst); - else { - /* - * MIN_MASK_EN is gone and MASK is now always enabled. - * - * To get it to it work with manual trigger we need to make sure - * we program the correct bit. 
- */ - REG_UPDATE_4(OTG_V_TOTAL_CONTROL, - OTG_V_TOTAL_MIN_SEL, 1, - OTG_V_TOTAL_MAX_SEL, 1, - OTG_FORCE_LOCK_ON_EVENT, 0, - OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ - - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); - } -} - -static void optc32_set_drr( - struct timing_generator *optc, - const struct drr_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - if (params != NULL && - params->vertical_total_max > 0 && - params->vertical_total_min > 0) { - - if (params->vertical_total_mid != 0) { - - REG_SET(OTG_V_TOTAL_MID, 0, - OTG_V_TOTAL_MID, params->vertical_total_mid - 1); - - REG_UPDATE_2(OTG_V_TOTAL_CONTROL, - OTG_VTOTAL_MID_REPLACING_MAX_EN, 1, - OTG_VTOTAL_MID_FRAME_NUM, - (uint8_t)params->vertical_total_mid_frame_num); - - } - - optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1); - } - - optc32_setup_manual_trigger(optc); -} - -static struct timing_generator_funcs dcn32_tg_funcs = { - .validate_timing = optc1_validate_timing, - .program_timing = optc1_program_timing, - .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, - .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, - .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, - .program_global_sync = optc1_program_global_sync, - .enable_crtc = optc32_enable_crtc, - .disable_crtc = optc32_disable_crtc, - .phantom_crtc_post_enable = optc32_phantom_crtc_post_enable, - .disable_phantom_crtc = optc32_disable_phantom_otg, - /* used by enable_timing_synchronization. Not need for FPGA */ - .is_counter_moving = optc1_is_counter_moving, - .get_position = optc1_get_position, - .get_frame_count = optc1_get_vblank_counter, - .get_scanoutpos = optc1_get_crtc_scanoutpos, - .get_otg_active_size = optc1_get_otg_active_size, - .set_early_control = optc1_set_early_control, - /* used by enable_timing_synchronization. 
Not need for FPGA */ - .wait_for_state = optc1_wait_for_state, - .set_blank_color = optc3_program_blank_color, - .did_triggered_reset_occur = optc1_did_triggered_reset_occur, - .triplebuffer_lock = optc3_triplebuffer_lock, - .triplebuffer_unlock = optc2_triplebuffer_unlock, - .enable_reset_trigger = optc1_enable_reset_trigger, - .enable_crtc_reset = optc1_enable_crtc_reset, - .disable_reset_trigger = optc1_disable_reset_trigger, - .lock = optc3_lock, - .unlock = optc1_unlock, - .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, - .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, - .enable_optc_clock = optc1_enable_optc_clock, - .set_drr = optc32_set_drr, - .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, - .set_vtotal_min_max = optc3_set_vtotal_min_max, - .set_static_screen_control = optc1_set_static_screen_control, - .program_stereo = optc1_program_stereo, - .is_stereo_left_eye = optc1_is_stereo_left_eye, - .tg_init = optc3_tg_init, - .is_tg_enabled = optc1_is_tg_enabled, - .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, - .clear_optc_underflow = optc1_clear_optc_underflow, - .setup_global_swap_lock = NULL, - .get_crc = optc1_get_crc, - .configure_crc = optc1_configure_crc, - .set_dsc_config = optc3_set_dsc_config, - .get_dsc_status = optc2_get_dsc_status, - .set_dwb_source = NULL, - .set_odm_bypass = optc32_set_odm_bypass, - .set_odm_combine = optc32_set_odm_combine, - .get_odm_combine_segments = optc32_get_odm_combine_segments, - .set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode, - .get_optc_source = optc2_get_optc_source, - .set_out_mux = optc3_set_out_mux, - .set_drr_trigger_window = optc3_set_drr_trigger_window, - .set_vtotal_change_limit = optc3_set_vtotal_change_limit, - .set_gsl = optc2_set_gsl, - .set_gsl_source_select = optc2_set_gsl_source_select, - .set_vtg_params = optc1_set_vtg_params, - .program_manual_trigger = optc2_program_manual_trigger, - .setup_manual_trigger = optc2_setup_manual_trigger, - .get_hw_timing = optc1_get_hw_timing, -}; - -void dcn32_timing_generator_init(struct optc *optc1) -{ - optc1->base.funcs = &dcn32_tg_funcs; - - optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; - optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; - - optc1->min_h_blank = 32; - optc1->min_v_blank = 3; - optc1->min_v_blank_interlace = 5; - optc1->min_h_sync_width = 4; - optc1->min_v_sync_width = 1; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h deleted file mode 100644 index 8ce3b178ca..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_OPTC_DCN32_H__ -#define __DC_OPTC_DCN32_H__ - -#include "dcn10/dcn10_optc.h" - -#define OPTC_COMMON_MASK_SH_LIST_DCN3_2(mask_sh)\ - SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ - SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\ - SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\ - SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\ - SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\ - SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_END_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_END_Y, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_Y, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ - SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ - SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ - SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\ - SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\ - SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\ - SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\ - SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ - SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\ - SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ - SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MIN_EN, mask_sh),\ - SF(OTG0_OTG_V_TOTAL_CONTROL, 
OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\ - SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\ - SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\ - SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\ - SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\ - SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\ - SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\ - SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\ - SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\ - SF(OTG0_OTG_M_CONST_DTO0, OTG_M_CONST_DTO_PHASE, mask_sh),\ - SF(OTG0_OTG_M_CONST_DTO1, OTG_M_CONST_DTO_MODULO, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ - SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ - SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\ - SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ - SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ - SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ - SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ - SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ - SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\ - SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\ - SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\ - 
SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\ - SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\ - SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh),\ - SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ - SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ - SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ - SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ - SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ - SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG2_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG3_SRC_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ - SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ - SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ - SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ - SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ - SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\ - SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ - SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ - SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ - SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\ - SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ - SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) - -void dcn32_timing_generator_init(struct optc *optc1); -void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode); -void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments); -void optc32_set_odm_bypass(struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing); - -#endif /* __DC_OPTC_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c deleted file mode 
100644 index f663de1cdc..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ /dev/null @@ -1,2880 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright 2022 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "dc.h" - -#include "dcn32_init.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn32_resource.h" - -#include "dcn20/dcn20_resource.h" -#include "dcn30/dcn30_resource.h" - -#include "dcn10/dcn10_ipp.h" -#include "dcn30/dcn30_hubbub.h" -#include "dcn31/dcn31_hubbub.h" -#include "dcn32/dcn32_hubbub.h" -#include "dcn32/dcn32_mpc.h" -#include "dcn32_hubp.h" -#include "irq/dcn32/irq_service_dcn32.h" -#include "dcn32/dcn32_dpp.h" -#include "dcn32/dcn32_optc.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn30/dcn30_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn30/dcn30_opp.h" -#include "dcn20/dcn20_dsc.h" -#include "dcn30/dcn30_vpg.h" -#include "dcn30/dcn30_afmt.h" -#include "dcn30/dcn30_dio_stream_encoder.h" -#include "dcn32/dcn32_dio_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_link_encoder.h" -#include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "dcn31/dcn31_apg.h" -#include "dcn31/dcn31_dio_link_encoder.h" -#include "dcn32/dcn32_dio_link_encoder.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" -#include "dml/display_mode_vba.h" -#include "dcn32/dcn32_dccg.h" -#include "dcn10/dcn10_resource.h" -#include "link.h" -#include "dcn31/dcn31_panel_cntl.h" - -#include "dcn30/dcn30_dwb.h" -#include "dcn32/dcn32_mmhubbub.h" - -#include "dcn/dcn_3_2_0_offset.h" -#include "dcn/dcn_3_2_0_sh_mask.h" -#include "nbio/nbio_4_3_0_offset.h" - -#include "reg_helper.h" -#include "dce/dmub_abm.h" -#include "dce/dmub_psr.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" - -#include "dml/dcn30/display_mode_vba_30.h" -#include "vm_helper.h" -#include "dcn20/dcn20_vmid.h" -#include "dml/dcn32/dcn32_fpu.h" - -#include "dml2/dml2_wrapper.h" - -#define DC_LOGGER_INIT(logger) - -enum dcn32_clk_src_array_id { - DCN32_CLK_SRC_PLL0, - DCN32_CLK_SRC_PLL1, - DCN32_CLK_SRC_PLL2, - DCN32_CLK_SRC_PLL3, - DCN32_CLK_SRC_PLL4, - DCN32_CLK_SRC_TOTAL -}; - -/* begin ********************* - * macros to expend register list macro defined in HW object header file - */ - -/* 
DCN */ -#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name -#define SR_ARR(reg_name, id) \ - REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name - -#define SR_ARR_INIT(reg_name, id, value) \ - REG_STRUCT[id].reg_name = value - -#define SRI(reg_name, block, id)\ - REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI_ARR(reg_name, block, id)\ - REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SR_ARR_I2C(reg_name, id) \ - REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name - -#define SRI_ARR_I2C(reg_name, block, id)\ - REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI_ARR_ALPHABET(reg_name, block, index, id)\ - REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ - .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name -#define SRI2_ARR(reg_name, block, id)\ - REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SRIR(var_name, reg_name, block, id)\ - .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII(reg_name, block, id)\ - REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_ARR_2(reg_name, block, id, inst)\ - REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_MPC_RMU(reg_name, block, id)\ - .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_DWB(reg_name, temp_name, block, id)\ - REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## temp_name - -#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define DCCG_SRII(reg_name, block, id)\ - REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - reg ## reg_name ## _ ## block ## id - -/* NBIO */ -#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg] - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX0_ ## reg_name -#define NBIO_SR_ARR(reg_name, id)\ - REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX0_ ## reg_name - -#undef CTX -#define CTX ctx -#define REG(reg_name) \ - (ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) - -static struct bios_registers bios_regs; - -#define bios_regs_init() \ - ( \ - NBIO_SR(BIOS_SCRATCH_3),\ - NBIO_SR(BIOS_SCRATCH_6)\ - ) - -#define clk_src_regs_init(index, pllid)\ - CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) - -static struct dce110_clk_src_regs clk_src_regs[5]; - 
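The SR()/SRI()/SRII() family defined above replaces static register tables with load-time initialization: each macro token-pastes the generated offset constants and writes the resolved address into whatever REG_STRUCT currently names, routing the base through ctx->dcn_reg_offsets so one binary can pick up per-ASIC segment bases discovered at init time. A minimal sketch of the expansion, taking SR(DCFCLK_CNTL) from the HWSEQ register list later in this file with REG_STRUCT defined as hwseq_reg:

	/* BASE(seg) resolves to ctx->dcn_reg_offsets[seg], so SR(DCFCLK_CNTL)
	 * becomes segment base plus fixed offset; regDCFCLK_CNTL and
	 * regDCFCLK_CNTL_BASE_IDX are the constants the generated
	 * dcn/dcn_3_2_0_offset.h header must provide for this to compile.
	 */
	hwseq_reg.DCFCLK_CNTL =
		ctx->dcn_reg_offsets[regDCFCLK_CNTL_BASE_IDX] /* per-ASIC segment base */
		+ regDCFCLK_CNTL;                             /* fixed offset in segment */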
-static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK) -}; - -#define abm_regs_init(id)\ - ABM_DCN32_REG_LIST_RI(id) - -static struct dce_abm_registers abm_regs[4]; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN32(_MASK) -}; - -#define audio_regs_init(id)\ - AUD_COMMON_REG_LIST_RI(id) - -static struct dce_audio_registers audio_regs[5]; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define vpg_regs_init(id)\ - VPG_DCN3_REG_LIST_RI(id) - -static struct dcn30_vpg_registers vpg_regs[10]; - -static const struct dcn30_vpg_shift vpg_shift = { - DCN3_VPG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn30_vpg_mask vpg_mask = { - DCN3_VPG_MASK_SH_LIST(_MASK) -}; - -#define afmt_regs_init(id)\ - AFMT_DCN3_REG_LIST_RI(id) - -static struct dcn30_afmt_registers afmt_regs[6]; - -static const struct dcn30_afmt_shift afmt_shift = { - DCN3_AFMT_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn30_afmt_mask afmt_mask = { - DCN3_AFMT_MASK_SH_LIST(_MASK) -}; - -#define apg_regs_init(id)\ - APG_DCN31_REG_LIST_RI(id) - -static struct dcn31_apg_registers apg_regs[4]; - -static const struct dcn31_apg_shift apg_shift = { - DCN31_APG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_apg_mask apg_mask = { - DCN31_APG_MASK_SH_LIST(_MASK) -}; - -#define stream_enc_regs_init(id)\ - SE_DCN32_REG_LIST_RI(id) - -static struct dcn10_stream_enc_registers stream_enc_regs[5]; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN32(_MASK) -}; - - -#define aux_regs_init(id)\ - DCN2_AUX_REG_LIST_RI(id) - -static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5]; - -#define hpd_regs_init(id)\ - HPD_REG_LIST_RI(id) - -static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5]; - -#define link_regs_init(id, phyid)\ - ( \ - LE_DCN31_REG_LIST_RI(id), \ - UNIPHY_DCN2_REG_LIST_RI(id, phyid)\ - ) - /*DPCS_DCN31_REG_LIST(id),*/ \ - -static struct dcn10_link_enc_registers link_enc_regs[5]; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ - //DPCS_DCN31_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ - //DPCS_DCN31_MASK_SH_LIST(_MASK) -}; - -#define hpo_dp_stream_encoder_reg_init(id)\ - DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) - -static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4]; - -static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) -}; - - -#define hpo_dp_link_encoder_reg_init(id)\ - DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) - /*DCN3_1_RDPCSTX_REG_LIST(0),*/ - 
/*DCN3_1_RDPCSTX_REG_LIST(1),*/ - /*DCN3_1_RDPCSTX_REG_LIST(2),*/ - /*DCN3_1_RDPCSTX_REG_LIST(3),*/ - -static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2]; - -static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { - DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { - DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) -}; - -#define dpp_regs_init(id)\ - DPP_REG_LIST_DCN30_COMMON_RI(id) - -static struct dcn3_dpp_registers dpp_regs[4]; - -static const struct dcn3_dpp_shift tf_shift = { - DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT) -}; - -static const struct dcn3_dpp_mask tf_mask = { - DPP_REG_LIST_SH_MASK_DCN30_COMMON(_MASK) -}; - - -#define opp_regs_init(id)\ - OPP_REG_LIST_DCN30_RI(id) - -static struct dcn20_opp_registers opp_regs[4]; - -static const struct dcn20_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn20_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN20(_MASK) -}; - -#define aux_engine_regs_init(id)\ - ( \ - AUX_COMMON_REG_LIST0_RI(id), \ - SR_ARR_INIT(AUXN_IMPCAL, id, 0), \ - SR_ARR_INIT(AUXP_IMPCAL, id, 0), \ - SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \ - SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\ - ) - -static struct dce110_aux_registers aux_engine_regs[5]; - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -#define dwbc_regs_dcn3_init(id)\ - DWBC_COMMON_REG_LIST_DCN30_RI(id) - -static struct dcn30_dwbc_registers dwbc30_regs[1]; - -static const struct dcn30_dwbc_shift dwbc30_shift = { - DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_dwbc_mask dwbc30_mask = { - DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define mcif_wb_regs_dcn3_init(id)\ - MCIF_WB_COMMON_REG_LIST_DCN32_RI(id) - -static struct dcn30_mmhubbub_registers mcif_wb30_regs[1]; - -static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK) -}; - -#define dsc_regsDCN20_init(id)\ - DSC_REG_LIST_DCN20_RI(id) - -static struct dcn20_dsc_registers dsc_regs[4]; - -static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) -}; - -static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN20(_MASK) -}; - -static struct dcn30_mpc_registers mpc_regs; - -#define dcn_mpc_regs_init() \ - MPC_REG_LIST_DCN3_2_RI(0),\ - MPC_REG_LIST_DCN3_2_RI(1),\ - MPC_REG_LIST_DCN3_2_RI(2),\ - MPC_REG_LIST_DCN3_2_RI(3),\ - MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\ - MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\ - MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\ - MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\ - MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0) - -static const struct dcn30_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dcn30_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN32(_MASK) -}; - -#define optc_regs_init(id)\ - OPTC_COMMON_REG_LIST_DCN3_2_RI(id) - -static struct dcn_optc_registers optc_regs[4]; - -static const struct dcn_optc_shift optc_shift = { - OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT) -}; - -static const struct dcn_optc_mask optc_mask = { - OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK) -}; - -#define hubp_regs_init(id)\ - HUBP_REG_LIST_DCN32_RI(id) - -static struct 
dcn_hubp2_registers hubp_regs[4]; - - -static const struct dcn_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dcn_hubp2_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN32(_MASK) -}; - -static struct dcn_hubbub_registers hubbub_reg; -#define hubbub_reg_init()\ - HUBBUB_REG_LIST_DCN32_RI(0) - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN32(_MASK) -}; - -static struct dccg_registers dccg_regs; - -#define dccg_regs_init()\ - DCCG_REG_LIST_DCN32_RI() - -static const struct dccg_shift dccg_shift = { - DCCG_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN32(_MASK) -}; - - -#define SRII2(reg_name_pre, reg_name_post, id)\ - .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ - ## id ## _ ## reg_name_post ## _BASE_IDX) + \ - reg ## reg_name_pre ## id ## _ ## reg_name_post - - -#define HWSEQ_DCN32_REG_LIST()\ - SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ - SR(DIO_MEM_PWR_CTRL), \ - SR(ODM_MEM_PWR_CTRL3), \ - SR(MMHUBBUB_MEM_PWR_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL2), \ - SR(DCFCLK_CNTL),\ - SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ - SRII(PIXEL_RATE_CNTL, OTG, 0), \ - SRII(PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PIXEL_RATE_CNTL, OTG, 3),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ - SR(MICROSECOND_TIME_BASE_DIV), \ - SR(MILLISECOND_TIME_BASE_DIV), \ - SR(DISPCLK_FREQ_CHANGE_CNTL), \ - SR(RBBMIF_TIMEOUT_DIS), \ - SR(RBBMIF_TIMEOUT_DIS_2), \ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ - SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ - SR(MPC_CRC_CTRL), \ - SR(MPC_CRC_RESULT_GB), \ - SR(MPC_CRC_RESULT_C), \ - SR(MPC_CRC_RESULT_AR), \ - SR(DOMAIN0_PG_CONFIG), \ - SR(DOMAIN1_PG_CONFIG), \ - SR(DOMAIN2_PG_CONFIG), \ - SR(DOMAIN3_PG_CONFIG), \ - SR(DOMAIN16_PG_CONFIG), \ - SR(DOMAIN17_PG_CONFIG), \ - SR(DOMAIN18_PG_CONFIG), \ - SR(DOMAIN19_PG_CONFIG), \ - SR(DOMAIN0_PG_STATUS), \ - SR(DOMAIN1_PG_STATUS), \ - SR(DOMAIN2_PG_STATUS), \ - SR(DOMAIN3_PG_STATUS), \ - SR(DOMAIN16_PG_STATUS), \ - SR(DOMAIN17_PG_STATUS), \ - SR(DOMAIN18_PG_STATUS), \ - SR(DOMAIN19_PG_STATUS), \ - SR(D1VGA_CONTROL), \ - SR(D2VGA_CONTROL), \ - SR(D3VGA_CONTROL), \ - SR(D4VGA_CONTROL), \ - SR(D5VGA_CONTROL), \ - SR(D6VGA_CONTROL), \ - SR(DC_IP_REQUEST_CNTL), \ - SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING) - -static struct dce_hwseq_registers hwseq_reg; - -#define hwseq_reg_init()\ - HWSEQ_DCN32_REG_LIST() - -#define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\ - HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ - HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, 
DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ - HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ - HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ - HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh) - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN32_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN32_MASK_SH_LIST(_MASK) -}; -#define vmid_regs_init(id)\ - DCN20_VMID_REG_LIST_RI(id) - -static struct dcn_vmid_registers vmid_regs[16]; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -static const struct resource_caps res_cap_dcn32 = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 5, - .num_stream_encoder = 5, - .num_hpo_dp_stream_encoder = 4, - .num_hpo_dp_link_encoder = 2, - .num_pll = 5, - .num_dwb = 1, - .num_ddc = 5, - .num_vmid = 16, - .num_mpc_3dlut = 4, - .num_dsc = 4, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true, - .ayuv = false, - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 16000 - }, - - // 6:1 downscaling ratio: 1000/6 = 166.666 - .max_downscale_factor = { - .argb8888 = 167, - .nv12 = 167, - .fp16 = 167 - }, - 64, - 64 -}; - -static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = false, - .pipe_split_policy = MPC_SPLIT_AVOID, // Due to CRB, no need to MPC split anymore - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, - .performance_trace = false, - .max_downscale_src_width = 7680,/*upto 8K*/ - .disable_pplib_wm_range = false, - .scl_reset_length10 = true, - .sanity_checks = false, - .underflow_assert_delay_us = 0xFFFFFFFF, - .dwb_fi_phase = -1, // -1 = disable, - .dmub_command_table = true, - .enable_mem_low_power = { - .bits = { - .vga = false, - .i2c = false, - .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled - .dscl = false, - .cm = false, - .mpc = false, - .optc = true, - } - }, - .use_max_lb = true, - .force_disable_subvp = false, - 
.exit_idle_opt_for_cursor_updates = true, - .using_dml2 = false, - .enable_single_display_2to1_odm_policy = true, - - /* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/ - .enable_double_buffered_dsc_pg_support = true, - .enable_dp_dig_pixel_rate_div_policy = 1, - .allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback" - .alloc_extra_way_for_cursor = true, - .min_prefetch_in_strobe_ns = 60000, // 60us - .disable_unbounded_requesting = false, - .override_dispclk_programming = true, - .disable_fpo_optimizations = false, - .fpo_vactive_margin_us = 2000, // 2000us - .disable_fpo_vactive = false, - .disable_boot_optimizations = false, - .disable_subvp_high_refresh = false, - .disable_dp_plus_plus_wa = true, - .fpo_vactive_min_active_margin_us = 200, - .fpo_vactive_max_blank_us = 1000, - .enable_legacy_fast_update = false, -}; - -static struct dce_aux *dcn32_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT aux_engine_regs - aux_engine_regs_init(0), - aux_engine_regs_init(1), - aux_engine_regs_init(2), - aux_engine_regs_init(3), - aux_engine_regs_init(4); - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs_init(id)\ - I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) - -static struct dce_i2c_registers i2c_hw_regs[5]; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static struct dce_i2c_hw *dcn32_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT i2c_hw_regs - i2c_inst_regs_init(1), - i2c_inst_regs_init(2), - i2c_inst_regs_init(3), - i2c_inst_regs_init(4), - i2c_inst_regs_init(5); - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} - -static struct clock_source *dcn32_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn31_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static struct hubbub *dcn32_hubbub_create(struct dc_context *ctx) -{ - int i; - - struct dcn20_hubbub *hubbub2 = kzalloc(sizeof(struct dcn20_hubbub), - GFP_KERNEL); - - if (!hubbub2) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT hubbub_reg - hubbub_reg_init(); - -#undef REG_STRUCT -#define REG_STRUCT vmid_regs - vmid_regs_init(0), - vmid_regs_init(1), - vmid_regs_init(2), - vmid_regs_init(3), - vmid_regs_init(4), - vmid_regs_init(5), - vmid_regs_init(6), - vmid_regs_init(7), - vmid_regs_init(8), - vmid_regs_init(9), - vmid_regs_init(10), - vmid_regs_init(11), - vmid_regs_init(12), - vmid_regs_init(13), 
- vmid_regs_init(14), - vmid_regs_init(15); - - hubbub32_construct(hubbub2, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask, - ctx->dc->dml.ip.det_buffer_size_kbytes, - ctx->dc->dml.ip.pixel_chunk_size_kbytes, - ctx->dc->dml.ip.config_return_buffer_size_in_kbytes); - - - for (i = 0; i < res_cap_dcn32.num_vmid; i++) { - struct dcn20_vmid *vmid = &hubbub2->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - - return &hubbub2->base; -} - -static struct hubp *dcn32_hubp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); - - if (!hubp2) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT hubp_regs - hubp_regs_init(0), - hubp_regs_init(1), - hubp_regs_init(2), - hubp_regs_init(3); - - if (hubp32_construct(hubp2, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp2->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp2); - return NULL; -} - -static void dcn32_dpp_destroy(struct dpp **dpp) -{ - kfree(TO_DCN30_DPP(*dpp)); - *dpp = NULL; -} - -static struct dpp *dcn32_dpp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn3_dpp *dpp3 = - kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); - - if (!dpp3) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT dpp_regs - dpp_regs_init(0), - dpp_regs_init(1), - dpp_regs_init(2), - dpp_regs_init(3); - - if (dpp32_construct(dpp3, ctx, inst, - &dpp_regs[inst], &tf_shift, &tf_mask)) - return &dpp3->base; - - BREAK_TO_DEBUGGER(); - kfree(dpp3); - return NULL; -} - -static struct mpc *dcn32_mpc_create( - struct dc_context *ctx, - int num_mpcc, - int num_rmu) -{ - struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), - GFP_KERNEL); - - if (!mpc30) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT mpc_regs - dcn_mpc_regs_init(); - - dcn32_mpc_construct(mpc30, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - num_mpcc, - num_rmu); - - return &mpc30->base; -} - -static struct output_pixel_processor *dcn32_opp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_opp *opp2 = - kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); - - if (!opp2) { - BREAK_TO_DEBUGGER(); - return NULL; - } - -#undef REG_STRUCT -#define REG_STRUCT opp_regs - opp_regs_init(0), - opp_regs_init(1), - opp_regs_init(2), - opp_regs_init(3); - - dcn20_opp_construct(opp2, ctx, inst, - &opp_regs[inst], &opp_shift, &opp_mask); - return &opp2->base; -} - - -static struct timing_generator *dcn32_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT optc_regs - optc_regs_init(0), - optc_regs_init(1), - optc_regs_init(2), - optc_regs_init(3); - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &optc_regs[instance]; - tgn10->tg_shift = &optc_shift; - tgn10->tg_mask = &optc_mask; - - dcn32_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -static struct link_encoder *dcn32_link_encoder_create( - struct dc_context *ctx, - 
const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - - if (!enc20) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT link_enc_aux_regs - aux_regs_init(0), - aux_regs_init(1), - aux_regs_init(2), - aux_regs_init(3), - aux_regs_init(4); - -#undef REG_STRUCT -#define REG_STRUCT link_enc_hpd_regs - hpd_regs_init(0), - hpd_regs_init(1), - hpd_regs_init(2), - hpd_regs_init(3), - hpd_regs_init(4); - -#undef REG_STRUCT -#define REG_STRUCT link_enc_regs - link_regs_init(0, A), - link_regs_init(1, B), - link_regs_init(2, C), - link_regs_init(3, D), - link_regs_init(4, E); - - dcn32_link_encoder_construct(enc20, - enc_init_data, - &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc20->enc10.base; -} - -struct panel_cntl *dcn32_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dcn31_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dcn31_panel_cntl_construct(panel_cntl, init_data); - - return &panel_cntl->base; -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS, - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); - -} - -static struct audio *dcn32_create_audio( - struct dc_context *ctx, unsigned int inst) -{ - -#undef REG_STRUCT -#define REG_STRUCT audio_regs - audio_regs_init(0), - audio_regs_init(1), - audio_regs_init(2), - audio_regs_init(3), - audio_regs_init(4); - - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct vpg *dcn32_vpg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); - - if (!vpg3) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT vpg_regs - vpg_regs_init(0), - vpg_regs_init(1), - vpg_regs_init(2), - vpg_regs_init(3), - vpg_regs_init(4), - vpg_regs_init(5), - vpg_regs_init(6), - vpg_regs_init(7), - vpg_regs_init(8), - vpg_regs_init(9); - - vpg3_construct(vpg3, ctx, inst, - &vpg_regs[inst], - &vpg_shift, - &vpg_mask); - - return &vpg3->base; -} - -static struct afmt *dcn32_afmt_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); - - if (!afmt3) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT afmt_regs - afmt_regs_init(0), - afmt_regs_init(1), - afmt_regs_init(2), - afmt_regs_init(3), - afmt_regs_init(4), - afmt_regs_init(5); - - afmt3_construct(afmt3, ctx, inst, - &afmt_regs[inst], - &afmt_shift, - &afmt_mask); - - return &afmt3->base; -} - -static struct apg *dcn31_apg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); - - if (!apg31) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT apg_regs - apg_regs_init(0), - apg_regs_init(1), - apg_regs_init(2), - apg_regs_init(3); - - apg31_construct(apg31, ctx, inst, - &apg_regs[inst], - &apg_shift, - &apg_mask); - - return &apg31->base; -} - -static struct stream_encoder *dcn32_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1; - struct vpg *vpg; - struct afmt *afmt; - int vpg_inst; 
- int afmt_inst; - - /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else - return NULL; - - enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - vpg = dcn32_vpg_create(ctx, vpg_inst); - afmt = dcn32_afmt_create(ctx, afmt_inst); - - if (!enc1 || !vpg || !afmt) { - kfree(enc1); - kfree(vpg); - kfree(afmt); - return NULL; - } - -#undef REG_STRUCT -#define REG_STRUCT stream_enc_regs - stream_enc_regs_init(0), - stream_enc_regs_init(1), - stream_enc_regs_init(2), - stream_enc_regs_init(3), - stream_enc_regs_init(4); - - dcn32_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, - eng_id, vpg, afmt, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -static struct hpo_dp_stream_encoder *dcn32_hpo_dp_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; - struct vpg *vpg; - struct apg *apg; - uint32_t hpo_dp_inst; - uint32_t vpg_inst; - uint32_t apg_inst; - - ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); - hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; - - /* Mapping of VPG register blocks to HPO DP block instance: - * VPG[6] -> HPO_DP[0] - * VPG[7] -> HPO_DP[1] - * VPG[8] -> HPO_DP[2] - * VPG[9] -> HPO_DP[3] - */ - vpg_inst = hpo_dp_inst + 6; - - /* Mapping of APG register blocks to HPO DP block instance: - * APG[0] -> HPO_DP[0] - * APG[1] -> HPO_DP[1] - * APG[2] -> HPO_DP[2] - * APG[3] -> HPO_DP[3] - */ - apg_inst = hpo_dp_inst; - - /* allocate HPO stream encoder and create VPG sub-block */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); - vpg = dcn32_vpg_create(ctx, vpg_inst); - apg = dcn31_apg_create(ctx, apg_inst); - - if (!hpo_dp_enc31 || !vpg || !apg) { - kfree(hpo_dp_enc31); - kfree(vpg); - kfree(apg); - return NULL; - } - -#undef REG_STRUCT -#define REG_STRUCT hpo_dp_stream_enc_regs - hpo_dp_stream_encoder_reg_init(0), - hpo_dp_stream_encoder_reg_init(1), - hpo_dp_stream_encoder_reg_init(2), - hpo_dp_stream_encoder_reg_init(3); - - dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, - hpo_dp_inst, eng_id, vpg, apg, - &hpo_dp_stream_enc_regs[hpo_dp_inst], - &hpo_dp_se_shift, &hpo_dp_se_mask); - - return &hpo_dp_enc31->base; -} - -static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create( - uint8_t inst, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; - - /* allocate HPO link encoder */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); - -#undef REG_STRUCT -#define REG_STRUCT hpo_dp_link_enc_regs - hpo_dp_link_encoder_reg_init(0), - hpo_dp_link_encoder_reg_init(1); - - hpo_dp_link_encoder32_construct(hpo_dp_enc31, ctx, inst, - &hpo_dp_link_enc_regs[inst], - &hpo_dp_le_shift, &hpo_dp_le_mask); - - return &hpo_dp_enc31->base; -} - -static struct dce_hwseq *dcn32_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - -#undef REG_STRUCT -#define REG_STRUCT hwseq_reg - hwseq_reg_init(); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn32_create_audio, - .create_stream_encoder = dcn32_stream_encoder_create, - .create_hpo_dp_stream_encoder = 
dcn32_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn32_hpo_dp_link_encoder_create, - .create_hwseq = dcn32_hwseq_create, -}; - -static void dcn32_resource_destruct(struct dcn32_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) { - if (pool->base.stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); - pool->base.stream_enc[i]->vpg = NULL; - } - if (pool->base.stream_enc[i]->afmt != NULL) { - kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); - pool->base.stream_enc[i]->afmt = NULL; - } - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { - if (pool->base.hpo_dp_stream_enc[i] != NULL) { - if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); - pool->base.hpo_dp_stream_enc[i]->vpg = NULL; - } - if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { - kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); - pool->base.hpo_dp_stream_enc[i]->apg = NULL; - } - kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); - pool->base.hpo_dp_stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { - if (pool->base.hpo_dp_link_enc[i] != NULL) { - kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); - pool->base.hpo_dp_link_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - if (pool->base.dscs[i] != NULL) - dcn20_dsc_destroy(&pool->base.dscs[i]); - } - - if (pool->base.mpc != NULL) { - kfree(TO_DCN20_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - if (pool->base.hubbub != NULL) { - kfree(TO_DCN20_HUBBUB(pool->base.hubbub)); - pool->base.hubbub = NULL; - } - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.dpps[i] != NULL) - dcn32_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dwb; i++) { - if (pool->base.dwbc[i] != NULL) { - kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); - pool->base.dwbc[i] = NULL; - } - if (pool->base.mcif_wb[i] != NULL) { - kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); - pool->base.mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < 
pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn20_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { - if (pool->base.mpc_lut[i] != NULL) { - dc_3dlut_func_release(pool->base.mpc_lut[i]); - pool->base.mpc_lut[i] = NULL; - } - if (pool->base.mpc_shaper[i] != NULL) { - dc_transfer_func_release(pool->base.mpc_shaper[i]); - pool->base.mpc_shaper[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn20_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.multiple_abms[i] != NULL) - dce_abm_destroy(&pool->base.multiple_abms[i]); - } - - if (pool->base.psr != NULL) - dmub_psr_destroy(&pool->base.psr); - - if (pool->base.dccg != NULL) - dcn_dccg_destroy(&pool->base.dccg); - - if (pool->base.oem_device != NULL) { - struct dc *dc = pool->base.oem_device->ctx->dc; - - dc->link_srv->destroy_ddc_service(&pool->base.oem_device); - } -} - - -static bool dcn32_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t dwb_count = pool->res_cap->num_dwb; - - for (i = 0; i < dwb_count; i++) { - struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), - GFP_KERNEL); - - if (!dwbc30) { - dm_error("DC: failed to create dwbc30!\n"); - return false; - } - -#undef REG_STRUCT -#define REG_STRUCT dwbc30_regs - dwbc_regs_dcn3_init(0); - - dcn30_dwbc_construct(dwbc30, ctx, - &dwbc30_regs[i], - &dwbc30_shift, - &dwbc30_mask, - i); - - pool->dwbc[i] = &dwbc30->base; - } - return true; -} - -static bool dcn32_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t dwb_count = pool->res_cap->num_dwb; - - for (i = 0; i < dwb_count; i++) { - struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), - GFP_KERNEL); - - if (!mcif_wb30) { - dm_error("DC: failed to create mcif_wb30!\n"); - return false; - } - -#undef REG_STRUCT -#define REG_STRUCT mcif_wb30_regs - mcif_wb_regs_dcn3_init(0); - - dcn32_mmhubbub_construct(mcif_wb30, ctx, - &mcif_wb30_regs[i], - &mcif_wb30_shift, - &mcif_wb30_mask, - i); - - pool->mcif_wb[i] = &mcif_wb30->base; - } - return true; -} - -static struct display_stream_compressor *dcn32_dsc_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - -#undef REG_STRUCT -#define REG_STRUCT dsc_regs - dsc_regsDCN20_init(0), - dsc_regsDCN20_init(1), - dsc_regsDCN20_init(2), - dsc_regsDCN20_init(3); - - dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - - dsc->max_image_width = 6016; - - return &dsc->base; -} - -static void dcn32_destroy_resource_pool(struct resource_pool **pool) -{ - struct dcn32_resource_pool *dcn32_pool = TO_DCN32_RES_POOL(*pool); - - dcn32_resource_destruct(dcn32_pool); - kfree(dcn32_pool); - *pool = NULL; -} - -bool dcn32_acquire_post_bldn_3dlut( - struct resource_context *res_ctx, - const struct resource_pool *pool, - int mpcc_id, - struct dc_3dlut **lut, - struct dc_transfer_func **shaper) -{ - bool ret = false; - - ASSERT(*lut == NULL && *shaper == NULL); - *lut = NULL; - *shaper = NULL; - - if (!res_ctx->is_mpc_3dlut_acquired[mpcc_id]) { - *lut = pool->mpc_lut[mpcc_id]; - *shaper = pool->mpc_shaper[mpcc_id]; - res_ctx->is_mpc_3dlut_acquired[mpcc_id] = true; - ret = true; 
- } - return ret; -} - -bool dcn32_release_post_bldn_3dlut( - struct resource_context *res_ctx, - const struct resource_pool *pool, - struct dc_3dlut **lut, - struct dc_transfer_func **shaper) -{ - int i; - bool ret = false; - - for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { - if (pool->mpc_lut[i] == *lut && pool->mpc_shaper[i] == *shaper) { - res_ctx->is_mpc_3dlut_acquired[i] = false; - pool->mpc_lut[i]->state.raw = 0; - *lut = NULL; - *shaper = NULL; - ret = true; - break; - } - } - return ret; -}
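/*
 * A minimal caller-side sketch of the acquire/release pairing above, assuming
 * a hypothetical helper name (try_set_mpcc_3dlut is not part of this patch).
 * The pool preallocates one 3D LUT and shaper per MPCC; is_mpc_3dlut_acquired[]
 * is the only ownership record, so acquire and release must always be paired.
 */
static bool try_set_mpcc_3dlut(struct resource_context *res_ctx,
		const struct resource_pool *pool, int mpcc_id)
{
	struct dc_3dlut *lut = NULL;	/* must be NULL going in; acquire ASSERTs on it */
	struct dc_transfer_func *shaper = NULL;

	if (!dcn32_acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, &lut, &shaper))
		return false;	/* the LUT for this MPCC is already owned */

	/* ... program lut and shaper here ... */

	/* release scans the pool for the matching lut/shaper pair and clears
	 * is_mpc_3dlut_acquired[] so the LUT can be handed out again */
	return dcn32_release_post_bldn_3dlut(res_ctx, pool, &lut, &shaper);
}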
-static void dcn32_enable_phantom_plane(struct dc *dc, - struct dc_state *context, - struct dc_stream_state *phantom_stream, - unsigned int dc_pipe_idx) -{ - struct dc_plane_state *phantom_plane = NULL; - struct dc_plane_state *prev_phantom_plane = NULL; - struct pipe_ctx *curr_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx]; - - while (curr_pipe) { - if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) - phantom_plane = prev_phantom_plane; - else - phantom_plane = dc_create_plane_state(dc); - - memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); - memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality, - sizeof(phantom_plane->scaling_quality)); - memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect)); - memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect)); - memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect)); - memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size, - sizeof(phantom_plane->plane_size)); - memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info, - sizeof(phantom_plane->tiling_info)); - memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc)); - phantom_plane->format = curr_pipe->plane_state->format; - phantom_plane->rotation = curr_pipe->plane_state->rotation; - phantom_plane->visible = curr_pipe->plane_state->visible; - - /* Shadow pipe has a small viewport. */ - phantom_plane->clip_rect.y = 0; - phantom_plane->clip_rect.height = phantom_stream->src.height; - - phantom_plane->is_phantom = true; - - dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context); - - curr_pipe = curr_pipe->bottom_pipe; - prev_phantom_plane = phantom_plane; - } -} - -static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - unsigned int pipe_cnt, - unsigned int dc_pipe_idx) -{ - struct dc_stream_state *phantom_stream = NULL; - struct pipe_ctx *ref_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx]; - - phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink); - phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; - phantom_stream->dpms_off = true; - phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; - phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; - ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; - ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; - - /* stream has limited viewport and small timing */ - memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing)); - memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src)); - memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst)); - DC_FP_START(); - dcn32_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream, pipes, pipe_cnt, dc_pipe_idx); - DC_FP_END(); - - dc_add_stream_to_ctx(dc, context, phantom_stream); - return phantom_stream; -} - -void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context) -{ - int i; - struct dc_plane_state *phantom_plane = NULL; - struct dc_stream_state *phantom_stream = NULL; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (resource_is_pipe_type(pipe, OTG_MASTER) && - resource_is_pipe_type(pipe, DPP_PIPE) && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - phantom_plane = pipe->plane_state; - phantom_stream = pipe->stream; - - dc_plane_state_retain(phantom_plane); - dc_stream_retain(phantom_stream); - } - } -}
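/*
 * A minimal sketch of the reference-count balance for a phantom plane, as
 * described by the comment inside dcn32_remove_phantom_pipes() below; the
 * helper name and flow are illustrative only, not part of this patch.
 */
static void phantom_plane_refcount_example(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *phantom_stream)
{
	struct dc_plane_state *plane = dc_create_plane_state(dc);	/* ref = 1 */

	dc_add_plane_to_context(dc, phantom_stream, plane, context);	/* ref = 2 */

	/* teardown: drop the context's reference, then the creation one */
	dc_rem_all_planes_for_stream(dc, phantom_stream, context);	/* ref = 1 */
	dc_plane_state_release(plane);					/* ref = 0, freed */
}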
- -// return true if a pipe was removed from ctx, false otherwise -bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context, bool fast_update) -{ - int i; - bool removed_pipe = false; - struct dc_plane_state *phantom_plane = NULL; - struct dc_stream_state *phantom_stream = NULL; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - // remove phantom pipes (stream is marked SUBVP_PHANTOM) - if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - phantom_plane = pipe->plane_state; - phantom_stream = pipe->stream; - - dc_rem_all_planes_for_stream(dc, pipe->stream, context); - dc_remove_stream_from_ctx(dc, context, pipe->stream); - - /* Ref count is incremented on allocation and also when added to the context. - * Therefore we must call release for the phantom plane and stream once - * they are removed from the ctx to finally decrement the refcount to 0 to free. - */ - dc_plane_state_release(phantom_plane); - dc_stream_release(phantom_stream); - - removed_pipe = true; - } - - /* For non-full updates, a shallow copy of the current state - * is created. In this case we don't want to erase the current - * state (there can be 2 HIRQL threads, one in flip, and one in - * checkMPO) as that could cause a race condition. - * - * This is just a workaround, needs a proper fix. - */ - if (!fast_update) { - // Clear all phantom stream info - if (pipe->stream) { - pipe->stream->mall_stream_config.type = SUBVP_NONE; - pipe->stream->mall_stream_config.paired_stream = NULL; - } - - if (pipe->plane_state) { - pipe->plane_state->is_phantom = false; - } - } - } - return removed_pipe; -} - -/* TODO: Input to this function should indicate which pipe indexes (or streams) - * require a phantom pipe / stream - */ -void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - unsigned int pipe_cnt, - unsigned int index) -{ - struct dc_stream_state *phantom_stream = NULL; - unsigned int i; - - // The index of the DC pipe passed into this function is guaranteed to - // be a valid candidate for SubVP (i.e. has a plane, stream, doesn't - // already have a phantom pipe assigned, etc.) by previous checks. - phantom_stream = dcn32_enable_phantom_stream(dc, context, pipes, pipe_cnt, index); - dcn32_enable_phantom_plane(dc, context, phantom_stream, index); - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - // Build scaling params for phantom pipes which were newly added. - // We determine which phantom pipes were added by comparing with - // the phantom stream. - if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - pipe->stream->use_dynamic_meta = false; - pipe->plane_state->flip_immediate = false; - if (!resource_build_scaling_params(pipe)) { - // Log / remove phantom pipes since failed to build scaling params - } - } - } -} - -static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate) -{ - bool out = false; - - BW_VAL_TRACE_SETUP(); - - int vlevel = 0; - int pipe_cnt = 0; - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); - struct mall_temp_config mall_temp_config; - - /* To handle FreeSync properly, set the FreeSync DML parameters - * to their default state for the first stage of validation. - */ - context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; - context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true; - - DC_LOGGER_INIT(dc->ctx->logger); - - /* For fast validation, there are situations where a shallow copy - * of the dc->current_state is created for the validation. In this case - * we want to save and restore the mall config because we always - * teardown subvp at the beginning of validation (and don't attempt - * to add it back if it's fast validation). If we don't restore the - * subvp config in cases of fast validation + shallow copy of the - * dc->current_state, the dc->current_state will have a partially - * removed subvp state when we did not intend to remove it.
- */ - if (fast_validate) { - memset(&mall_temp_config, 0, sizeof(mall_temp_config)); - dcn32_save_mall_state(dc, context, &mall_temp_config); - } - - BW_VAL_TRACE_COUNT(); - - DC_FP_START(); - out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); - DC_FP_END(); - - if (fast_validate) - dcn32_restore_mall_state(dc, context, &mall_temp_config); - - if (pipe_cnt == 0) - goto validate_out; - - if (!out) - goto validate_fail; - - BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - - if (fast_validate) { - BW_VAL_TRACE_SKIP(fast); - goto validate_out; - } - - dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); - - dcn32_override_min_req_memclk(dc, context); - dcn32_override_min_req_dcfclk(dc, context); - - BW_VAL_TRACE_END_WATERMARKS(); - - goto validate_out; - -validate_fail: - DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", - dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); - - BW_VAL_TRACE_SKIP(fail); - out = false; - -validate_out: - kfree(pipes); - - BW_VAL_TRACE_FINISH(); - - return out; -} - -bool dcn32_validate_bandwidth(struct dc *dc, - struct dc_state *context, - bool fast_validate) -{ - bool out = false; - - if (dc->debug.using_dml2) - out = dml2_validate(dc, context, fast_validate); - else - out = dml1_validate(dc, context, fast_validate); - return out; -} - -int dcn32_populate_dml_pipes_from_context( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate) -{ - int i, pipe_cnt; - struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe = NULL; - bool subvp_in_use = false; - struct dc_crtc_timing *timing; - - dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); - - for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { - - if (!res_ctx->pipe_ctx[i].stream) - continue; - pipe = &res_ctx->pipe_ctx[i]; - timing = &pipe->stream->timing; - - pipes[pipe_cnt].pipe.src.gpuvm = true; - DC_FP_START(); - dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt); - DC_FP_END(); - pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; - if (dc->config.enable_windowed_mpo_odm && - dc->debug.enable_single_display_2to1_odm_policy) { - switch (resource_get_odm_slice_count(pipe)) { - case 2: - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; - break; - case 4: - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; - break; - default: - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; - } - } else { - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; - } - pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet - pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; - pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19; - - /* Only populate DML input with subvp info for full updates. - * This is just a workaround -- needs a proper fix. 
- */ - if (!fast_validate) { - switch (pipe->stream->mall_stream_config.type) { - case SUBVP_MAIN: - pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport; - subvp_in_use = true; - break; - case SUBVP_PHANTOM: - pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_phantom_pipe; - pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = dm_use_mall_static_screen_disable; - // Disallow unbounded req for SubVP according to DCHUB programming guide - pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; - break; - case SUBVP_NONE: - pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_disable; - pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = dm_use_mall_static_screen_disable; - break; - default: - break; - } - } - - pipes[pipe_cnt].dout.dsc_input_bpc = 0; - if (pipes[pipe_cnt].dout.dsc_enable) { - switch (timing->display_color_depth) { - case COLOR_DEPTH_888: - pipes[pipe_cnt].dout.dsc_input_bpc = 8; - break; - case COLOR_DEPTH_101010: - pipes[pipe_cnt].dout.dsc_input_bpc = 10; - break; - case COLOR_DEPTH_121212: - pipes[pipe_cnt].dout.dsc_input_bpc = 12; - break; - default: - ASSERT(0); - break; - } - } - - - pipe_cnt++; - } - - /* For DET allocation, we don't want to use DML policy (not optimal for utilizing all - * the DET available for each pipe). Use the DET override input to maintain our driver - * policy. - */ - dcn32_set_det_allocations(dc, context, pipes); - - // In general cases we want to keep the dram clock change requirement - // (prefer configs that support MCLK switch). Only override to false - // for SubVP - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || subvp_in_use) - context->bw_ctx.dml.soc.dram_clock_change_requirement_final = false; - else - context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true; - - return pipe_cnt; -} - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, - .get_subvp_en = dcn32_subvp_in_use, -}; - -void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt, - int vlevel) -{ - DC_FP_START(); - dcn32_calculate_wm_and_dlg_fpu(dc, context, pipes, pipe_cnt, vlevel); - DC_FP_END(); -} - -static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) -{ - DC_FP_START(); - dcn32_update_bw_bounding_box_fpu(dc, bw_params); - if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - DC_FP_END(); -} - -static struct resource_funcs dcn32_res_pool_funcs = { - .destroy = dcn32_destroy_resource_pool, - .link_enc_create = dcn32_link_encoder_create, - .link_enc_create_minimal = NULL, - .panel_cntl_create = dcn32_panel_cntl_create, - .validate_bandwidth = dcn32_validate_bandwidth, - .calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg, - .populate_dml_pipes = dcn32_populate_dml_pipes_from_context, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe, - .acquire_free_pipe_as_secondary_opp_head = dcn32_acquire_free_pipe_as_secondary_opp_head, - .release_pipe = dcn20_release_pipe, - .add_stream_to_ctx = dcn30_add_stream_to_ctx, - .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, - .set_mcif_arb_params = dcn30_set_mcif_arb_params, - 
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, - .acquire_post_bldn_3dlut = dcn32_acquire_post_bldn_3dlut, - .release_post_bldn_3dlut = dcn32_release_post_bldn_3dlut, - .update_bw_bounding_box = dcn32_update_bw_bounding_box, - .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, - .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, - .add_phantom_pipes = dcn32_add_phantom_pipes, - .remove_phantom_pipes = dcn32_remove_phantom_pipes, - .retain_phantom_pipes = dcn32_retain_phantom_pipes, - .save_mall_state = dcn32_save_mall_state, - .restore_mall_state = dcn32_restore_mall_state, - .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, -}; - -static uint32_t read_pipe_fuses(struct dc_context *ctx) -{ - uint32_t value = REG_READ(CC_DC_PIPE_DIS); - /* DCN32 supports a max of 4 pipes */ - value = value & 0xf; - return value; -} - - -static bool dcn32_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dcn32_resource_pool *pool) -{ - int i, j; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data init_data; - struct ddc_service_init_data ddc_init_data = {0}; - uint32_t pipe_fuses = 0; - uint32_t num_pipes = 4; - -#undef REG_STRUCT -#define REG_STRUCT bios_regs - bios_regs_init(); - -#undef REG_STRUCT -#define REG_STRUCT clk_src_regs - clk_src_regs_init(0, A), - clk_src_regs_init(1, B), - clk_src_regs_init(2, C), - clk_src_regs_init(3, D), - clk_src_regs_init(4, E); - -#undef REG_STRUCT -#define REG_STRUCT abm_regs - abm_regs_init(0), - abm_regs_init(1), - abm_regs_init(2), - abm_regs_init(3); - -#undef REG_STRUCT -#define REG_STRUCT dccg_regs - dccg_regs_init(); - - DC_FP_START(); - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap_dcn32; - /* max number of pipes for ASIC before checking for pipe fuses */ - num_pipes = pool->base.res_cap->num_timing_generator; - pipe_fuses = read_pipe_fuses(ctx); - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) - if (pipe_fuses & 1 << i) - num_pipes--; - - if (pipe_fuses & 1) - ASSERT(0); //Unexpected - Pipe 0 should always be fully functional! - - if (pipe_fuses & CC_DC_PIPE_DIS__DC_FULL_DIS_MASK) - ASSERT(0); //Entire DCN is harvested! - - /* Within the dml lib the initial value is hard coded; if an ASIC pipe is - * fused, the value must change, so update max_num_dpp and max_num_otg for dml. - */ - dcn3_2_ip.max_num_dpp = num_pipes; - dcn3_2_ip.max_num_otg = num_pipes;
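/*
 * Worked example of the fuse math above (count_usable_pipes is a hypothetical
 * helper, not part of this patch). CC_DC_PIPE_DIS is a disable mask: with
 * pipe_fuses = 0x6 (0b0110), pipes 1 and 2 are harvested, so num_pipes drops
 * from 4 to 2; a set bit 0 would trip the "Pipe 0" ASSERT above.
 */
static uint32_t count_usable_pipes(uint32_t pipe_fuses, uint32_t max_pipes)
{
	uint32_t i, usable = max_pipes;

	for (i = 0; i < max_pipes; i++)
		if (pipe_fuses & (1 << i))
			usable--;	/* set bit == pipe fused off */

	return usable;	/* count_usable_pipes(0x6, 4) == 2 */
}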
- - pool->base.funcs = &dcn32_res_pool_funcs; - - /************************************************* - * Resource + asic cap hardcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.timing_generator_count = num_pipes; - pool->base.pipe_count = num_pipes; - pool->base.mpcc_count = num_pipes; - dc->caps.max_downscale_ratio = 600; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/ - /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/ - dc->caps.max_cursor_size = 64; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dmdata_alloc_size = 2048; - dc->caps.mall_size_per_mem_channel = 4; - dc->caps.mall_size_total = 0; - dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; - - dc->caps.cache_line_size = 64; - dc->caps.cache_num_ways = 16; - - /* Calculate the available MALL space */ - dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( - dc, dc->ctx->dc_bios->vram_info.num_chans) * - dc->caps.mall_size_per_mem_channel * 1024 * 1024; - dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; - - dc->caps.subvp_fw_processing_delay_us = 15; - dc->caps.subvp_drr_max_vblank_margin_us = 40; - dc->caps.subvp_prefetch_end_to_mall_start_us = 15; - dc->caps.subvp_swath_height_margin_lines = 16; - dc->caps.subvp_pstate_allow_width_us = 20; - dc->caps.subvp_vertical_int_margin_us = 30; - dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin - - dc->caps.max_slave_planes = 2; - dc->caps.max_slave_yuv_planes = 2; - dc->caps.max_slave_rgb_planes = 2; - dc->caps.post_blend_color_processing = true; - dc->caps.force_dp_tps4_for_cp2520 = true; - if (dc->config.forceHBR2CP2520) - dc->caps.force_dp_tps4_for_cp2520 = false; - dc->caps.dp_hpo = true; - dc->caps.dp_hdmi21_pcon_support = true; - dc->caps.edp_dsc_support = true; - dc->caps.extended_aux_timeout_support = true; - dc->caps.dmcub_support = true; - dc->caps.seamless_odm = true; - dc->caps.max_v_total = (1 << 15) - 1; - - /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 0; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; - dc->caps.color.dpp.dgam_rom_caps.pq = 1; - dc->caps.color.dpp.dgam_rom_caps.hlg = 1; - dc->caps.color.dpp.post_csc = 1; - dc->caps.color.dpp.gamma_corr = 1; - dc->caps.color.dpp.dgam_rom_for_yuv = 0; - - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 - // no OGAM ROM on DCN2 and later ASICs - dc->caps.color.dpp.ogam_rom_caps.srgb = 0; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 0; - - dc->caps.color.mpc.gamut_remap = 1; - dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //4, configurable to be before or after BLND in MPCC - dc->caps.color.mpc.ogam_ram = 1; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; -
dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 1; - - /* Use pipe context based otg sync logic */ - dc->config.use_pipe_ctx_sync_logic = true; - - dc->config.dc_mode_clk_limit_support = true; - /* read VBIOS LTTPR caps */ - { - if (ctx->dc_bios->funcs->get_lttpr_caps) { - enum bp_result bp_query_result; - uint8_t is_vbios_lttpr_enable = 0; - - bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); - dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; - } - - /* interop bit is implicit */ - { - dc->caps.vbios_lttpr_aware = true; - } - } - - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) - dc->debug = debug_defaults_drv; - - // Init the vm_helper - if (dc->vm_helper) - vm_helper_init(dc->vm_helper, 16); - - /************************************************* - * Create resources * - *************************************************/ - - /* Clock Sources for Pixel Clock*/ - pool->base.clock_sources[DCN32_CLK_SRC_PLL0] = - dcn32_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCN32_CLK_SRC_PLL1] = - dcn32_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCN32_CLK_SRC_PLL2] = - dcn32_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCN32_CLK_SRC_PLL3] = - dcn32_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - pool->base.clock_sources[DCN32_CLK_SRC_PLL4] = - dcn32_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - - pool->base.clk_src_count = DCN32_CLK_SRC_TOTAL; - - /* todo: not reuse phy_pll registers */ - pool->base.dp_clock_source = - dcn32_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* DCCG */ - pool->base.dccg = dccg32_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->base.dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* DML */ - dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32); - - /* IRQ Service */ - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dcn32_create(&init_data); - if (!pool->base.irqs) - goto create_fail; - - /* HUBBUB */ - pool->base.hubbub = dcn32_hubbub_create(ctx); - if (pool->base.hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - /* HUBPs, DPPs, OPPs, TGs, ABMs */ - for (i = 0, j = 0; i < pool->base.res_cap->num_timing_generator; i++) { - - /* if pipe is disabled, skip instance of HW pipe, - * i.e, skip ASIC register instance - */ - if (pipe_fuses & 1 << i) - continue; - - /* HUBPs */ - pool->base.hubps[j] = dcn32_hubp_create(ctx, i); - if (pool->base.hubps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create hubps!\n"); - goto create_fail; - } - - /* DPPs */ - pool->base.dpps[j] = dcn32_dpp_create(ctx, i); - if (pool->base.dpps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create dpps!\n"); - goto create_fail; - } - - /* OPPs */ - pool->base.opps[j] = dcn32_opp_create(ctx, i); - 
if (pool->base.opps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - - /* TGs */ - pool->base.timing_generators[j] = dcn32_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - - /* ABMs */ - pool->base.multiple_abms[j] = dmub_abm_create(ctx, - &abm_regs[i], - &abm_shift, - &abm_mask); - if (pool->base.multiple_abms[j] == NULL) { - dm_error("DC: failed to create abm for pipe %d!\n", i); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* index for resource pool arrays for next valid pipe */ - j++; - } - - /* PSR */ - pool->base.psr = dmub_psr_create(ctx); - if (pool->base.psr == NULL) { - dm_error("DC: failed to create psr obj!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* MPCCs */ - pool->base.mpc = dcn32_mpc_create(ctx, pool->base.res_cap->num_timing_generator, pool->base.res_cap->num_mpc_3dlut); - if (pool->base.mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - /* DSCs */ - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - pool->base.dscs[i] = dcn32_dsc_create(ctx, i); - if (pool->base.dscs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create display stream compressor %d!\n", i); - goto create_fail; - } - } - - /* DWB */ - if (!dcn32_dwbc_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dwbc!\n"); - goto create_fail; - } - - /* MMHUBBUB */ - if (!dcn32_mmhubbub_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mcif_wb!\n"); - goto create_fail; - } - - /* AUX and I2C */ - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dcn32_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto create_fail; - } - pool->base.hw_i2cs[i] = dcn32_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create hw i2c!!\n"); - goto create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - /* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */ - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto create_fail; - - /* HW Sequencer init functions and Plane caps */ - dcn32_hw_sequencer_init_functions(dc); - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { - ddc_init_data.ctx = dc->ctx; - ddc_init_data.link = NULL; - ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; - ddc_init_data.id.enum_id = 0; - ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); - } else { - pool->base.oem_device = NULL; - } - - dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; - dc->dml2_options.use_native_soc_bb_construction = true; - dc->dml2_options.minimize_dispclk_using_odm = true; - - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = 
&dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; - - dc->dml2_options.svp_pstate.callbacks.dc = dc; - dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context; - dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx; - dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state; - dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context; - dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx; - dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink; - dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release; - dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release; - dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; - - dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; - dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us; - dc->dml2_options.svp_pstate.subvp_pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us; - dc->dml2_options.svp_pstate.subvp_swath_height_margin_lines = dc->caps.subvp_swath_height_margin_lines; - - dc->dml2_options.svp_pstate.force_disable_subvp = dc->debug.force_disable_subvp; - dc->dml2_options.svp_pstate.force_enable_subvp = dc->debug.force_subvp_mclk_switch; - - dc->dml2_options.mall_cfg.cache_line_size_bytes = dc->caps.cache_line_size; - dc->dml2_options.mall_cfg.cache_num_ways = dc->caps.cache_num_ways; - dc->dml2_options.mall_cfg.max_cab_allocation_bytes = dc->caps.max_cab_allocation_bytes; - dc->dml2_options.mall_cfg.mblk_height_4bpe_pixels = DCN3_2_MBLK_HEIGHT_4BPE; - dc->dml2_options.mall_cfg.mblk_height_8bpe_pixels = DCN3_2_MBLK_HEIGHT_8BPE; - dc->dml2_options.mall_cfg.mblk_size_bytes = DCN3_2_MALL_MBLK_SIZE_BYTES; - dc->dml2_options.mall_cfg.mblk_width_pixels = DCN3_2_MBLK_WIDTH; - - dc->dml2_options.max_segments_per_hubp = 18; - dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE; - dc->dml2_options.map_dc_pipes_with_callbacks = true; - - if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0)) - dc->config.sdpif_request_limit_words_per_umc = 16; - - DC_FP_END(); - - return true; - -create_fail: - - DC_FP_END(); - - dcn32_resource_destruct(pool); - - return false; -} - -struct resource_pool *dcn32_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc) -{ - struct dcn32_resource_pool *pool = - kzalloc(sizeof(struct dcn32_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dcn32_resource_construct(init_data->num_virtual_links, dc, pool)) - return &pool->base; - - BREAK_TO_DEBUGGER(); - kfree(pool); - return 
NULL; -} - -/* - * Find the most optimal free pipe from res_ctx, which could be used as a - * secondary dpp pipe for input opp head pipe. - * - * a free pipe - a pipe in input res_ctx not yet used for any streams or - * planes. - * secondary dpp pipe - a pipe that gets inserted into an OPP head pipe's MPC - * blending tree. This is typically used for rendering MPO planes or additional - * offset areas in MPCC combine. - * - * Hardware Transition Minimization Algorithm for Finding a Secondary DPP Pipe - * ------------------------------------------------------------------------- - * - * PROBLEM: - * - * 1. There is a hardware limitation that a secondary DPP pipe cannot be - * transferred from one MPC blending tree to the other in a single frame. - * Otherwise it could cause glitches on the screen. - * - * For instance, we cannot transition from state 1 to state 2 in one frame. This - * is because PIPE1 is transferred from PIPE0's MPC blending tree over to - * PIPE2's MPC blending tree, which is not supported by hardware. - * To support this transition we need to first remove PIPE1 from PIPE0's MPC - * blending tree in one frame and then insert PIPE1 to PIPE2's MPC blending tree - * in the next frame. This is not optimal as it will delay the flip for two - * frames. - * - * State 1: - * PIPE0 -- secondary DPP pipe --> (PIPE1) - * PIPE2 -- secondary DPP pipe --> NONE - * - * State 2: - * PIPE0 -- secondary DPP pipe --> NONE - * PIPE2 -- secondary DPP pipe --> (PIPE1) - * - * 2. In general, we want to minimize unnecessary changes in pipe topology. - * If a pipe is already added in the current blending tree and there are no - * changes to plane topology, we don't want to swap it with another free pipe - * unnecessarily in every update. Powering up and down a pipe would require a - * full update which delays the flip for 1 frame. If we use the original pipe - * we don't have to toggle its power. So we can flip faster. - */ -static int find_optimal_free_pipe_as_secondary_dpp_pipe( - const struct resource_context *cur_res_ctx, - struct resource_context *new_res_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *new_opp_head) -{ - const struct pipe_ctx *cur_opp_head; - int free_pipe_idx; - - cur_opp_head = &cur_res_ctx->pipe_ctx[new_opp_head->pipe_idx]; - free_pipe_idx = resource_find_free_pipe_used_in_cur_mpc_blending_tree( - cur_res_ctx, new_res_ctx, cur_opp_head); - - /* Up until here if we have not found a free secondary pipe, we will - * need to wait for at least one frame to complete the transition - * sequence. - */ - if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) - free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx( - cur_res_ctx, new_res_ctx, pool); - - /* Up until here if we have not found a free secondary pipe, we will - * need to wait for at least two frames to complete the transition - * sequence. It really doesn't matter which pipe we decide to take from - * the currently enabled pipes; swapping only one pipe or several won't - * save our frame time. - */ - if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) - free_pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine( - cur_res_ctx, new_res_ctx, pool); - - if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) - free_pipe_idx = resource_find_any_free_pipe(new_res_ctx, pool); - - return free_pipe_idx; -}
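/*
 * The helper above tries four searches in order of increasing transition
 * cost (per its comments): (1) a free pipe already in this OPP head's MPC
 * tree costs no extra frames, (2) a pipe unused in the current res_ctx costs
 * about one frame, (3) stealing a secondary DPP pipe from an MPCC combine
 * costs about two, and (4) any free pipe is the last resort. A compressed
 * sketch of that fallback-chain shape, with hypothetical search callbacks
 * (the real searches take differing arguments, e.g. cur_opp_head):
 */
typedef int (*free_pipe_search_t)(const struct resource_context *cur_res_ctx,
		struct resource_context *new_res_ctx,
		const struct resource_pool *pool);

static int run_fallback_chain(const free_pipe_search_t *searches, int num,
		const struct resource_context *cur_res_ctx,
		struct resource_context *new_res_ctx,
		const struct resource_pool *pool)
{
	int i, idx = FREE_PIPE_INDEX_NOT_FOUND;

	/* stop at the first (cheapest) tier that yields a pipe */
	for (i = 0; i < num && idx == FREE_PIPE_INDEX_NOT_FOUND; i++)
		idx = searches[i](cur_res_ctx, new_res_ctx, pool);

	return idx;
}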
- -static struct pipe_ctx *find_idle_secondary_pipe_check_mpo( - struct resource_context *res_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *primary_pipe) -{ - int i; - struct pipe_ctx *secondary_pipe = NULL; - struct pipe_ctx *next_odm_mpo_pipe = NULL; - int primary_index, preferred_pipe_idx; - struct pipe_ctx *old_primary_pipe = NULL; - - /* - * Modified from find_idle_secondary_pipe - * With windowed MPO and ODM, we want to avoid the case where we want a - * free pipe for the left side but the free pipe is being used on the - * right side. - * Add a check on current_state: if the primary_pipe is the left side, - * check whether the right side ( primary_pipe->next_odm_pipe ) is - * using a pipe for MPO ( primary_pipe->next_odm_pipe->bottom_pipe ) - * - If so, then don't use this pipe - * EXCEPTION - 3 plane ( 2 MPO plane ) case - * - in this case, the primary pipe has already gotten a free pipe for the - * MPO window on the left - * - when it tries to get a free pipe for the MPO window on the right, - * it will see that it is already assigned to the right side - * ( primary_pipe->next_odm_pipe ). But in this case, we want this - * free pipe, since it will be for the right side. So add an - * additional condition, that skipping the free pipe on the right only - * applies if the primary pipe has no bottom pipe currently assigned - */ - if (primary_pipe) { - primary_index = primary_pipe->pipe_idx; - old_primary_pipe = &primary_pipe->stream->ctx->dc->current_state->res_ctx.pipe_ctx[primary_index]; - if ((old_primary_pipe->next_odm_pipe) && (old_primary_pipe->next_odm_pipe->bottom_pipe) - && (!primary_pipe->bottom_pipe)) - next_odm_mpo_pipe = old_primary_pipe->next_odm_pipe->bottom_pipe; - - preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx; - if ((res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) && - !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == preferred_pipe_idx)) { - secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; - secondary_pipe->pipe_idx = preferred_pipe_idx; - } - } - - /* - * search backwards for the second pipe to keep pipe - * assignment more consistent - */ - if (!secondary_pipe) - for (i = pool->pipe_count - 1; i >= 0; i--) { - if ((res_ctx->pipe_ctx[i].stream == NULL) && - !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == i)) { - secondary_pipe = &res_ctx->pipe_ctx[i]; - secondary_pipe->pipe_idx = i; - break; - } - } - - return secondary_pipe; -} - -static struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( - struct dc_state *state, - const struct resource_pool *pool, - struct dc_stream_state *stream, - const struct pipe_ctx *head_pipe) -{ - struct resource_context *res_ctx = &state->res_ctx; - struct pipe_ctx *idle_pipe, *pipe; - struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx; - int head_index; - - if (!head_pipe) - ASSERT(0); - - /* - * Modified from dcn20_acquire_idle_pipe_for_layer - * Check if head_pipe in old_context already has bottom_pipe allocated. - * - If so, check if that pipe is available in the current context.
- * -- If so, reuse pipe from old_context - */ - head_index = head_pipe->pipe_idx; - pipe = &old_ctx->pipe_ctx[head_index]; - if (pipe->bottom_pipe && res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx].stream == NULL) { - idle_pipe = &res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx]; - idle_pipe->pipe_idx = pipe->bottom_pipe->pipe_idx; - } else { - idle_pipe = find_idle_secondary_pipe_check_mpo(res_ctx, pool, head_pipe); - if (!idle_pipe) - return NULL; - } - - idle_pipe->stream = head_pipe->stream; - idle_pipe->stream_res.tg = head_pipe->stream_res.tg; - idle_pipe->stream_res.opp = head_pipe->stream_res.opp; - - idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; - idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; - - return idle_pipe; -} - -static int find_optimal_free_pipe_as_secondary_opp_head( - const struct resource_context *cur_res_ctx, - struct resource_context *new_res_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *new_otg_master) -{ - const struct pipe_ctx *cur_otg_master; - int free_pipe_idx; - - cur_otg_master = &cur_res_ctx->pipe_ctx[new_otg_master->pipe_idx]; - free_pipe_idx = resource_find_free_pipe_used_as_sec_opp_head_by_cur_otg_master( - cur_res_ctx, new_res_ctx, cur_otg_master); - - /* Up until here if we have not found a free secondary pipe, we will - * need to wait for at least one frame to complete the transition - * sequence. - */ - if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) - free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx( - cur_res_ctx, new_res_ctx, pool); - - if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) - free_pipe_idx = resource_find_any_free_pipe(new_res_ctx, pool); - - return free_pipe_idx; -} - -struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( - const struct dc_state *cur_ctx, - struct dc_state *new_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *opp_head_pipe) -{ - - int free_pipe_idx; - struct pipe_ctx *free_pipe; - - if (!opp_head_pipe->stream->ctx->dc->config.enable_windowed_mpo_odm) - return dcn32_acquire_idle_pipe_for_head_pipe_in_layer( - new_ctx, pool, opp_head_pipe->stream, opp_head_pipe); - - free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe( - &cur_ctx->res_ctx, &new_ctx->res_ctx, - pool, opp_head_pipe); - if (free_pipe_idx >= 0) { - free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx]; - free_pipe->pipe_idx = free_pipe_idx; - free_pipe->stream = opp_head_pipe->stream; - free_pipe->stream_res.tg = opp_head_pipe->stream_res.tg; - free_pipe->stream_res.opp = opp_head_pipe->stream_res.opp; - - free_pipe->plane_res.hubp = pool->hubps[free_pipe->pipe_idx]; - free_pipe->plane_res.ipp = pool->ipps[free_pipe->pipe_idx]; - free_pipe->plane_res.dpp = pool->dpps[free_pipe->pipe_idx]; - free_pipe->plane_res.mpcc_inst = - pool->dpps[free_pipe->pipe_idx]->inst; - } else { - ASSERT(opp_head_pipe); - free_pipe = NULL; - } - - return free_pipe; -} - -struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head( - const struct dc_state *cur_ctx, - struct dc_state *new_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *otg_master) -{ - int free_pipe_idx = find_optimal_free_pipe_as_secondary_opp_head( - &cur_ctx->res_ctx, &new_ctx->res_ctx, - pool, otg_master); - struct pipe_ctx *free_pipe; - - if (free_pipe_idx >= 0) { - free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx]; - free_pipe->pipe_idx = free_pipe_idx; - 
free_pipe->stream = otg_master->stream; - free_pipe->stream_res.tg = otg_master->stream_res.tg; - free_pipe->stream_res.dsc = NULL; - free_pipe->stream_res.opp = pool->opps[free_pipe_idx]; - free_pipe->plane_res.mi = pool->mis[free_pipe_idx]; - free_pipe->plane_res.hubp = pool->hubps[free_pipe_idx]; - free_pipe->plane_res.ipp = pool->ipps[free_pipe_idx]; - free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx]; - free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx]; - free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst; - if (free_pipe->stream->timing.flags.DSC == 1) { - dcn20_acquire_dsc(free_pipe->stream->ctx->dc, - &new_ctx->res_ctx, - &free_pipe->stream_res.dsc, - free_pipe_idx); - ASSERT(free_pipe->stream_res.dsc); - if (free_pipe->stream_res.dsc == NULL) { - memset(free_pipe, 0, sizeof(*free_pipe)); - free_pipe = NULL; - } - } - } else { - ASSERT(otg_master); - free_pipe = NULL; - } - - return free_pipe; -} - -unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans) -{ - /* - * DCN32 and DCN321 SKUs may have different sizes for MALL - * but we may not be able to access all the MALL space. - * If num_chans is a power of 2, then we can access all - * of the available MALL space. Otherwise, we can only - * access: - * - * max_cab_size_in_bytes = total_cache_size_in_bytes * - * ((2^floor(log2(num_chans)))/num_chans) - * - * Calculating the MALL sizes for all available SKUs, we - * have come up with the following simplified check. - * - we have max_chans which provides the max MALL size. - * Each channel supports 4 MB of MALL, so: - * - * total_cache_size_in_bytes = max_chans * 4 MB - * - * - we have avail_chans which shows the number of channels - * we can use if we can't access the entire MALL space. - * It is generally half of max_chans. - * - so we use the following checks: - * - * if (num_chans == max_chans), return max_chans - * if (num_chans < max_chans), return avail_chans - * - * - exception is GC_11_0_0, where we can't access max_chans, - * so we define max_avail_chans as the maximum available - * MALL space - * - */ - int gc_11_0_0_max_chans = 48; - int gc_11_0_0_max_avail_chans = 32; - int gc_11_0_0_avail_chans = 16; - int gc_11_0_3_max_chans = 16; - int gc_11_0_3_avail_chans = 8; - int gc_11_0_2_max_chans = 8; - int gc_11_0_2_avail_chans = 4; - - if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) { - return (num_chans == gc_11_0_0_max_chans) ? - gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans; - } else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) { - return (num_chans == gc_11_0_2_max_chans) ? - gc_11_0_2_max_chans : gc_11_0_2_avail_chans; - } else { // if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)) { - return (num_chans == gc_11_0_3_max_chans) ? - gc_11_0_3_max_chans : gc_11_0_3_avail_chans; - } -}
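/*
 * Worked example of the formula in dcn32_calc_num_avail_chans_for_mall()
 * above: for GC_11_0_0 with num_chans = 48, floor(log2(48)) = 5, so only
 * (2^5)/48 = 32/48 of the cache is addressable. At 4 MB per channel that is
 * 192 MB * 32/48 = 128 MB, i.e. the 32 "max avail" channels the code returns.
 * A direct sketch of the general formula (hypothetical helper, not in the
 * patch; the driver uses the per-ASIC table instead):
 */
static unsigned int mall_accessible_chans(unsigned int num_chans)
{
	unsigned int pow2 = 1;

	/* largest power of two not exceeding num_chans */
	while (pow2 * 2 <= num_chans)
		pow2 *= 2;

	/* accessible bytes = total_cache_bytes * pow2 / num_chans */
	return pow2;	/* mall_accessible_chans(48) == 32 */
}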
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h deleted file mode 100644 index 351c8a2843..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ /dev/null @@ -1,1268 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef _DCN32_RESOURCE_H_ -#define _DCN32_RESOURCE_H_ - -#include "core_types.h" - -#define DCN3_2_DEFAULT_DET_SIZE 256 -#define DCN3_2_MAX_DET_SIZE 1152 -#define DCN3_2_MIN_DET_SIZE 128 -#define DCN3_2_MIN_COMPBUF_SIZE_KB 128 -#define DCN3_2_DET_SEG_SIZE 64 -#define DCN3_2_MALL_MBLK_SIZE_BYTES 65536 // 64 * 1024 -#define DCN3_2_MBLK_WIDTH 128 -#define DCN3_2_MBLK_HEIGHT_4BPE 128 -#define DCN3_2_MBLK_HEIGHT_8BPE 64 -#define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10 MHz for init DCFCLK DS freq -#define SUBVP_HIGH_REFRESH_LIST_LEN 4 -#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800 -#define DCN3_2_VMIN_DISPCLK_HZ 717000000 -#define MIN_SUBVP_DCFCLK_KHZ 400000 - -#define TO_DCN32_RES_POOL(pool)\ - container_of(pool, struct dcn32_resource_pool, base) - -extern struct _vcs_dpi_ip_params_st dcn3_2_ip; -extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc; - -struct subvp_high_refresh_list { - int min_refresh; - int max_refresh; - struct resolution { - int width; - int height; - } res[SUBVP_HIGH_REFRESH_LIST_LEN]; -}; - -struct dcn32_resource_pool { - struct resource_pool base; -}; - -struct resource_pool *dcn32_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); - -struct panel_cntl *dcn32_panel_cntl_create( - const struct panel_cntl_init_data *init_data); - -bool dcn32_acquire_post_bldn_3dlut( - struct resource_context *res_ctx, - const struct resource_pool *pool, - int mpcc_id, - struct dc_3dlut **lut, - struct dc_transfer_func **shaper); - -bool dcn32_release_post_bldn_3dlut( - struct resource_context *res_ctx, - const struct resource_pool *pool, - struct dc_3dlut **lut, - struct dc_transfer_func **shaper); - -bool dcn32_remove_phantom_pipes(struct dc *dc, - struct dc_state *context, bool fast_update); - -void dcn32_retain_phantom_pipes(struct dc *dc, - struct dc_state *context); - -void dcn32_add_phantom_pipes(struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - unsigned int pipe_cnt, - unsigned int index); - -bool dcn32_validate_bandwidth(struct dc *dc, - struct dc_state *context, - bool fast_validate); - -int dcn32_populate_dml_pipes_from_context( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - bool fast_validate); - -void dcn32_calculate_wm_and_dlg( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt, - int vlevel); - -uint32_t
dcn32_helper_mall_bytes_to_ways( - struct dc *dc, - uint32_t total_size_in_mall_bytes); - -uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - bool ignore_cursor_buf); - -uint32_t dcn32_helper_calculate_num_ways_for_subvp( - struct dc *dc, - struct dc_state *context); - -void dcn32_merge_pipes_for_subvp(struct dc *dc, - struct dc_state *context); - -bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc, - struct dc_state *context); - -bool dcn32_subvp_in_use(struct dc *dc, - struct dc_state *context); - -bool dcn32_mpo_in_use(struct dc_state *context); - -bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context); -bool dcn32_is_center_timing(struct pipe_ctx *pipe); -bool dcn32_is_psr_capable(struct pipe_ctx *pipe); - -struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( - const struct dc_state *cur_ctx, - struct dc_state *new_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *opp_head_pipe); - -struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head( - const struct dc_state *cur_ctx, - struct dc_state *new_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *otg_master); - -void dcn32_release_pipe(struct dc_state *context, - struct pipe_ctx *pipe, - const struct resource_pool *pool); - -void dcn32_determine_det_override(struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes); - -void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes); - -void dcn32_save_mall_state(struct dc *dc, - struct dc_state *context, - struct mall_temp_config *temp_config); - -void dcn32_restore_mall_state(struct dc *dc, - struct dc_state *context, - struct mall_temp_config *temp_config); - -struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context); - -bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe); - -bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe); - -unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans); - -double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context); - -bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height); - -bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context); - -bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel); - -void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); - -void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context); - -/* definitions for run time init of reg offsets */ - -/* CLK SRC */ -#define CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) \ - SRI_ARR_ALPHABET(PIXCLK_RESYNC_CNTL, PHYPLL, index, pllid), \ - SRII_ARR_2(PHASE, DP_DTO, 0, index), \ - SRII_ARR_2(PHASE, DP_DTO, 1, index), \ - SRII_ARR_2(PHASE, DP_DTO, 2, index), \ - SRII_ARR_2(PHASE, DP_DTO, 3, index), \ - SRII_ARR_2(MODULO, DP_DTO, 0, index), \ - SRII_ARR_2(MODULO, DP_DTO, 1, index), \ - SRII_ARR_2(MODULO, DP_DTO, 2, index), \ - SRII_ARR_2(MODULO, DP_DTO, 3, index), \ - SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 0, index), \ - SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 1, index), \ - SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 2, index), \ - SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 3, index) - -/* ABM */ -#define ABM_DCN32_REG_LIST_RI(id) \ - SRI_ARR(DC_ABM1_HG_SAMPLE_RATE, 
ABM, id), \ - SRI_ARR(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \ - SRI_ARR(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \ - SRI_ARR(DC_ABM1_HG_MISC_CTRL, ABM, id), \ - SRI_ARR(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \ - SRI_ARR(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \ - SRI_ARR(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \ - SRI_ARR(BL1_PWM_USER_LEVEL, ABM, id), \ - SRI_ARR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \ - SRI_ARR(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \ - SRI_ARR(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \ - SRI_ARR(DC_ABM1_ACE_THRES_12, ABM, id), NBIO_SR_ARR(BIOS_SCRATCH_2, id) - -/* Audio */ -#define AUD_COMMON_REG_LIST_RI(id) \ - SRI_ARR(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZF0ENDPOINT, id), \ - SRI_ARR(AZALIA_F0_CODEC_ENDPOINT_DATA, AZF0ENDPOINT, id), \ - SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS, id), \ - SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, id), \ - SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, id), \ - SR_ARR(DCCG_AUDIO_DTO_SOURCE, id), SR_ARR(DCCG_AUDIO_DTO0_MODULE, id), \ - SR_ARR(DCCG_AUDIO_DTO0_PHASE, id), SR_ARR(DCCG_AUDIO_DTO1_MODULE, id), \ - SR_ARR(DCCG_AUDIO_DTO1_PHASE, id) \ - -/* VPG */ - -#define VPG_DCN3_REG_LIST_RI(id) \ - SRI_ARR(VPG_GENERIC_STATUS, VPG, id), \ - SRI_ARR(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \ - SRI_ARR(VPG_GENERIC_PACKET_DATA, VPG, id), \ - SRI_ARR(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \ - SRI_ARR(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id) - -/* AFMT */ -#define AFMT_DCN3_REG_LIST_RI(id) \ - SRI_ARR(AFMT_INFOFRAME_CONTROL0, AFMT, id), \ - SRI_ARR(AFMT_VBI_PACKET_CONTROL, AFMT, id), \ - SRI_ARR(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \ - SRI_ARR(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \ - SRI_ARR(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \ - SRI_ARR(AFMT_60958_0, AFMT, id), SRI_ARR(AFMT_60958_1, AFMT, id), \ - SRI_ARR(AFMT_60958_2, AFMT, id), SRI_ARR(AFMT_MEM_PWR, AFMT, id) - -/* APG */ -#define APG_DCN31_REG_LIST_RI(id) \ - SRI_ARR(APG_CONTROL, APG, id), SRI_ARR(APG_CONTROL2, APG, id), \ - SRI_ARR(APG_MEM_PWR, APG, id), SRI_ARR(APG_DBG_GEN_CONTROL, APG, id) - -/* Stream encoder */ -#define SE_DCN32_REG_LIST_RI(id) \ - SRI_ARR(AFMT_CNTL, DIG, id), SRI_ARR(DIG_FE_CNTL, DIG, id), \ - SRI_ARR(HDMI_CONTROL, DIG, id), SRI_ARR(HDMI_DB_CONTROL, DIG, id), \ - SRI_ARR(HDMI_GC, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \ - SRI_ARR(HDMI_INFOFRAME_CONTROL0, DIG, id), \ - SRI_ARR(HDMI_INFOFRAME_CONTROL1, DIG, id), \ - SRI_ARR(HDMI_VBI_PACKET_CONTROL, DIG, id), \ - SRI_ARR(HDMI_AUDIO_PACKET_CONTROL, DIG, id), \ - SRI_ARR(HDMI_ACR_PACKET_CONTROL, DIG, id), \ - SRI_ARR(HDMI_ACR_32_0, DIG, id), SRI_ARR(HDMI_ACR_32_1, DIG, id), \ - SRI_ARR(HDMI_ACR_44_0, DIG, id), SRI_ARR(HDMI_ACR_44_1, DIG, id), \ - SRI_ARR(HDMI_ACR_48_0, DIG, id), SRI_ARR(HDMI_ACR_48_1, DIG, id), \ - SRI_ARR(DP_DB_CNTL, DP, id), SRI_ARR(DP_MSA_MISC, DP, id), \ - SRI_ARR(DP_MSA_VBID_MISC, DP, id), SRI_ARR(DP_MSA_COLORIMETRY, DP, id), \ - SRI_ARR(DP_MSA_TIMING_PARAM1, DP, id), \ - SRI_ARR(DP_MSA_TIMING_PARAM2, DP, id), \ - 
SRI_ARR(DP_MSA_TIMING_PARAM3, DP, id), \ - SRI_ARR(DP_MSA_TIMING_PARAM4, DP, id), \ - SRI_ARR(DP_MSE_RATE_CNTL, DP, id), SRI_ARR(DP_MSE_RATE_UPDATE, DP, id), \ - SRI_ARR(DP_PIXEL_FORMAT, DP, id), SRI_ARR(DP_SEC_CNTL, DP, id), \ - SRI_ARR(DP_SEC_CNTL1, DP, id), SRI_ARR(DP_SEC_CNTL2, DP, id), \ - SRI_ARR(DP_SEC_CNTL5, DP, id), SRI_ARR(DP_SEC_CNTL6, DP, id), \ - SRI_ARR(DP_STEER_FIFO, DP, id), SRI_ARR(DP_VID_M, DP, id), \ - SRI_ARR(DP_VID_N, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \ - SRI_ARR(DP_VID_TIMING, DP, id), SRI_ARR(DP_SEC_AUD_N, DP, id), \ - SRI_ARR(DP_SEC_TIMESTAMP, DP, id), SRI_ARR(DP_DSC_CNTL, DP, id), \ - SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \ - SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ - SRI_ARR(DP_SEC_FRAMING4, DP, id), SRI_ARR(DP_GSP11_CNTL, DP, id), \ - SRI_ARR(DME_CONTROL, DME, id), \ - SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \ - SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ - SRI_ARR(DIG_FE_CNTL, DIG, id), SRI_ARR(DIG_CLOCK_PATTERN, DIG, id), \ - SRI_ARR(DIG_FIFO_CTRL0, DIG, id) - -/* Aux regs */ - -#define AUX_REG_LIST_RI(id) \ - SRI_ARR(AUX_CONTROL, DP_AUX, id), SRI_ARR(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \ - SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id) - -#define DCN2_AUX_REG_LIST_RI(id) \ - AUX_REG_LIST_RI(id), SRI_ARR(AUX_DPHY_TX_CONTROL, DP_AUX, id) - -/* HPD */ -#define HPD_REG_LIST_RI(id) SRI_ARR(DC_HPD_CONTROL, HPD, id) - -/* Link encoder */ -#define LE_DCN3_REG_LIST_RI(id) \ - SRI_ARR(DIG_BE_CNTL, DIG, id), SRI_ARR(DIG_BE_EN_CNTL, DIG, id), \ - SRI_ARR(TMDS_CTL_BITS, DIG, id), \ - SRI_ARR(TMDS_DCBALANCER_CONTROL, DIG, id), SRI_ARR(DP_CONFIG, DP, id), \ - SRI_ARR(DP_DPHY_CNTL, DP, id), SRI_ARR(DP_DPHY_PRBS_CNTL, DP, id), \ - SRI_ARR(DP_DPHY_SCRAM_CNTL, DP, id), SRI_ARR(DP_DPHY_SYM0, DP, id), \ - SRI_ARR(DP_DPHY_SYM1, DP, id), SRI_ARR(DP_DPHY_SYM2, DP, id), \ - SRI_ARR(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \ - SRI_ARR(DP_LINK_CNTL, DP, id), SRI_ARR(DP_LINK_FRAMING_CNTL, DP, id), \ - SRI_ARR(DP_MSE_SAT0, DP, id), SRI_ARR(DP_MSE_SAT1, DP, id), \ - SRI_ARR(DP_MSE_SAT2, DP, id), SRI_ARR(DP_MSE_SAT_UPDATE, DP, id), \ - SRI_ARR(DP_SEC_CNTL, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \ - SRI_ARR(DP_DPHY_FAST_TRAINING, DP, id), SRI_ARR(DP_SEC_CNTL1, DP, id), \ - SRI_ARR(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \ - SRI_ARR(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) - -#define LE_DCN31_REG_LIST_RI(id) \ - LE_DCN3_REG_LIST_RI(id), SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \ - SR_ARR(DIO_LINKA_CNTL, id), SR_ARR(DIO_LINKB_CNTL, id), \ - SR_ARR(DIO_LINKC_CNTL, id), SR_ARR(DIO_LINKD_CNTL, id), \ - SR_ARR(DIO_LINKE_CNTL, id), SR_ARR(DIO_LINKF_CNTL, id) - -#define UNIPHY_DCN2_REG_LIST_RI(id, phyid) \ - SRI_ARR_ALPHABET(CLOCK_ENABLE, SYMCLK, id, phyid), \ - SRI_ARR_ALPHABET(CHANNEL_XBAR_CNTL, UNIPHY, id, phyid) - -/* HPO DP stream encoder */ -#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) \ - SR_ARR(DP_STREAM_MAPPER_CONTROL0, id), \ - SR_ARR(DP_STREAM_MAPPER_CONTROL1, id), \ - SR_ARR(DP_STREAM_MAPPER_CONTROL2, id), \ - SR_ARR(DP_STREAM_MAPPER_CONTROL3, id), \ - SRI_ARR(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC, id), \ - SRI_ARR(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC, id), \ - SRI_ARR(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC, id), \ - SRI_ARR(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, DP_STREAM_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_PIXEL_FORMAT, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_MSA0, 
DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_MSA1, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_MSA2, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_MSA3, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_MSA4, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_MSA5, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_MSA6, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_MSA7, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_MSA8, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_MSA_CONTROL, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_FIFO_CONTROL, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_STREAM_CONTROL, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_VBID_CONTROL, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_SDP_CONTROL, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL0, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL2, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL3, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL5, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id), \ - SRI_ARR(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id) - -/* HPO DP link encoder regs */ -#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) \ - SRI_ARR(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \ - SRI_ARR(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_CONFIG, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED0, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED1, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED2, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED3, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_SQ_PULSE, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM0, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM1, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM2, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM3, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM4, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM5, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM6, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM7, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM8, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM9, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM10, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_SAT_VC0, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_SAT_VC1, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_SAT_VC2, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_SAT_VC3, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL0, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \ - SRI_ARR(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id) - -/* DPP */ -#define DPP_REG_LIST_DCN30_COMMON_RI(id) \ - SRI_ARR(CM_DEALPHA, CM, id), SRI_ARR(CM_MEM_PWR_STATUS, CM, id), \ - SRI_ARR(CM_BIAS_CR_R, CM, id), SRI_ARR(CM_BIAS_Y_G_CB_B, CM, id), \ - SRI_ARR(PRE_DEGAM, CNVC_CFG, id), SRI_ARR(CM_GAMCOR_CONTROL, CM, id), \ - SRI_ARR(CM_GAMCOR_LUT_CONTROL, CM, id), \ - SRI_ARR(CM_GAMCOR_LUT_INDEX, CM, id), \ - SRI_ARR(CM_GAMCOR_LUT_INDEX, 
CM, id), \ - SRI_ARR(CM_GAMCOR_LUT_DATA, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_B, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_G, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_R, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_B, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_B, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_G, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_G, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_R, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_R, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_REGION_0_1, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_REGION_32_33, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_OFFSET_B, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_OFFSET_G, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_OFFSET_R, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_B, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_G, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_R, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_B, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_G, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_R, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_B, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_B, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_G, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_G, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_R, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_R, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_REGION_0_1, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_REGION_32_33, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_OFFSET_B, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_OFFSET_G, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_OFFSET_R, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_G, CM, id), \ - SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_R, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_CONTROL, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_C11_C12, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_C13_C14, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_C21_C22, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_C23_C24, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_C31_C32, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_C33_C34, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_B_C11_C12, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_B_C13_C14, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_B_C21_C22, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_B_C23_C24, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_B_C31_C32, CM, id), \ - SRI_ARR(CM_GAMUT_REMAP_B_C33_C34, CM, id), \ - SRI_ARR(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \ - SRI_ARR(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \ - SRI_ARR(OTG_H_BLANK, DSCL, id), SRI_ARR(OTG_V_BLANK, DSCL, id), \ - SRI_ARR(SCL_MODE, DSCL, id), SRI_ARR(LB_DATA_FORMAT, DSCL, id), \ - SRI_ARR(LB_MEMORY_CTRL, DSCL, id), SRI_ARR(DSCL_AUTOCAL, DSCL, id), \ - SRI_ARR(DSCL_CONTROL, DSCL, id), \ - SRI_ARR(SCL_TAP_CONTROL, DSCL, id), \ - SRI_ARR(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \ - SRI_ARR(SCL_COEF_RAM_TAP_DATA, DSCL, id), \ - SRI_ARR(DSCL_2TAP_CONTROL, DSCL, id), SRI_ARR(MPC_SIZE, DSCL, id), \ - SRI_ARR(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \ - SRI_ARR(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \ - SRI_ARR(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \ - SRI_ARR(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \ - SRI_ARR(SCL_HORZ_FILTER_INIT, DSCL, id), \ - SRI_ARR(SCL_HORZ_FILTER_INIT_C, DSCL, id), \ - 
SRI_ARR(SCL_VERT_FILTER_INIT, DSCL, id), \ - SRI_ARR(SCL_VERT_FILTER_INIT_C, DSCL, id), \ - SRI_ARR(RECOUT_START, DSCL, id), SRI_ARR(RECOUT_SIZE, DSCL, id), \ - SRI_ARR(PRE_DEALPHA, CNVC_CFG, id), SRI_ARR(PRE_REALPHA, CNVC_CFG, id), \ - SRI_ARR(PRE_CSC_MODE, CNVC_CFG, id), \ - SRI_ARR(PRE_CSC_C11_C12, CNVC_CFG, id), \ - SRI_ARR(PRE_CSC_C33_C34, CNVC_CFG, id), \ - SRI_ARR(PRE_CSC_B_C11_C12, CNVC_CFG, id), \ - SRI_ARR(PRE_CSC_B_C33_C34, CNVC_CFG, id), \ - SRI_ARR(CM_POST_CSC_CONTROL, CM, id), \ - SRI_ARR(CM_POST_CSC_C11_C12, CM, id), \ - SRI_ARR(CM_POST_CSC_C33_C34, CM, id), \ - SRI_ARR(CM_POST_CSC_B_C11_C12, CM, id), \ - SRI_ARR(CM_POST_CSC_B_C33_C34, CM, id), \ - SRI_ARR(CM_MEM_PWR_CTRL, CM, id), SRI_ARR(CM_CONTROL, CM, id), \ - SRI_ARR(FORMAT_CONTROL, CNVC_CFG, id), \ - SRI_ARR(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ - SRI_ARR(CURSOR0_CONTROL, CNVC_CUR, id), \ - SRI_ARR(CURSOR0_COLOR0, CNVC_CUR, id), \ - SRI_ARR(CURSOR0_COLOR1, CNVC_CUR, id), \ - SRI_ARR(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \ - SRI_ARR(DPP_CONTROL, DPP_TOP, id), SRI_ARR(CM_HDR_MULT_COEF, CM, id), \ - SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \ - SRI_ARR(ALPHA_2BIT_LUT, CNVC_CFG, id), \ - SRI_ARR(FCNV_FP_BIAS_R, CNVC_CFG, id), \ - SRI_ARR(FCNV_FP_BIAS_G, CNVC_CFG, id), \ - SRI_ARR(FCNV_FP_BIAS_B, CNVC_CFG, id), \ - SRI_ARR(FCNV_FP_SCALE_R, CNVC_CFG, id), \ - SRI_ARR(FCNV_FP_SCALE_G, CNVC_CFG, id), \ - SRI_ARR(FCNV_FP_SCALE_B, CNVC_CFG, id), \ - SRI_ARR(COLOR_KEYER_CONTROL, CNVC_CFG, id), \ - SRI_ARR(COLOR_KEYER_ALPHA, CNVC_CFG, id), \ - SRI_ARR(COLOR_KEYER_RED, CNVC_CFG, id), \ - SRI_ARR(COLOR_KEYER_GREEN, CNVC_CFG, id), \ - SRI_ARR(COLOR_KEYER_BLUE, CNVC_CFG, id), \ - SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \ - SRI_ARR(OBUF_MEM_PWR_CTRL, DSCL, id), \ - SRI_ARR(DSCL_MEM_PWR_STATUS, DSCL, id), \ - SRI_ARR(DSCL_MEM_PWR_CTRL, DSCL, id) - -/* OPP */ -#define OPP_REG_LIST_DCN_RI(id) \ - SRI_ARR(FMT_BIT_DEPTH_CONTROL, FMT, id), SRI_ARR(FMT_CONTROL, FMT, id), \ - SRI_ARR(FMT_DITHER_RAND_R_SEED, FMT, id), \ - SRI_ARR(FMT_DITHER_RAND_G_SEED, FMT, id), \ - SRI_ARR(FMT_DITHER_RAND_B_SEED, FMT, id), \ - SRI_ARR(FMT_CLAMP_CNTL, FMT, id), \ - SRI_ARR(FMT_DYNAMIC_EXP_CNTL, FMT, id), \ - SRI_ARR(FMT_MAP420_MEMORY_CONTROL, FMT, id), \ - SRI_ARR(OPPBUF_CONTROL, OPPBUF, id), \ - SRI_ARR(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \ - SRI_ARR(OPPBUF_3D_PARAMETERS_1, OPPBUF, id), \ - SRI_ARR(OPP_PIPE_CONTROL, OPP_PIPE, id) \ - -#define OPP_REG_LIST_DCN10_RI(id) OPP_REG_LIST_DCN_RI(id) - -#define OPP_DPG_REG_LIST_RI(id) \ - SRI_ARR(DPG_CONTROL, DPG, id), SRI_ARR(DPG_DIMENSIONS, DPG, id), \ - SRI_ARR(DPG_OFFSET_SEGMENT, DPG, id), SRI_ARR(DPG_COLOUR_B_CB, DPG, id), \ - SRI_ARR(DPG_COLOUR_G_Y, DPG, id), SRI_ARR(DPG_COLOUR_R_CR, DPG, id), \ - SRI_ARR(DPG_RAMP_CONTROL, DPG, id), SRI_ARR(DPG_STATUS, DPG, id) - -#define OPP_REG_LIST_DCN30_RI(id) \ - OPP_REG_LIST_DCN10_RI(id), OPP_DPG_REG_LIST_RI(id), \ - SRI_ARR(FMT_422_CONTROL, FMT, id) - -/* Aux engine regs */ -#define AUX_COMMON_REG_LIST0_RI(id) \ - SRI_ARR(AUX_CONTROL, DP_AUX, id), SRI_ARR(AUX_ARB_CONTROL, DP_AUX, id), \ - SRI_ARR(AUX_SW_DATA, DP_AUX, id), SRI_ARR(AUX_SW_CONTROL, DP_AUX, id), \ - SRI_ARR(AUX_INTERRUPT_CONTROL, DP_AUX, id), \ - SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \ - SRI_ARR(AUX_SW_STATUS, DP_AUX, id) - -/* DWBC */ -#define DWBC_COMMON_REG_LIST_DCN30_RI(id) \ - SR_ARR(DWB_ENABLE_CLK_CTRL, id), SR_ARR(DWB_MEM_PWR_CTRL, id), \ - SR_ARR(FC_MODE_CTRL, id), SR_ARR(FC_FLOW_CTRL, id), \ - SR_ARR(FC_WINDOW_START, id), SR_ARR(FC_WINDOW_SIZE, id), \ - SR_ARR(FC_SOURCE_SIZE, id), 
SR_ARR(DWB_UPDATE_CTRL, id), \ - SR_ARR(DWB_CRC_CTRL, id), SR_ARR(DWB_CRC_MASK_R_G, id), \ - SR_ARR(DWB_CRC_MASK_B_A, id), SR_ARR(DWB_CRC_VAL_R_G, id), \ - SR_ARR(DWB_CRC_VAL_B_A, id), SR_ARR(DWB_OUT_CTRL, id), \ - SR_ARR(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, id), \ - SR_ARR(DWB_MMHUBBUB_BACKPRESSURE_CNT, id), \ - SR_ARR(DWB_HOST_READ_CONTROL, id), SR_ARR(DWB_SOFT_RESET, id), \ - SR_ARR(DWB_HDR_MULT_COEF, id), SR_ARR(DWB_GAMUT_REMAP_MODE, id), \ - SR_ARR(DWB_GAMUT_REMAP_COEF_FORMAT, id), \ - SR_ARR(DWB_GAMUT_REMAPA_C11_C12, id), \ - SR_ARR(DWB_GAMUT_REMAPA_C13_C14, id), \ - SR_ARR(DWB_GAMUT_REMAPA_C21_C22, id), \ - SR_ARR(DWB_GAMUT_REMAPA_C23_C24, id), \ - SR_ARR(DWB_GAMUT_REMAPA_C31_C32, id), \ - SR_ARR(DWB_GAMUT_REMAPA_C33_C34, id), \ - SR_ARR(DWB_GAMUT_REMAPB_C11_C12, id), \ - SR_ARR(DWB_GAMUT_REMAPB_C13_C14, id), \ - SR_ARR(DWB_GAMUT_REMAPB_C21_C22, id), \ - SR_ARR(DWB_GAMUT_REMAPB_C23_C24, id), \ - SR_ARR(DWB_GAMUT_REMAPB_C31_C32, id), \ - SR_ARR(DWB_GAMUT_REMAPB_C33_C34, id), SR_ARR(DWB_OGAM_CONTROL, id), \ - SR_ARR(DWB_OGAM_LUT_INDEX, id), SR_ARR(DWB_OGAM_LUT_DATA, id), \ - SR_ARR(DWB_OGAM_LUT_CONTROL, id), \ - SR_ARR(DWB_OGAM_RAMA_START_CNTL_B, id), \ - SR_ARR(DWB_OGAM_RAMA_START_CNTL_G, id), \ - SR_ARR(DWB_OGAM_RAMA_START_CNTL_R, id), \ - SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_B, id), \ - SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_B, id), \ - SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_G, id), \ - SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_G, id), \ - SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_R, id), \ - SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_R, id), \ - SR_ARR(DWB_OGAM_RAMA_END_CNTL1_B, id), \ - SR_ARR(DWB_OGAM_RAMA_END_CNTL2_B, id), \ - SR_ARR(DWB_OGAM_RAMA_END_CNTL1_G, id), \ - SR_ARR(DWB_OGAM_RAMA_END_CNTL2_G, id), \ - SR_ARR(DWB_OGAM_RAMA_END_CNTL1_R, id), \ - SR_ARR(DWB_OGAM_RAMA_END_CNTL2_R, id), \ - SR_ARR(DWB_OGAM_RAMA_OFFSET_B, id), SR_ARR(DWB_OGAM_RAMA_OFFSET_G, id), \ - SR_ARR(DWB_OGAM_RAMA_OFFSET_R, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_0_1, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_2_3, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_4_5, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_6_7, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_8_9, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_10_11, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_12_13, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_14_15, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_16_17, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_18_19, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_20_21, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_22_23, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_24_25, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_26_27, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_28_29, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_30_31, id), \ - SR_ARR(DWB_OGAM_RAMA_REGION_32_33, id), \ - SR_ARR(DWB_OGAM_RAMB_START_CNTL_B, id), \ - SR_ARR(DWB_OGAM_RAMB_START_CNTL_G, id), \ - SR_ARR(DWB_OGAM_RAMB_START_CNTL_R, id), \ - SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_B, id), \ - SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_B, id), \ - SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_G, id), \ - SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_G, id), \ - SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_R, id), \ - SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_R, id), \ - SR_ARR(DWB_OGAM_RAMB_END_CNTL1_B, id), \ - SR_ARR(DWB_OGAM_RAMB_END_CNTL2_B, id), \ - SR_ARR(DWB_OGAM_RAMB_END_CNTL1_G, id), \ - SR_ARR(DWB_OGAM_RAMB_END_CNTL2_G, id), \ - SR_ARR(DWB_OGAM_RAMB_END_CNTL1_R, id), \ - SR_ARR(DWB_OGAM_RAMB_END_CNTL2_R, id), \ - SR_ARR(DWB_OGAM_RAMB_OFFSET_B, id), SR_ARR(DWB_OGAM_RAMB_OFFSET_G, id), \ - SR_ARR(DWB_OGAM_RAMB_OFFSET_R, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_0_1, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_2_3, 
id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_4_5, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_6_7, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_8_9, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_10_11, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_12_13, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_14_15, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_16_17, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_18_19, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_20_21, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_22_23, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_24_25, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_26_27, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_28_29, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_30_31, id), \ - SR_ARR(DWB_OGAM_RAMB_REGION_32_33, id) - -/* MCIF */ - -#define MCIF_WB_COMMON_REG_LIST_DCN32_RI(inst) \ - SRI2_ARR(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_PITCH, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_1_STATUS, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_1_STATUS2, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_2_STATUS, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_2_STATUS2, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_3_STATUS, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_3_STATUS2, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_4_STATUS, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_4_STATUS2, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_SCLK_CHANGE, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_1_ADDR_C, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_2_ADDR_C, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_3_ADDR_C, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_4_ADDR_C, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, MMHUBBUB, inst), \ - SRI2_ARR(MCIF_WB_NB_PSTATE_CONTROL, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_WATERMARK, MMHUBBUB, inst), \ - SRI2_ARR(MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_SELF_REFRESH_CONTROL, MCIF_WB, inst), \ - SRI2_ARR(MULTI_LEVEL_QOS_CTRL, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_SECURITY_LEVEL, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB, inst), \ - SRI2_ARR(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB, inst), \ - SRI2_ARR(MMHUBBUB_MEM_PWR_CNTL, MMHUBBUB, inst), \ - SRI2_ARR(MMHUBBUB_WARMUP_ADDR_REGION, MMHUBBUB, inst), \ - SRI2_ARR(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, MMHUBBUB, inst), \ - SRI2_ARR(MMHUBBUB_WARMUP_BASE_ADDR_LOW, MMHUBBUB, inst), \ - SRI2_ARR(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB, inst) - -/* DSC */ - -#define DSC_REG_LIST_DCN20_RI(id) \ - SRI_ARR(DSC_TOP_CONTROL, DSC_TOP, id), \ - SRI_ARR(DSC_DEBUG_CONTROL, DSC_TOP, id), \ - SRI_ARR(DSCC_CONFIG0, DSCC, id), SRI_ARR(DSCC_CONFIG1, DSCC, id), \ - 
SRI_ARR(DSCC_STATUS, DSCC, id), \ - SRI_ARR(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG0, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG1, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG2, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG3, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG4, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG5, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG6, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG7, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG8, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG9, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG10, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG11, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG12, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG13, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG14, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG15, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG16, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG17, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG18, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG19, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG20, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG21, DSCC, id), \ - SRI_ARR(DSCC_PPS_CONFIG22, DSCC, id), \ - SRI_ARR(DSCC_MEM_POWER_CONTROL, DSCC, id), \ - SRI_ARR(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id), \ - SRI_ARR(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id), \ - SRI_ARR(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id), \ - SRI_ARR(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id), \ - SRI_ARR(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id), \ - SRI_ARR(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id), \ - SRI_ARR(DSCC_MAX_ABS_ERROR0, DSCC, id), \ - SRI_ARR(DSCC_MAX_ABS_ERROR1, DSCC, id), \ - SRI_ARR(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id), \ - SRI_ARR(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \ - SRI_ARR(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id), \ - SRI_ARR(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \ - SRI_ARR(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id), \ - SRI_ARR(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \ - SRI_ARR(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id), \ - SRI_ARR(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \ - SRI_ARR(DSCCIF_CONFIG0, DSCCIF, id), \ - SRI_ARR(DSCCIF_CONFIG1, DSCCIF, id), \ - SRI_ARR(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id) - -/* MPC */ - -#define MPC_DWB_MUX_REG_LIST_DCN3_0_RI(inst) \ - SRII_DWB(DWB_MUX, MUX, MPC_DWB, inst) - -#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst) \ - SRII(MUX, MPC_OUT, inst), VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst) - -#define MPC_OUT_MUX_REG_LIST_DCN3_0_RI(inst) \ - MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst), SRII(CSC_MODE, MPC_OUT, inst), \ - SRII(CSC_C11_C12_A, MPC_OUT, inst), SRII(CSC_C33_C34_A, MPC_OUT, inst), \ - SRII(CSC_C11_C12_B, MPC_OUT, inst), SRII(CSC_C33_C34_B, MPC_OUT, inst), \ - SRII(DENORM_CONTROL, MPC_OUT, inst), \ - SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst), \ - SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst), SR(MPC_OUT_CSC_COEF_FORMAT) - -#define MPC_COMMON_REG_LIST_DCN1_0_RI(inst) \ - SRII(MPCC_TOP_SEL, MPCC, inst), SRII(MPCC_BOT_SEL, MPCC, inst), \ - SRII(MPCC_CONTROL, MPCC, inst), SRII(MPCC_STATUS, MPCC, inst), \ - SRII(MPCC_OPP_ID, MPCC, inst), SRII(MPCC_BG_G_Y, MPCC, inst), \ - SRII(MPCC_BG_R_CR, MPCC, inst), SRII(MPCC_BG_B_CB, MPCC, inst), \ - SRII(MPCC_SM_CONTROL, MPCC, inst), \ - SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst) - -#define MPC_REG_LIST_DCN3_0_RI(inst) \ - MPC_COMMON_REG_LIST_DCN1_0_RI(inst), SRII(MPCC_TOP_GAIN, MPCC, inst), \ - SRII(MPCC_BOT_GAIN_INSIDE, MPCC, inst), \ - SRII(MPCC_BOT_GAIN_OUTSIDE, MPCC, inst), \ - SRII(MPCC_MEM_PWR_CTRL, MPCC, inst), \ - SRII(MPCC_OGAM_LUT_INDEX, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_LUT_DATA, MPCC_OGAM, inst), \ 
- SRII(MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_OGAM, inst), \ - SRII(MPCC_GAMUT_REMAP_MODE, MPCC_OGAM, inst), \ - SRII(MPC_GAMUT_REMAP_C11_C12_A, MPCC_OGAM, inst), \ - SRII(MPC_GAMUT_REMAP_C33_C34_A, MPCC_OGAM, inst), \ - SRII(MPC_GAMUT_REMAP_C11_C12_B, MPCC_OGAM, inst), \ - SRII(MPC_GAMUT_REMAP_C33_C34_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_START_CNTL_G, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_START_CNTL_R, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_G, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_R, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_END_CNTL1_G, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_END_CNTL2_G, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_END_CNTL1_R, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_END_CNTL2_R, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_REGION_32_33, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_G, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_R, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_START_CNTL_G, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_START_CNTL_R, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_G, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_R, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_END_CNTL1_G, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_END_CNTL2_G, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_END_CNTL1_R, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_END_CNTL2_R, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_REGION_32_33, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_OFFSET_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_OFFSET_G, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_OFFSET_R, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_B, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_G, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_R, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_CONTROL, MPCC_OGAM, inst), \ - SRII(MPCC_OGAM_LUT_CONTROL, MPCC_OGAM, inst) - -#define MPC_REG_LIST_DCN3_2_RI(inst) \ - MPC_REG_LIST_DCN3_0_RI(inst),\ - SRII(MPCC_MOVABLE_CM_LOCATION_CONTROL, MPCC, inst),\ - SRII(MPCC_MCM_SHAPER_CONTROL, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_OFFSET_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_OFFSET_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_OFFSET_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_SCALE_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_SCALE_G_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_LUT_INDEX, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_LUT_DATA, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_B, MPCC_MCM, inst),\ - 
SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_0_1, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_2_3, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_4_5, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_6_7, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_8_9, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_10_11, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_12_13, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_14_15, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_16_17, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_18_19, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_20_21, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_22_23, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_24_25, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_26_27, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_28_29, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_30_31, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMA_REGION_32_33, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_0_1, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_2_3, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_4_5, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_6_7, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_8_9, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_10_11, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_12_13, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_14_15, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_16_17, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_18_19, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_20_21, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_22_23, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_24_25, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_26_27, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_28_29, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_30_31, MPCC_MCM, inst),\ - SRII(MPCC_MCM_SHAPER_RAMB_REGION_32_33, MPCC_MCM, inst),\ - SRII(MPCC_MCM_3DLUT_MODE, MPCC_MCM, inst), /*TODO: may need to add other 3DLUT regs*/\ - SRII(MPCC_MCM_3DLUT_INDEX, MPCC_MCM, inst),\ - SRII(MPCC_MCM_3DLUT_DATA, MPCC_MCM, inst),\ - SRII(MPCC_MCM_3DLUT_DATA_30BIT, MPCC_MCM, inst),\ - SRII(MPCC_MCM_3DLUT_READ_WRITE_CONTROL, MPCC_MCM, inst),\ - SRII(MPCC_MCM_3DLUT_OUT_NORM_FACTOR, MPCC_MCM, inst),\ - SRII(MPCC_MCM_3DLUT_OUT_OFFSET_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_3DLUT_OUT_OFFSET_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_3DLUT_OUT_OFFSET_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_CONTROL, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_LUT_INDEX, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_LUT_DATA, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_LUT_CONTROL, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G, MPCC_MCM, inst),\ - 
SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_0_1, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_2_3, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_4_5, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_6_7, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_8_9, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_10_11, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_12_13, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_14_15, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_16_17, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_18_19, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_20_21, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_22_23, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_24_25, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_26_27, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_28_29, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_30_31, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMA_REGION_32_33, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_B, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_G, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_R, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_0_1, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_2_3, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_4_5, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_6_7, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_8_9, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_10_11, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_12_13, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_14_15, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_16_17, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_18_19, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_20_21, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_22_23, MPCC_MCM, inst),\ - 
SRII(MPCC_MCM_1DLUT_RAMB_REGION_24_25, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_26_27, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_28_29, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_30_31, MPCC_MCM, inst),\ - SRII(MPCC_MCM_1DLUT_RAMB_REGION_32_33, MPCC_MCM, inst),\ - SRII(MPCC_MCM_MEM_PWR_CTRL, MPCC_MCM, inst) -/* OPTC */ - -#define OPTC_COMMON_REG_LIST_DCN3_2_RI(inst) \ - SRI_ARR(OTG_VSTARTUP_PARAM, OTG, inst), \ - SRI_ARR(OTG_VUPDATE_PARAM, OTG, inst), \ - SRI_ARR(OTG_VREADY_PARAM, OTG, inst), \ - SRI_ARR(OTG_MASTER_UPDATE_LOCK, OTG, inst), \ - SRI_ARR(OTG_GLOBAL_CONTROL0, OTG, inst), \ - SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst), \ - SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst), \ - SRI_ARR(OTG_GLOBAL_CONTROL4, OTG, inst), \ - SRI_ARR(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst), \ - SRI_ARR(OTG_H_TOTAL, OTG, inst), \ - SRI_ARR(OTG_H_BLANK_START_END, OTG, inst), \ - SRI_ARR(OTG_H_SYNC_A, OTG, inst), SRI_ARR(OTG_H_SYNC_A_CNTL, OTG, inst), \ - SRI_ARR(OTG_H_TIMING_CNTL, OTG, inst), SRI_ARR(OTG_V_TOTAL, OTG, inst), \ - SRI_ARR(OTG_V_BLANK_START_END, OTG, inst), \ - SRI_ARR(OTG_V_SYNC_A, OTG, inst), SRI_ARR(OTG_V_SYNC_A_CNTL, OTG, inst), \ - SRI_ARR(OTG_CONTROL, OTG, inst), SRI_ARR(OTG_STEREO_CONTROL, OTG, inst), \ - SRI_ARR(OTG_3D_STRUCTURE_CONTROL, OTG, inst), \ - SRI_ARR(OTG_STEREO_STATUS, OTG, inst), \ - SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst), \ - SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst), \ - SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst), \ - SRI_ARR(OTG_TRIGA_CNTL, OTG, inst), \ - SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst), \ - SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst), \ - SRI_ARR(OTG_STATUS_FRAME_COUNT, OTG, inst), \ - SRI_ARR(OTG_STATUS, OTG, inst), SRI_ARR(OTG_STATUS_POSITION, OTG, inst), \ - SRI_ARR(OTG_NOM_VERT_POSITION, OTG, inst), \ - SRI_ARR(OTG_M_CONST_DTO0, OTG, inst), \ - SRI_ARR(OTG_M_CONST_DTO1, OTG, inst), \ - SRI_ARR(OTG_CLOCK_CONTROL, OTG, inst), \ - SRI_ARR(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst), \ - SRI_ARR(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst), \ - SRI_ARR(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst), \ - SRI_ARR(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst), \ - SRI_ARR(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst), \ - SRI_ARR(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst), \ - SRI_ARR(OPTC_INPUT_CLOCK_CONTROL, ODM, inst), \ - SRI_ARR(OPTC_DATA_SOURCE_SELECT, ODM, inst), \ - SRI_ARR(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst), \ - SRI_ARR(CONTROL, VTG, inst), SRI_ARR(OTG_VERT_SYNC_CONTROL, OTG, inst), \ - SRI_ARR(OTG_GSL_CONTROL, OTG, inst), SRI_ARR(OTG_CRC_CNTL, OTG, inst), \ - SRI_ARR(OTG_CRC0_DATA_RG, OTG, inst), \ - SRI_ARR(OTG_CRC0_DATA_B, OTG, inst), \ - SRI_ARR(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst), \ - SRI_ARR(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst), \ - SRI_ARR(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst), \ - SRI_ARR(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst), \ - SR_ARR(GSL_SOURCE_SELECT, inst), \ - SRI_ARR(OTG_TRIGA_MANUAL_TRIG, OTG, inst), \ - SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst), \ - SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst), \ - SRI_ARR(OTG_GSL_WINDOW_X, OTG, inst), \ - SRI_ARR(OTG_GSL_WINDOW_Y, OTG, inst), \ - SRI_ARR(OTG_VUPDATE_KEEPOUT, OTG, inst), \ - SRI_ARR(OTG_DSC_START_POSITION, OTG, inst), \ - SRI_ARR(OTG_DRR_TRIGGER_WINDOW, OTG, inst), \ - SRI_ARR(OTG_DRR_V_TOTAL_CHANGE, OTG, inst), \ - SRI_ARR(OPTC_DATA_FORMAT_CONTROL, ODM, inst), \ - SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst), \ - SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \ - SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \ - SRI_ARR(OTG_DRR_CONTROL, OTG, inst) - -/* HUBP */ - -#define 
HUBP_REG_LIST_DCN_VM_RI(id) \ - SRI_ARR(NOM_PARAMETERS_0, HUBPREQ, id), \ - SRI_ARR(NOM_PARAMETERS_1, HUBPREQ, id), \ - SRI_ARR(NOM_PARAMETERS_2, HUBPREQ, id), \ - SRI_ARR(NOM_PARAMETERS_3, HUBPREQ, id), \ - SRI_ARR(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id) -#define HUBP_REG_LIST_DCN_RI(id) \ - SRI_ARR(DCHUBP_CNTL, HUBP, id), SRI_ARR(HUBPREQ_DEBUG_DB, HUBP, id), \ - SRI_ARR(HUBPREQ_DEBUG, HUBP, id), SRI_ARR(DCSURF_ADDR_CONFIG, HUBP, id), \ - SRI_ARR(DCSURF_TILING_CONFIG, HUBP, id), \ - SRI_ARR(DCSURF_SURFACE_PITCH, HUBPREQ, id), \ - SRI_ARR(DCSURF_SURFACE_PITCH_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_SURFACE_CONFIG, HUBP, id), \ - SRI_ARR(DCSURF_FLIP_CONTROL, HUBPREQ, id), \ - SRI_ARR(DCSURF_PRI_VIEWPORT_DIMENSION, HUBP, id), \ - SRI_ARR(DCSURF_PRI_VIEWPORT_START, HUBP, id), \ - SRI_ARR(DCSURF_SEC_VIEWPORT_DIMENSION, HUBP, id), \ - SRI_ARR(DCSURF_SEC_VIEWPORT_START, HUBP, id), \ - SRI_ARR(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \ - SRI_ARR(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \ - SRI_ARR(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \ - SRI_ARR(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \ - SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \ - SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id), \ - SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \ - SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS, HUBPREQ, id), \ - SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \ - SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS, HUBPREQ, id), \ - SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \ - SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id), \ - SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_SURFACE_INUSE, HUBPREQ, id), \ - SRI_ARR(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id), \ - SRI_ARR(DCSURF_SURFACE_INUSE_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_SURFACE_INUSE_HIGH_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE, HUBPREQ, id), \ - SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_HIGH, HUBPREQ, id), \ - SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id), \ - SRI_ARR(DCSURF_SURFACE_CONTROL, HUBPREQ, id), \ - SRI_ARR(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id), \ - SRI_ARR(HUBPRET_CONTROL, HUBPRET, id), \ - SRI_ARR(HUBPRET_READ_LINE_STATUS, HUBPRET, id), \ - SRI_ARR(DCN_EXPANSION_MODE, HUBPREQ, id), \ - SRI_ARR(DCHUBP_REQ_SIZE_CONFIG, HUBP, id), \ - SRI_ARR(DCHUBP_REQ_SIZE_CONFIG_C, HUBP, id), \ - SRI_ARR(BLANK_OFFSET_0, HUBPREQ, id), \ - SRI_ARR(BLANK_OFFSET_1, HUBPREQ, id), \ - SRI_ARR(DST_DIMENSIONS, HUBPREQ, id), \ - SRI_ARR(DST_AFTER_SCALER, HUBPREQ, id), \ - SRI_ARR(VBLANK_PARAMETERS_0, HUBPREQ, id), \ - SRI_ARR(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id), \ - SRI_ARR(VBLANK_PARAMETERS_1, HUBPREQ, id), \ - SRI_ARR(VBLANK_PARAMETERS_3, HUBPREQ, id), \ - SRI_ARR(NOM_PARAMETERS_4, HUBPREQ, id), \ - SRI_ARR(NOM_PARAMETERS_5, HUBPREQ, id), \ - SRI_ARR(PER_LINE_DELIVERY_PRE, HUBPREQ, id), \ - SRI_ARR(PER_LINE_DELIVERY, HUBPREQ, id), \ - SRI_ARR(VBLANK_PARAMETERS_2, HUBPREQ, id), \ - SRI_ARR(VBLANK_PARAMETERS_4, 
HUBPREQ, id), \ - SRI_ARR(NOM_PARAMETERS_6, HUBPREQ, id), \ - SRI_ARR(NOM_PARAMETERS_7, HUBPREQ, id), \ - SRI_ARR(DCN_TTU_QOS_WM, HUBPREQ, id), \ - SRI_ARR(DCN_GLOBAL_TTU_CNTL, HUBPREQ, id), \ - SRI_ARR(DCN_SURF0_TTU_CNTL0, HUBPREQ, id), \ - SRI_ARR(DCN_SURF0_TTU_CNTL1, HUBPREQ, id), \ - SRI_ARR(DCN_SURF1_TTU_CNTL0, HUBPREQ, id), \ - SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \ - SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \ - SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \ - SRI_ARR(HUBP_CLK_CNTL, HUBP, id) -#define HUBP_REG_LIST_DCN2_COMMON_RI(id) \ - HUBP_REG_LIST_DCN_RI(id), HUBP_REG_LIST_DCN_VM_RI(id), \ - SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \ - SRI_ARR(PREFETCH_SETTINGS_C, HUBPREQ, id), \ - SRI_ARR(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, HUBPREQ, id), \ - SRI_ARR(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, HUBPREQ, id), \ - SRI_ARR(CURSOR_SETTINGS, HUBPREQ, id), \ - SRI_ARR(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \ - SRI_ARR(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \ - SRI_ARR(CURSOR_SIZE, CURSOR0_, id), \ - SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \ - SRI_ARR(CURSOR_POSITION, CURSOR0_, id), \ - SRI_ARR(CURSOR_HOT_SPOT, CURSOR0_, id), \ - SRI_ARR(CURSOR_DST_OFFSET, CURSOR0_, id), \ - SRI_ARR(DMDATA_ADDRESS_HIGH, CURSOR0_, id), \ - SRI_ARR(DMDATA_ADDRESS_LOW, CURSOR0_, id), \ - SRI_ARR(DMDATA_CNTL, CURSOR0_, id), \ - SRI_ARR(DMDATA_SW_CNTL, CURSOR0_, id), \ - SRI_ARR(DMDATA_QOS_CNTL, CURSOR0_, id), \ - SRI_ARR(DMDATA_SW_DATA, CURSOR0_, id), \ - SRI_ARR(DMDATA_STATUS, CURSOR0_, id), \ - SRI_ARR(FLIP_PARAMETERS_0, HUBPREQ, id), \ - SRI_ARR(FLIP_PARAMETERS_1, HUBPREQ, id), \ - SRI_ARR(FLIP_PARAMETERS_2, HUBPREQ, id), \ - SRI_ARR(DCN_CUR1_TTU_CNTL0, HUBPREQ, id), \ - SRI_ARR(DCN_CUR1_TTU_CNTL1, HUBPREQ, id), \ - SRI_ARR(DCSURF_FLIP_CONTROL2, HUBPREQ, id), \ - SRI_ARR(VMID_SETTINGS_0, HUBPREQ, id) -#define HUBP_REG_LIST_DCN21_RI(id) \ - HUBP_REG_LIST_DCN2_COMMON_RI(id), SRI_ARR(FLIP_PARAMETERS_3, HUBPREQ, id), \ - SRI_ARR(FLIP_PARAMETERS_4, HUBPREQ, id), \ - SRI_ARR(FLIP_PARAMETERS_5, HUBPREQ, id), \ - SRI_ARR(FLIP_PARAMETERS_6, HUBPREQ, id), \ - SRI_ARR(VBLANK_PARAMETERS_5, HUBPREQ, id), \ - SRI_ARR(VBLANK_PARAMETERS_6, HUBPREQ, id) -#define HUBP_REG_LIST_DCN30_RI(id) \ - HUBP_REG_LIST_DCN21_RI(id), SRI_ARR(DCN_DMDATA_VM_CNTL, HUBPREQ, id) -#define HUBP_REG_LIST_DCN32_RI(id) \ - HUBP_REG_LIST_DCN30_RI(id), SRI_ARR(DCHUBP_MALL_CONFIG, HUBP, id), \ - SRI_ARR(DCHUBP_VMPG_CONFIG, HUBP, id), \ - SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id) - -/* HUBBUB */ - -#define HUBBUB_REG_LIST_DCN32_RI(id) \ - SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A), \ - SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B), \ - SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C), \ - SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D), \ - SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL), \ - SR(DCHUBBUB_ARB_DRAM_STATE_CNTL), SR(DCHUBBUB_ARB_SAT_LEVEL), \ - SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND), SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ - SR(DCHUBBUB_SOFT_RESET), SR(DCHUBBUB_CRC_CTRL), \ - SR(DCN_VM_FB_LOCATION_BASE), SR(DCN_VM_FB_LOCATION_TOP), \ - SR(DCN_VM_FB_OFFSET), SR(DCN_VM_AGP_BOT), SR(DCN_VM_AGP_TOP), \ - SR(DCN_VM_AGP_BASE), HUBBUB_SR_WATERMARK_REG_LIST(), \ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A), SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B), \ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C), SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D), \ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A), \ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B), \ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C), \ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D), \ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A), \ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B), \ - 
SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C), \ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D), SR(DCHUBBUB_DET0_CTRL), \ - SR(DCHUBBUB_DET1_CTRL), SR(DCHUBBUB_DET2_CTRL), SR(DCHUBBUB_DET3_CTRL), \ - SR(DCHUBBUB_COMPBUF_CTRL), SR(COMPBUF_RESERVED_SPACE), \ - SR(DCHUBBUB_DEBUG_CTRL_0), \ - SR(DCHUBBUB_ARB_USR_RETRAINING_CNTL), \ - SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A), \ - SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B), \ - SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C), \ - SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D), \ - SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A), \ - SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B), \ - SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C), \ - SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D), \ - SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A), \ - SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B), \ - SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C), \ - SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D), \ - SR(DCHUBBUB_ARB_MALL_CNTL), \ - SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \ - SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS), \ - SR(SDPIF_REQUEST_RATE_LIMIT) - -/* DCCG */ - -#define DCCG_REG_LIST_DCN32_RI() \ - SR(DPPCLK_DTO_CTRL), DCCG_SRII(DTO_PARAM, DPPCLK, 0), \ - DCCG_SRII(DTO_PARAM, DPPCLK, 1), DCCG_SRII(DTO_PARAM, DPPCLK, 2), \ - DCCG_SRII(DTO_PARAM, DPPCLK, 3), DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0), \ - SR(PHYASYMCLK_CLOCK_CNTL), SR(PHYBSYMCLK_CLOCK_CNTL), \ - SR(PHYCSYMCLK_CLOCK_CNTL), SR(PHYDSYMCLK_CLOCK_CNTL), \ - SR(PHYESYMCLK_CLOCK_CNTL), SR(DPSTREAMCLK_CNTL), SR(HDMISTREAMCLK_CNTL), \ - SR(SYMCLK32_SE_CNTL), SR(SYMCLK32_LE_CNTL), \ - DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0), DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1), \ - DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2), DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3), \ - DCCG_SRII(MODULO, DTBCLK_DTO, 0), DCCG_SRII(MODULO, DTBCLK_DTO, 1), \ - DCCG_SRII(MODULO, DTBCLK_DTO, 2), DCCG_SRII(MODULO, DTBCLK_DTO, 3), \ - DCCG_SRII(PHASE, DTBCLK_DTO, 0), DCCG_SRII(PHASE, DTBCLK_DTO, 1), \ - DCCG_SRII(PHASE, DTBCLK_DTO, 2), DCCG_SRII(PHASE, DTBCLK_DTO, 3), \ - SR(DCCG_AUDIO_DTBCLK_DTO_MODULO), SR(DCCG_AUDIO_DTBCLK_DTO_PHASE), \ - SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), \ - SR(DCCG_AUDIO_DTO_SOURCE), SR(DENTIST_DISPCLK_CNTL) - -/* VMID */ -#define DCN20_VMID_REG_LIST_RI(id) \ - SRI_ARR(CNTL, DCN_VM_CONTEXT, id), \ - SRI_ARR(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id), \ - SRI_ARR(PAGE_TABLE_BASE_ADDR_LO32, DCN_VM_CONTEXT, id), \ - SRI_ARR(PAGE_TABLE_START_ADDR_HI32, DCN_VM_CONTEXT, id), \ - SRI_ARR(PAGE_TABLE_START_ADDR_LO32, DCN_VM_CONTEXT, id), \ - SRI_ARR(PAGE_TABLE_END_ADDR_HI32, DCN_VM_CONTEXT, id), \ - SRI_ARR(PAGE_TABLE_END_ADDR_LO32, DCN_VM_CONTEXT, id) - -/* I2C HW */ - -#define I2C_HW_ENGINE_COMMON_REG_LIST_RI(id) \ - SRI_ARR_I2C(SETUP, DC_I2C_DDC, id), SRI_ARR_I2C(SPEED, DC_I2C_DDC, id), \ - SRI_ARR_I2C(HW_STATUS, DC_I2C_DDC, id), \ - SR_ARR_I2C(DC_I2C_ARBITRATION, id), \ - SR_ARR_I2C(DC_I2C_CONTROL, id), SR_ARR_I2C(DC_I2C_SW_STATUS, id), \ - SR_ARR_I2C(DC_I2C_TRANSACTION0, id), SR_ARR_I2C(DC_I2C_TRANSACTION1, id),\ - SR_ARR_I2C(DC_I2C_TRANSACTION2, id), SR_ARR_I2C(DC_I2C_TRANSACTION3, id),\ - SR_ARR_I2C(DC_I2C_DATA, id), SR_ARR_I2C(MICROSECOND_TIME_BASE_DIV, id) - -#define I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) \ - I2C_HW_ENGINE_COMMON_REG_LIST_RI(id), SR_ARR_I2C(DIO_MEM_PWR_CTRL, id), \ - SR_ARR_I2C(DIO_MEM_PWR_STATUS, id) - -#endif /* _DCN32_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index 
1f89428499..f98def6c8c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -24,10 +24,11 @@ */ // header file of functions being implemented -#include "dcn32_resource.h" +#include "dcn32/dcn32_resource.h" #include "dcn20/dcn20_resource.h" #include "dml/dcn32/display_mode_vba_util_32.h" #include "dml/dcn32/dcn32_fpu.h" +#include "dc_state_priv.h" static bool is_dual_plane(enum surface_pixel_format format) { @@ -190,7 +191,7 @@ bool dcn32_subvp_in_use(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) + if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) return true; } return false; @@ -264,18 +265,17 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint // Do not override if a stream has multiple planes for (i = 0; i < context->stream_count; i++) { - if (context->stream_status[i].plane_count > 1) { + if (context->stream_status[i].plane_count > 1) return; - } - if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) { + + if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM) stream_count++; - } } for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { if (dcn32_allow_subvp_high_refresh_rate(dc, context, pipe_ctx)) { if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) { @@ -290,7 +290,7 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) { if (pipe_segments[i] > 4) pipe_segments[i] = 4; @@ -337,14 +337,14 @@ void dcn32_determine_det_override(struct dc *dc, for (i = 0; i < context->stream_count; i++) { /* Don't count SubVP streams for DET allocation */ - if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) + if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM) stream_count++; } if (stream_count > 0) { stream_segments = 18 / stream_count; for (i = 0; i < context->stream_count; i++) { - if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) + if (dc_state_get_stream_subvp_type(context, context->streams[i]) == SUBVP_PHANTOM) continue; if (context->stream_status[i].plane_count > 0) @@ -430,71 +430,6 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, dcn32_determine_det_override(dc, context, pipes); } -/** - * dcn32_save_mall_state(): Save MALL (SubVP) state for fast validation cases - * - * This function saves the MALL (SubVP) case for fast validation cases. For fast validation, - * there are situations where a shallow copy of the dc->current_state is created for the - * validation. 
In this case we want to save and restore the mall config because we always - * teardown subvp at the beginning of validation (and don't attempt to add it back if it's - * fast validation). If we don't restore the subvp config in cases of fast validation + - * shallow copy of the dc->current_state, the dc->current_state will have a partially - * removed subvp state when we did not intend to remove it. - * - * NOTE: This function ONLY works if the streams are not moved to a different pipe in the - * validation. We don't expect this to happen in fast_validation=1 cases. - * - * @dc: Current DC state - * @context: New DC state to be programmed - * @temp_config: struct used to cache the existing MALL state - * - * Return: void - */ -void dcn32_save_mall_state(struct dc *dc, - struct dc_state *context, - struct mall_temp_config *temp_config) -{ - uint32_t i; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (pipe->stream) - temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config; - - if (pipe->plane_state) - temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom; - } -} - -/** - * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases - * - * Restore the MALL state based on the previously saved state from dcn32_save_mall_state - * - * @dc: Current DC state - * @context: New DC state to be programmed, restore MALL state into here - * @temp_config: struct that has the cached MALL state - * - * Return: void - */ -void dcn32_restore_mall_state(struct dc *dc, - struct dc_state *context, - struct mall_temp_config *temp_config) -{ - uint32_t i; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (pipe->stream) - pipe->stream->mall_stream_config = temp_config->mall_stream_config[i]; - - if (pipe->plane_state) - pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i]; - } -} - #define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must ensure to match value in FW) /* * Scaling factor for v_blank stretch calculations considering timing in @@ -589,13 +524,14 @@ static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream) * * Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL */ -struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context) +struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context) { int refresh_rate = 0; const int minimum_refreshrate_supported = 120; struct dc_stream_state *fpo_candidate_stream = NULL; bool is_fpo_vactive = false; uint32_t fpo_vactive_margin_us = 0; + struct dc_stream_status *fpo_stream_status = NULL; if (context == NULL) return NULL; @@ -618,16 +554,28 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre DC_FP_START(); dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream); DC_FP_END(); - + if (fpo_candidate_stream) + fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream); DC_FP_START(); is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us); DC_FP_END(); if (!is_fpo_vactive || dc->debug.disable_fpo_vactive) return NULL; - } else + } else { fpo_candidate_stream = context->streams[0]; + if (fpo_candidate_stream) + fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream); + } - 
if (!fpo_candidate_stream + /* In DCN32/321, FPO uses per-pipe P-State force. + * If there are no planes, HUBP is power gated and + * therefore programming UCLK_PSTATE_FORCE does + * nothing (P-State will always be asserted naturally + * on a pipe that has HUBP power gated). Therefore we + * only want to enable FPO if the FPO pipe has both + * a stream and a plane. + */ + if (!fpo_candidate_stream || !fpo_stream_status || fpo_stream_status->plane_count == 0) return NULL; if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams) @@ -665,6 +613,30 @@ bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int widt return is_native_scaling; } +/** + * disallow_subvp_in_active_plus_blank() - Function to determine disallowed subvp + drr/vblank configs + * + * @pipe: subvp pipe to be used for the subvp + drr/vblank config + * + * Since subvp is being enabled on more configs (such as 1080p60), we want + * to explicitly block any configs that we don't want to enable. We do not + * want to enable any 1080p60 (SubVP) + drr / vblank configs since these + * are already covered by FPO. + * + * Return: True if disallowed, false otherwise + */ +static bool disallow_subvp_in_active_plus_blank(struct pipe_ctx *pipe) +{ + bool disallow = false; + + if (resource_is_pipe_type(pipe, OPP_HEAD) && + resource_is_pipe_type(pipe, DPP_PIPE)) { + if (pipe->stream->timing.v_addressable == 1080 && pipe->stream->timing.h_addressable == 1920) + disallow = true; + } + return disallow; +} + /** * dcn32_subvp_drr_admissable() - Determine if SubVP + DRR config is admissible * @@ -688,21 +660,24 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) bool drr_pipe_found = false; bool drr_psr_capable = false; uint64_t refresh_rate = 0; + bool subvp_disallow = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (resource_is_pipe_type(pipe, OPP_HEAD) && resource_is_pipe_type(pipe, DPP_PIPE)) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe_mall_type == SUBVP_MAIN) { subvp_count++; + subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe); refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); } - if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (pipe_mall_type == SUBVP_NONE) { non_subvp_pipes++; drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe)); if (pipe->stream->ignore_msa_timing_param && @@ -713,7 +688,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context) } } - if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable && + if (subvp_count == 1 && !subvp_disallow && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable && ((uint32_t)refresh_rate < 120)) result = true; @@ -746,21 +721,24 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int struct vba_vars_st *vba = &context->bw_ctx.dml.vba; bool vblank_psr_capable = false; uint64_t refresh_rate = 0; + bool subvp_disallow = false; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if 
(resource_is_pipe_type(pipe, OPP_HEAD) && resource_is_pipe_type(pipe, DPP_PIPE)) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe_mall_type == SUBVP_MAIN) { subvp_count++; + subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe); refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); } - if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (pipe_mall_type == SUBVP_NONE) { non_subvp_pipes++; vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe)); if (pipe->stream->ignore_msa_timing_param && @@ -772,7 +750,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int } if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable && - ((uint32_t)refresh_rate < 120) && + ((uint32_t)refresh_rate < 120) && !subvp_disallow && vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) result = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/Makefile b/drivers/gpu/drm/amd/display/dc/dcn321/Makefile index 0a199c83bb..c195c47f58 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn321/Makefile @@ -10,7 +10,7 @@ # # Makefile for dcn321. -DCN321 = dcn321_resource.o dcn321_dio_link_encoder.o +DCN321 = dcn321_dio_link_encoder.o AMD_DAL_DCN321 = $(addprefix $(AMDDALPATH)/dc/dcn321/,$(DCN321)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c deleted file mode 100644 index 3b7505b5f0..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ /dev/null @@ -1,2068 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
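The refresh-rate arithmetic in the two admissibility checks above is a round-up division: the (v_total * h_total - 1) bias pushes the quotient to its ceiling before the "< 120" comparison. A minimal standalone sketch, with a hypothetical helper name and plain 64-bit division in place of the kernel's div_u64():

#include <stdint.h>

/* ceil(pixel_clock_hz / (h_total * v_total)), split into two divisions
 * the same way the admissibility checks above do it.
 */
static uint32_t refresh_rate_ceil(uint64_t pix_clk_100hz,
				  uint32_t h_total, uint32_t v_total)
{
	uint64_t r = pix_clk_100hz * 100 +
		     (uint64_t)v_total * h_total - 1; /* round-up bias */

	r /= v_total;	/* kernel: div_u64(refresh_rate, v_total) */
	r /= h_total;	/* kernel: div_u64(refresh_rate, h_total) */
	return (uint32_t)r;
}

Example: CEA 1080p60 (pixel clock 148.5 MHz, 2200x1125 total) gives refresh_rate_ceil(1485000, 2200, 1125) == 60, comfortably under the 120 Hz cap.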
- * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "dc.h" - -#include "dcn32/dcn32_init.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn32/dcn32_resource.h" -#include "dcn321_resource.h" - -#include "dcn20/dcn20_resource.h" -#include "dcn30/dcn30_resource.h" - -#include "dml/dcn321/dcn321_fpu.h" - -#include "dcn10/dcn10_ipp.h" -#include "dcn30/dcn30_hubbub.h" -#include "dcn31/dcn31_hubbub.h" -#include "dcn32/dcn32_hubbub.h" -#include "dcn32/dcn32_mpc.h" -#include "dcn32/dcn32_hubp.h" -#include "irq/dcn32/irq_service_dcn32.h" -#include "dcn32/dcn32_dpp.h" -#include "dcn32/dcn32_optc.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn30/dcn30_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn30/dcn30_opp.h" -#include "dcn20/dcn20_dsc.h" -#include "dcn30/dcn30_vpg.h" -#include "dcn30/dcn30_afmt.h" -#include "dcn30/dcn30_dio_stream_encoder.h" -#include "dcn32/dcn32_dio_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_link_encoder.h" -#include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "dcn31/dcn31_apg.h" -#include "dcn31/dcn31_dio_link_encoder.h" -#include "dcn32/dcn32_dio_link_encoder.h" -#include "dcn321_dio_link_encoder.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" -#include "dml/display_mode_vba.h" -#include "dcn32/dcn32_dccg.h" -#include "dcn10/dcn10_resource.h" -#include "link.h" -#include "dcn31/dcn31_panel_cntl.h" - -#include "dcn30/dcn30_dwb.h" -#include "dcn32/dcn32_mmhubbub.h" - -#include "dcn/dcn_3_2_1_offset.h" -#include "dcn/dcn_3_2_1_sh_mask.h" -#include "nbio/nbio_4_3_0_offset.h" - -#include "reg_helper.h" -#include "dce/dmub_abm.h" -#include "dce/dmub_psr.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" - -#include "dml/dcn30/display_mode_vba_30.h" -#include "vm_helper.h" -#include "dcn20/dcn20_vmid.h" - -#define DC_LOGGER_INIT(logger) - -enum dcn321_clk_src_array_id { - DCN321_CLK_SRC_PLL0, - DCN321_CLK_SRC_PLL1, - DCN321_CLK_SRC_PLL2, - DCN321_CLK_SRC_PLL3, - DCN321_CLK_SRC_PLL4, - DCN321_CLK_SRC_TOTAL -}; - -/* begin ********************* - * macros to expend register list macro defined in HW object header file - */ - -/* DCN */ -#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name -#define SR_ARR(reg_name, id)\ - REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name -#define SR_ARR_INIT(reg_name, id, value)\ - REG_STRUCT[id].reg_name = value - -#define SRI(reg_name, block, id)\ - REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI_ARR(reg_name, block, id)\ - REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SR_ARR_I2C(reg_name, id) \ - REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name - -#define SRI_ARR_I2C(reg_name, block, id)\ - REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI_ARR_ALPHABET(reg_name, block, index, id)\ - REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ - .reg_name = 
BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name -#define SRI2_ARR(reg_name, block, id)\ - REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SRIR(var_name, reg_name, block, id)\ - .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII(reg_name, block, id)\ - REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_ARR_2(reg_name, block, id, inst)\ - REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_MPC_RMU(reg_name, block, id)\ - .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_DWB(reg_name, temp_name, block, id)\ - REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## temp_name - -#define DCCG_SRII(reg_name, block, id)\ - REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define VUPDATE_SRII(reg_name, block, id)\ - REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - reg ## reg_name ## _ ## block ## id - -/* NBIO */ -#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg] - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX0_ ## reg_name -#define NBIO_SR_ARR(reg_name, id)\ - REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX0_ ## reg_name - -#define CTX ctx -#define REG(reg_name) \ - (ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) - -static struct bios_registers bios_regs; - -#define bios_regs_init() \ - ( \ - NBIO_SR(BIOS_SCRATCH_3),\ - NBIO_SR(BIOS_SCRATCH_6)\ - ) - -#define clk_src_regs_init(index, pllid)\ - CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) - -static struct dce110_clk_src_regs clk_src_regs[5]; - -static const struct dce110_clk_src_shift cs_shift = { - CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK) -}; - -#define abm_regs_init(id)\ - ABM_DCN32_REG_LIST_RI(id) - -static struct dce_abm_registers abm_regs[4]; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN32(_MASK) -}; - -#define audio_regs_init(id)\ - AUD_COMMON_REG_LIST_RI(id) - -static struct dce_audio_registers audio_regs[5]; - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define vpg_regs_init(id)\ - VPG_DCN3_REG_LIST_RI(id) - -static struct dcn30_vpg_registers vpg_regs[10]; - -static const struct dcn30_vpg_shift vpg_shift = { - DCN3_VPG_MASK_SH_LIST(__SHIFT) 
-}; - -static const struct dcn30_vpg_mask vpg_mask = { - DCN3_VPG_MASK_SH_LIST(_MASK) -}; - -#define afmt_regs_init(id)\ - AFMT_DCN3_REG_LIST_RI(id) - -static struct dcn30_afmt_registers afmt_regs[6]; - -static const struct dcn30_afmt_shift afmt_shift = { - DCN3_AFMT_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn30_afmt_mask afmt_mask = { - DCN3_AFMT_MASK_SH_LIST(_MASK) -}; - -#define apg_regs_init(id)\ - APG_DCN31_REG_LIST_RI(id) - -static struct dcn31_apg_registers apg_regs[4]; - -static const struct dcn31_apg_shift apg_shift = { - DCN31_APG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_apg_mask apg_mask = { - DCN31_APG_MASK_SH_LIST(_MASK) -}; - -#define stream_enc_regs_init(id)\ - SE_DCN32_REG_LIST_RI(id) - -static struct dcn10_stream_enc_registers stream_enc_regs[5]; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN32(_MASK) -}; - - -#define aux_regs_init(id)\ - DCN2_AUX_REG_LIST_RI(id) - -static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5]; - -#define hpd_regs_init(id)\ - HPD_REG_LIST_RI(id) - -static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5]; - -#define link_regs_init(id, phyid)\ - ( \ - LE_DCN31_REG_LIST_RI(id), \ - UNIPHY_DCN2_REG_LIST_RI(id, phyid)\ - ) - /*DPCS_DCN31_REG_LIST(id),*/ \ - -static struct dcn10_link_enc_registers link_enc_regs[5]; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ -// DPCS_DCN31_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ -// DPCS_DCN31_MASK_SH_LIST(_MASK) -}; - -#define hpo_dp_stream_encoder_reg_init(id)\ - DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) - -static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4]; - -static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) -}; - - -#define hpo_dp_link_encoder_reg_init(id)\ - DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) - /*DCN3_1_RDPCSTX_REG_LIST(0),*/ - /*DCN3_1_RDPCSTX_REG_LIST(1),*/ - /*DCN3_1_RDPCSTX_REG_LIST(2),*/ - /*DCN3_1_RDPCSTX_REG_LIST(3),*/ - -static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2]; - -static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { - DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { - DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) -}; - -#define dpp_regs_init(id)\ - DPP_REG_LIST_DCN30_COMMON_RI(id) - -static struct dcn3_dpp_registers dpp_regs[4]; - -static const struct dcn3_dpp_shift tf_shift = { - DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT) -}; - -static const struct dcn3_dpp_mask tf_mask = { - DPP_REG_LIST_SH_MASK_DCN30_COMMON(_MASK) -}; - - -#define opp_regs_init(id)\ - OPP_REG_LIST_DCN30_RI(id) - -static struct dcn20_opp_registers opp_regs[4]; - -static const struct dcn20_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN20(__SHIFT) -}; - -static const struct dcn20_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN20(_MASK) -}; - -#define aux_engine_regs_init(id) \ - ( \ - AUX_COMMON_REG_LIST0_RI(id), SR_ARR_INIT(AUXN_IMPCAL, id, 0), \ - SR_ARR_INIT(AUXP_IMPCAL, id, 0), \ - SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \ - 
SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\ - ) - -static struct dce110_aux_registers aux_engine_regs[5]; - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -#define dwbc_regs_dcn3_init(id)\ - DWBC_COMMON_REG_LIST_DCN30_RI(id) - -static struct dcn30_dwbc_registers dwbc30_regs[1]; - -static const struct dcn30_dwbc_shift dwbc30_shift = { - DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dcn30_dwbc_mask dwbc30_mask = { - DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -#define mcif_wb_regs_dcn3_init(id)\ - MCIF_WB_COMMON_REG_LIST_DCN32_RI(id) - -static struct dcn30_mmhubbub_registers mcif_wb30_regs[1]; - -static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK) -}; - -#define dsc_regsDCN20_init(id)\ - DSC_REG_LIST_DCN20_RI(id) - -static struct dcn20_dsc_registers dsc_regs[4]; - -static const struct dcn20_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) -}; - -static const struct dcn20_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN20(_MASK) -}; - -static struct dcn30_mpc_registers mpc_regs; -#define dcn_mpc_regs_init()\ - MPC_REG_LIST_DCN3_2_RI(0),\ - MPC_REG_LIST_DCN3_2_RI(1),\ - MPC_REG_LIST_DCN3_2_RI(2),\ - MPC_REG_LIST_DCN3_2_RI(3),\ - MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\ - MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\ - MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\ - MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\ - MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0) - -static const struct dcn30_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dcn30_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN32(_MASK) -}; - -#define optc_regs_init(id)\ - OPTC_COMMON_REG_LIST_DCN3_2_RI(id) - -static struct dcn_optc_registers optc_regs[4]; - -static const struct dcn_optc_shift optc_shift = { - OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT) -}; - -static const struct dcn_optc_mask optc_mask = { - OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK) -}; - -#define hubp_regs_init(id) \ - HUBP_REG_LIST_DCN32_RI(id) - -static struct dcn_hubp2_registers hubp_regs[4]; - -static const struct dcn_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dcn_hubp2_mask hubp_mask = { - HUBP_MASK_SH_LIST_DCN32(_MASK) -}; - -static struct dcn_hubbub_registers hubbub_reg; -#define hubbub_reg_init()\ - HUBBUB_REG_LIST_DCN32_RI(0) - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN32(_MASK) -}; - -static struct dccg_registers dccg_regs; - -#define dccg_regs_init()\ - DCCG_REG_LIST_DCN32_RI() - -static const struct dccg_shift dccg_shift = { - DCCG_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN32(_MASK) -}; - - -#define SRII2(reg_name_pre, reg_name_post, id)\ - .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ - ## id ## _ ## reg_name_post ## _BASE_IDX) + \ - reg ## reg_name_pre ## id ## _ ## reg_name_post - - -#define HWSEQ_DCN32_REG_LIST()\ - SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ - SR(DIO_MEM_PWR_CTRL), \ - SR(ODM_MEM_PWR_CTRL3), \ - SR(MMHUBBUB_MEM_PWR_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL2), \ - SR(DCFCLK_CNTL),\ - 
SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ - SRII(PIXEL_RATE_CNTL, OTG, 0), \ - SRII(PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PIXEL_RATE_CNTL, OTG, 3),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ - SR(MICROSECOND_TIME_BASE_DIV), \ - SR(MILLISECOND_TIME_BASE_DIV), \ - SR(DISPCLK_FREQ_CHANGE_CNTL), \ - SR(RBBMIF_TIMEOUT_DIS), \ - SR(RBBMIF_TIMEOUT_DIS_2), \ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ - SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ - SR(MPC_CRC_CTRL), \ - SR(MPC_CRC_RESULT_GB), \ - SR(MPC_CRC_RESULT_C), \ - SR(MPC_CRC_RESULT_AR), \ - SR(DOMAIN0_PG_CONFIG), \ - SR(DOMAIN1_PG_CONFIG), \ - SR(DOMAIN2_PG_CONFIG), \ - SR(DOMAIN3_PG_CONFIG), \ - SR(DOMAIN16_PG_CONFIG), \ - SR(DOMAIN17_PG_CONFIG), \ - SR(DOMAIN18_PG_CONFIG), \ - SR(DOMAIN19_PG_CONFIG), \ - SR(DOMAIN0_PG_STATUS), \ - SR(DOMAIN1_PG_STATUS), \ - SR(DOMAIN2_PG_STATUS), \ - SR(DOMAIN3_PG_STATUS), \ - SR(DOMAIN16_PG_STATUS), \ - SR(DOMAIN17_PG_STATUS), \ - SR(DOMAIN18_PG_STATUS), \ - SR(DOMAIN19_PG_STATUS), \ - SR(D1VGA_CONTROL), \ - SR(D2VGA_CONTROL), \ - SR(D3VGA_CONTROL), \ - SR(D4VGA_CONTROL), \ - SR(D5VGA_CONTROL), \ - SR(D6VGA_CONTROL), \ - SR(DC_IP_REQUEST_CNTL), \ - SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING) - -static struct dce_hwseq_registers hwseq_reg; - -#define hwseq_reg_init()\ - HWSEQ_DCN32_REG_LIST() - -#define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\ - HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ - HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ - HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ - HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ - 
HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh) - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN32_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN32_MASK_SH_LIST(_MASK) -}; -#define vmid_regs_init(id)\ - DCN20_VMID_REG_LIST_RI(id) - -static struct dcn_vmid_registers vmid_regs[16]; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -static const struct resource_caps res_cap_dcn321 = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 5, - .num_stream_encoder = 5, - .num_hpo_dp_stream_encoder = 4, - .num_hpo_dp_link_encoder = 2, - .num_pll = 5, - .num_dwb = 1, - .num_ddc = 5, - .num_vmid = 16, - .num_mpc_3dlut = 4, - .num_dsc = 4, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true, - .ayuv = false, - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 16000 - }, - - // 6:1 downscaling ratio: 1000/6 = 166.666 - .max_downscale_factor = { - .argb8888 = 167, - .nv12 = 167, - .fp16 = 167 - }, - 64, - 64 -}; - -static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = false, - .pipe_split_policy = MPC_SPLIT_AVOID, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .vsr_support = true, - .performance_trace = false, - .max_downscale_src_width = 7680,/*upto 8K*/ - .disable_pplib_wm_range = false, - .scl_reset_length10 = true, - .sanity_checks = false, - .underflow_assert_delay_us = 0xFFFFFFFF, - .dwb_fi_phase = -1, // -1 = disable, - .dmub_command_table = true, - .enable_mem_low_power = { - .bits = { - .vga = false, - .i2c = false, - .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled - .dscl = false, - .cm = false, - .mpc = false, - .optc = true, - } - }, - .use_max_lb = true, - .force_disable_subvp = false, - .exit_idle_opt_for_cursor_updates = true, - .enable_single_display_2to1_odm_policy = true, - - /*must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/ - .enable_double_buffered_dsc_pg_support = true, - .enable_dp_dig_pixel_rate_div_policy = 1, - .allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback" - .alloc_extra_way_for_cursor = true, - .min_prefetch_in_strobe_ns = 60000, // 60us - .disable_unbounded_requesting = false, - .override_dispclk_programming = true, - .disable_fpo_optimizations = false, - .fpo_vactive_margin_us = 2000, // 2000us - .disable_fpo_vactive = false, - .disable_boot_optimizations = false, - .disable_subvp_high_refresh = false, - .fpo_vactive_min_active_margin_us = 200, - .fpo_vactive_max_blank_us = 1000, - .enable_legacy_fast_update = false, - .disable_dc_mode_overwrite = true, - .using_dml2 = false, -}; - -static struct dce_aux *dcn321_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT aux_engine_regs - aux_engine_regs_init(0), - aux_engine_regs_init(1), - aux_engine_regs_init(2), - aux_engine_regs_init(3), - 
aux_engine_regs_init(4); - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} -#define i2c_inst_regs_init(id)\ - I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) - -static struct dce_i2c_registers i2c_hw_regs[5]; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) -}; - -static struct dce_i2c_hw *dcn321_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT i2c_hw_regs - i2c_inst_regs_init(1), - i2c_inst_regs_init(2), - i2c_inst_regs_init(3), - i2c_inst_regs_init(4), - i2c_inst_regs_init(5); - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} - -static struct clock_source *dcn321_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn31_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - kfree(clk_src); - BREAK_TO_DEBUGGER(); - return NULL; -} - -static struct hubbub *dcn321_hubbub_create(struct dc_context *ctx) -{ - int i; - - struct dcn20_hubbub *hubbub2 = kzalloc(sizeof(struct dcn20_hubbub), - GFP_KERNEL); - - if (!hubbub2) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT hubbub_reg - hubbub_reg_init(); - -#undef REG_STRUCT -#define REG_STRUCT vmid_regs - vmid_regs_init(0), - vmid_regs_init(1), - vmid_regs_init(2), - vmid_regs_init(3), - vmid_regs_init(4), - vmid_regs_init(5), - vmid_regs_init(6), - vmid_regs_init(7), - vmid_regs_init(8), - vmid_regs_init(9), - vmid_regs_init(10), - vmid_regs_init(11), - vmid_regs_init(12), - vmid_regs_init(13), - vmid_regs_init(14), - vmid_regs_init(15); - - hubbub32_construct(hubbub2, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask, - ctx->dc->dml.ip.det_buffer_size_kbytes, - ctx->dc->dml.ip.pixel_chunk_size_kbytes, - ctx->dc->dml.ip.config_return_buffer_size_in_kbytes); - - - for (i = 0; i < res_cap_dcn321.num_vmid; i++) { - struct dcn20_vmid *vmid = &hubbub2->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - - return &hubbub2->base; -} - -static struct hubp *dcn321_hubp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); - - if (!hubp2) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT hubp_regs - hubp_regs_init(0), - hubp_regs_init(1), - hubp_regs_init(2), - hubp_regs_init(3); - - if (hubp32_construct(hubp2, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp2->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp2); - return NULL; -} - -static void dcn321_dpp_destroy(struct dpp **dpp) -{ - kfree(TO_DCN30_DPP(*dpp)); - *dpp = NULL; -} - -static struct dpp *dcn321_dpp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn3_dpp *dpp3 = - kzalloc(sizeof(struct dcn3_dpp), 
GFP_KERNEL); - - if (!dpp3) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT dpp_regs - dpp_regs_init(0), - dpp_regs_init(1), - dpp_regs_init(2), - dpp_regs_init(3); - - if (dpp32_construct(dpp3, ctx, inst, - &dpp_regs[inst], &tf_shift, &tf_mask)) - return &dpp3->base; - - BREAK_TO_DEBUGGER(); - kfree(dpp3); - return NULL; -} - -static struct mpc *dcn321_mpc_create( - struct dc_context *ctx, - int num_mpcc, - int num_rmu) -{ - struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), - GFP_KERNEL); - - if (!mpc30) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT mpc_regs - dcn_mpc_regs_init(); - - dcn32_mpc_construct(mpc30, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - num_mpcc, - num_rmu); - - return &mpc30->base; -} - -static struct output_pixel_processor *dcn321_opp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_opp *opp2 = - kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); - - if (!opp2) { - BREAK_TO_DEBUGGER(); - return NULL; - } - -#undef REG_STRUCT -#define REG_STRUCT opp_regs - opp_regs_init(0), - opp_regs_init(1), - opp_regs_init(2), - opp_regs_init(3); - - dcn20_opp_construct(opp2, ctx, inst, - &opp_regs[inst], &opp_shift, &opp_mask); - return &opp2->base; -} - - -static struct timing_generator *dcn321_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT optc_regs - optc_regs_init(0), - optc_regs_init(1), - optc_regs_init(2), - optc_regs_init(3); - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &optc_regs[instance]; - tgn10->tg_shift = &optc_shift; - tgn10->tg_mask = &optc_mask; - - dcn32_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -static struct link_encoder *dcn321_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - - if (!enc20) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT link_enc_aux_regs - aux_regs_init(0), - aux_regs_init(1), - aux_regs_init(2), - aux_regs_init(3), - aux_regs_init(4); - -#undef REG_STRUCT -#define REG_STRUCT link_enc_hpd_regs - hpd_regs_init(0), - hpd_regs_init(1), - hpd_regs_init(2), - hpd_regs_init(3), - hpd_regs_init(4); - -#undef REG_STRUCT -#define REG_STRUCT link_enc_regs - link_regs_init(0, A), - link_regs_init(1, B), - link_regs_init(2, C), - link_regs_init(3, D), - link_regs_init(4, E); - - dcn321_link_encoder_construct(enc20, - enc_init_data, - &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc20->enc10.base; -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS, - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); - -} - -static struct audio *dcn321_create_audio( - 
struct dc_context *ctx, unsigned int inst) -{ - -#undef REG_STRUCT -#define REG_STRUCT audio_regs - audio_regs_init(0), - audio_regs_init(1), - audio_regs_init(2), - audio_regs_init(3), - audio_regs_init(4); - - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct vpg *dcn321_vpg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); - - if (!vpg3) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT vpg_regs - vpg_regs_init(0), - vpg_regs_init(1), - vpg_regs_init(2), - vpg_regs_init(3), - vpg_regs_init(4), - vpg_regs_init(5), - vpg_regs_init(6), - vpg_regs_init(7), - vpg_regs_init(8), - vpg_regs_init(9); - - vpg3_construct(vpg3, ctx, inst, - &vpg_regs[inst], - &vpg_shift, - &vpg_mask); - - return &vpg3->base; -} - -static struct afmt *dcn321_afmt_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); - - if (!afmt3) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT afmt_regs - afmt_regs_init(0), - afmt_regs_init(1), - afmt_regs_init(2), - afmt_regs_init(3), - afmt_regs_init(4), - afmt_regs_init(5); - - afmt3_construct(afmt3, ctx, inst, - &afmt_regs[inst], - &afmt_shift, - &afmt_mask); - - return &afmt3->base; -} - -static struct apg *dcn321_apg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); - - if (!apg31) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT apg_regs - apg_regs_init(0), - apg_regs_init(1), - apg_regs_init(2), - apg_regs_init(3); - - apg31_construct(apg31, ctx, inst, - &apg_regs[inst], - &apg_shift, - &apg_mask); - - return &apg31->base; -} - -static struct stream_encoder *dcn321_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1; - struct vpg *vpg; - struct afmt *afmt; - int vpg_inst; - int afmt_inst; - - /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else - return NULL; - - enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - vpg = dcn321_vpg_create(ctx, vpg_inst); - afmt = dcn321_afmt_create(ctx, afmt_inst); - - if (!enc1 || !vpg || !afmt) { - kfree(enc1); - kfree(vpg); - kfree(afmt); - return NULL; - } - -#undef REG_STRUCT -#define REG_STRUCT stream_enc_regs - stream_enc_regs_init(0), - stream_enc_regs_init(1), - stream_enc_regs_init(2), - stream_enc_regs_init(3), - stream_enc_regs_init(4); - - dcn32_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, - eng_id, vpg, afmt, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -static struct hpo_dp_stream_encoder *dcn321_hpo_dp_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; - struct vpg *vpg; - struct apg *apg; - uint32_t hpo_dp_inst; - uint32_t vpg_inst; - uint32_t apg_inst; - - ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); - hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; - - /* Mapping of VPG register blocks to HPO DP block instance: - * VPG[6] -> HPO_DP[0] - * VPG[7] -> HPO_DP[1] - * VPG[8] -> HPO_DP[2] - * VPG[9] -> HPO_DP[3] - */ - vpg_inst = hpo_dp_inst + 6; - - /* Mapping of APG register blocks to HPO DP block instance: - * APG[0] -> HPO_DP[0] - * APG[1] -> HPO_DP[1] - * APG[2] -> HPO_DP[2] - * APG[3] -> HPO_DP[3] - 
*/ - apg_inst = hpo_dp_inst; - - /* allocate HPO stream encoder and create VPG sub-block */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); - vpg = dcn321_vpg_create(ctx, vpg_inst); - apg = dcn321_apg_create(ctx, apg_inst); - - if (!hpo_dp_enc31 || !vpg || !apg) { - kfree(hpo_dp_enc31); - kfree(vpg); - kfree(apg); - return NULL; - } - -#undef REG_STRUCT -#define REG_STRUCT hpo_dp_stream_enc_regs - hpo_dp_stream_encoder_reg_init(0), - hpo_dp_stream_encoder_reg_init(1), - hpo_dp_stream_encoder_reg_init(2), - hpo_dp_stream_encoder_reg_init(3); - - dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, - hpo_dp_inst, eng_id, vpg, apg, - &hpo_dp_stream_enc_regs[hpo_dp_inst], - &hpo_dp_se_shift, &hpo_dp_se_mask); - - return &hpo_dp_enc31->base; -} - -static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create( - uint8_t inst, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; - - /* allocate HPO link encoder */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); - -#undef REG_STRUCT -#define REG_STRUCT hpo_dp_link_enc_regs - hpo_dp_link_encoder_reg_init(0), - hpo_dp_link_encoder_reg_init(1); - - hpo_dp_link_encoder32_construct(hpo_dp_enc31, ctx, inst, - &hpo_dp_link_enc_regs[inst], - &hpo_dp_le_shift, &hpo_dp_le_mask); - - return &hpo_dp_enc31->base; -} - -static struct dce_hwseq *dcn321_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - -#undef REG_STRUCT -#define REG_STRUCT hwseq_reg - hwseq_reg_init(); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn321_create_audio, - .create_stream_encoder = dcn321_stream_encoder_create, - .create_hpo_dp_stream_encoder = dcn321_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn321_hpo_dp_link_encoder_create, - .create_hwseq = dcn321_hwseq_create, -}; - -static void dcn321_resource_destruct(struct dcn321_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if (pool->base.stream_enc[i] != NULL) { - if (pool->base.stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); - pool->base.stream_enc[i]->vpg = NULL; - } - if (pool->base.stream_enc[i]->afmt != NULL) { - kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); - pool->base.stream_enc[i]->afmt = NULL; - } - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { - if (pool->base.hpo_dp_stream_enc[i] != NULL) { - if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); - pool->base.hpo_dp_stream_enc[i]->vpg = NULL; - } - if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { - kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); - pool->base.hpo_dp_stream_enc[i]->apg = NULL; - } - kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); - pool->base.hpo_dp_stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { - if (pool->base.hpo_dp_link_enc[i] != NULL) { - kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); - pool->base.hpo_dp_link_enc[i] = NULL; - } 
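The VPG and APG instance mapping spelled out in the comments above reduces to fixed offsets. As a sketch only (hypothetical helper names; the constants come straight from the deleted code):

static inline uint32_t hpo_dp_vpg_inst(uint32_t hpo_dp_inst)
{
	return hpo_dp_inst + 6;	/* VPG[6..9] feed HPO_DP[0..3] */
}

static inline uint32_t hpo_dp_apg_inst(uint32_t hpo_dp_inst)
{
	return hpo_dp_inst;	/* APG[0..3] map 1:1 onto HPO_DP[0..3] */
}

For eng_id == ENGINE_ID_HPO_DP_2, hpo_dp_inst is 2, so VPG instance 8 and APG instance 2 back that stream encoder.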
- } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - if (pool->base.dscs[i] != NULL) - dcn20_dsc_destroy(&pool->base.dscs[i]); - } - - if (pool->base.mpc != NULL) { - kfree(TO_DCN20_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - if (pool->base.hubbub != NULL) { - kfree(TO_DCN20_HUBBUB(pool->base.hubbub)); - pool->base.hubbub = NULL; - } - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.dpps[i] != NULL) - dcn321_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) - dal_irq_service_destroy(&pool->base.irqs); - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dwb; i++) { - if (pool->base.dwbc[i] != NULL) { - kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); - pool->base.dwbc[i] = NULL; - } - if (pool->base.mcif_wb[i] != NULL) { - kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); - pool->base.mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn20_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { - if (pool->base.mpc_lut[i] != NULL) { - dc_3dlut_func_release(pool->base.mpc_lut[i]); - pool->base.mpc_lut[i] = NULL; - } - if (pool->base.mpc_shaper[i] != NULL) { - dc_transfer_func_release(pool->base.mpc_shaper[i]); - pool->base.mpc_shaper[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn20_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.multiple_abms[i] != NULL) - dce_abm_destroy(&pool->base.multiple_abms[i]); - } - - if (pool->base.psr != NULL) - dmub_psr_destroy(&pool->base.psr); - - if (pool->base.dccg != NULL) - dcn_dccg_destroy(&pool->base.dccg); - - if (pool->base.oem_device != NULL) { - struct dc *dc = pool->base.oem_device->ctx->dc; - - dc->link_srv->destroy_ddc_service(&pool->base.oem_device); - } -} - - -static bool dcn321_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t dwb_count = pool->res_cap->num_dwb; - - for (i = 0; i < dwb_count; i++) { - struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), - GFP_KERNEL); - - if (!dwbc30) { - dm_error("DC: failed to create dwbc30!\n"); - return false; - } - -#undef REG_STRUCT -#define REG_STRUCT dwbc30_regs - dwbc_regs_dcn3_init(0); - - dcn30_dwbc_construct(dwbc30, ctx, - 
&dwbc30_regs[i], - &dwbc30_shift, - &dwbc30_mask, - i); - - pool->dwbc[i] = &dwbc30->base; - } - return true; -} - -static bool dcn321_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t dwb_count = pool->res_cap->num_dwb; - - for (i = 0; i < dwb_count; i++) { - struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), - GFP_KERNEL); - - if (!mcif_wb30) { - dm_error("DC: failed to create mcif_wb30!\n"); - return false; - } - -#undef REG_STRUCT -#define REG_STRUCT mcif_wb30_regs - mcif_wb_regs_dcn3_init(0); - - dcn32_mmhubbub_construct(mcif_wb30, ctx, - &mcif_wb30_regs[i], - &mcif_wb30_shift, - &mcif_wb30_mask, - i); - - pool->mcif_wb[i] = &mcif_wb30->base; - } - return true; -} - -static struct display_stream_compressor *dcn321_dsc_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - -#undef REG_STRUCT -#define REG_STRUCT dsc_regs - dsc_regsDCN20_init(0), - dsc_regsDCN20_init(1), - dsc_regsDCN20_init(2), - dsc_regsDCN20_init(3); - - dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - - dsc->max_image_width = 6016; - - return &dsc->base; -} - -static void dcn321_destroy_resource_pool(struct resource_pool **pool) -{ - struct dcn321_resource_pool *dcn321_pool = TO_DCN321_RES_POOL(*pool); - - dcn321_resource_destruct(dcn321_pool); - kfree(dcn321_pool); - *pool = NULL; -} - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, - .get_subvp_en = dcn32_subvp_in_use, -}; - -static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) -{ - DC_FP_START(); - dcn321_update_bw_bounding_box_fpu(dc, bw_params); - if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - DC_FP_END(); -} - -static struct resource_funcs dcn321_res_pool_funcs = { - .destroy = dcn321_destroy_resource_pool, - .link_enc_create = dcn321_link_encoder_create, - .link_enc_create_minimal = NULL, - .panel_cntl_create = dcn32_panel_cntl_create, - .validate_bandwidth = dcn32_validate_bandwidth, - .calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg, - .populate_dml_pipes = dcn32_populate_dml_pipes_from_context, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe, - .acquire_free_pipe_as_secondary_opp_head = dcn32_acquire_free_pipe_as_secondary_opp_head, - .release_pipe = dcn20_release_pipe, - .add_stream_to_ctx = dcn30_add_stream_to_ctx, - .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, - .set_mcif_arb_params = dcn30_set_mcif_arb_params, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, - .acquire_post_bldn_3dlut = dcn32_acquire_post_bldn_3dlut, - .release_post_bldn_3dlut = dcn32_release_post_bldn_3dlut, - .update_bw_bounding_box = dcn321_update_bw_bounding_box, - .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, - .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, - .add_phantom_pipes = dcn32_add_phantom_pipes, - .remove_phantom_pipes = dcn32_remove_phantom_pipes, - .retain_phantom_pipes = dcn32_retain_phantom_pipes, - .save_mall_state = dcn32_save_mall_state, - .restore_mall_state = dcn32_restore_mall_state, - 
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, -}; - -static uint32_t read_pipe_fuses(struct dc_context *ctx) -{ - uint32_t value = REG_READ(CC_DC_PIPE_DIS); - /* DCN321 support max 4 pipes */ - value = value & 0xf; - return value; -} - - -static bool dcn321_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dcn321_resource_pool *pool) -{ - int i, j; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data init_data; - struct ddc_service_init_data ddc_init_data = {0}; - uint32_t pipe_fuses = 0; - uint32_t num_pipes = 4; - -#undef REG_STRUCT -#define REG_STRUCT bios_regs - bios_regs_init(); - -#undef REG_STRUCT -#define REG_STRUCT clk_src_regs - clk_src_regs_init(0, A), - clk_src_regs_init(1, B), - clk_src_regs_init(2, C), - clk_src_regs_init(3, D), - clk_src_regs_init(4, E); - -#undef REG_STRUCT -#define REG_STRUCT abm_regs - abm_regs_init(0), - abm_regs_init(1), - abm_regs_init(2), - abm_regs_init(3); - -#undef REG_STRUCT -#define REG_STRUCT dccg_regs - dccg_regs_init(); - - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap_dcn321; - /* max number of pipes for ASIC before checking for pipe fuses */ - num_pipes = pool->base.res_cap->num_timing_generator; - pipe_fuses = read_pipe_fuses(ctx); - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) - if (pipe_fuses & 1 << i) - num_pipes--; - - if (pipe_fuses & 1) - ASSERT(0); //Unexpected - Pipe 0 should always be fully functional! - - if (pipe_fuses & CC_DC_PIPE_DIS__DC_FULL_DIS_MASK) - ASSERT(0); //Entire DCN is harvested! - - /* within dml lib, initial value is hard coded, if ASIC pipe is fused, the - * value will be changed, update max_num_dpp and max_num_otg for dml. - */ - dcn3_21_ip.max_num_dpp = num_pipes; - dcn3_21_ip.max_num_otg = num_pipes; - - pool->base.funcs = &dcn321_res_pool_funcs; - - /************************************************* - * Resource + asic cap harcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.timing_generator_count = num_pipes; - pool->base.pipe_count = num_pipes; - pool->base.mpcc_count = num_pipes; - dc->caps.max_downscale_ratio = 600; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/ - /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/ - dc->caps.max_cursor_size = 64; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dmdata_alloc_size = 2048; - dc->caps.mall_size_per_mem_channel = 4; - dc->caps.mall_size_total = 0; - dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; - dc->caps.cache_line_size = 64; - dc->caps.cache_num_ways = 16; - - /* Calculate the available MALL space */ - dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( - dc, dc->ctx->dc_bios->vram_info.num_chans) * - dc->caps.mall_size_per_mem_channel * 1024 * 1024; - dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; - - dc->caps.subvp_fw_processing_delay_us = 15; - dc->caps.subvp_drr_max_vblank_margin_us = 40; - dc->caps.subvp_prefetch_end_to_mall_start_us = 15; - dc->caps.subvp_swath_height_margin_lines = 16; - dc->caps.subvp_pstate_allow_width_us = 20; - dc->caps.subvp_vertical_int_margin_us = 30; - dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin - dc->caps.max_slave_planes = 2; - dc->caps.max_slave_yuv_planes = 2; - dc->caps.max_slave_rgb_planes = 2; - dc->caps.post_blend_color_processing = true; - 
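The pipe-fuse handling earlier in this constructor amounts to a popcount over the low four fuse bits, with bit 0 additionally asserted to be clear. A sketch under that reading (hypothetical helper name):

static uint32_t dcn321_active_pipe_count(uint32_t pipe_fuses)
{
	uint32_t num_pipes = 4;		/* DCN321 supports at most 4 pipes */
	uint32_t fuses = pipe_fuses & 0xf;

	while (fuses) {
		num_pipes -= fuses & 1;	/* each set bit disables one pipe */
		fuses >>= 1;
	}
	return num_pipes;		/* e.g. fuses 0b0100 -> 3 pipes */
}

The result feeds dcn3_21_ip.max_num_dpp and max_num_otg, so DML only models the unfused pipes.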
dc->caps.force_dp_tps4_for_cp2520 = true; - dc->caps.dp_hpo = true; - dc->caps.dp_hdmi21_pcon_support = true; - dc->caps.edp_dsc_support = true; - dc->caps.extended_aux_timeout_support = true; - dc->caps.dmcub_support = true; - dc->caps.max_v_total = (1 << 15) - 1; - - /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 0; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; - dc->caps.color.dpp.dgam_rom_caps.pq = 1; - dc->caps.color.dpp.dgam_rom_caps.hlg = 1; - dc->caps.color.dpp.post_csc = 1; - dc->caps.color.dpp.gamma_corr = 1; - dc->caps.color.dpp.dgam_rom_for_yuv = 0; - - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; - // no OGAM ROM on DCN2 and later ASICs - dc->caps.color.dpp.ogam_rom_caps.srgb = 0; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 0; - - dc->caps.color.mpc.gamut_remap = 1; - dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //4, configurable to be before or after BLND in MPCC - dc->caps.color.mpc.ogam_ram = 1; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; - dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 1; - - dc->config.dc_mode_clk_limit_support = true; - /* read VBIOS LTTPR caps */ - { - if (ctx->dc_bios->funcs->get_lttpr_caps) { - enum bp_result bp_query_result; - uint8_t is_vbios_lttpr_enable = 0; - - bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); - dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; - } - - /* interop bit is implicit */ - { - dc->caps.vbios_lttpr_aware = true; - } - } - - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) - dc->debug = debug_defaults_drv; - - // Init the vm_helper - if (dc->vm_helper) - vm_helper_init(dc->vm_helper, 16); - - /************************************************* - * Create resources * - *************************************************/ - - /* Clock Sources for Pixel Clock*/ - pool->base.clock_sources[DCN321_CLK_SRC_PLL0] = - dcn321_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCN321_CLK_SRC_PLL1] = - dcn321_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCN321_CLK_SRC_PLL2] = - dcn321_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCN321_CLK_SRC_PLL3] = - dcn321_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - pool->base.clock_sources[DCN321_CLK_SRC_PLL4] = - dcn321_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - - pool->base.clk_src_count = DCN321_CLK_SRC_TOTAL; - - /* todo: not reuse phy_pll registers */ - pool->base.dp_clock_source = - dcn321_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if 
(pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* DCCG */ - pool->base.dccg = dccg32_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->base.dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* DML */ - dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); - - /* IRQ Service */ - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dcn32_create(&init_data); - if (!pool->base.irqs) - goto create_fail; - - /* HUBBUB */ - pool->base.hubbub = dcn321_hubbub_create(ctx); - if (pool->base.hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - /* HUBPs, DPPs, OPPs, TGs, ABMs */ - for (i = 0, j = 0; i < pool->base.res_cap->num_timing_generator; i++) { - - /* if pipe is disabled, skip instance of HW pipe, - * i.e, skip ASIC register instance - */ - if (pipe_fuses & 1 << i) - continue; - - pool->base.hubps[j] = dcn321_hubp_create(ctx, i); - if (pool->base.hubps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create hubps!\n"); - goto create_fail; - } - - pool->base.dpps[j] = dcn321_dpp_create(ctx, i); - if (pool->base.dpps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create dpps!\n"); - goto create_fail; - } - - pool->base.opps[j] = dcn321_opp_create(ctx, i); - if (pool->base.opps[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - - pool->base.timing_generators[j] = dcn321_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[j] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - - pool->base.multiple_abms[j] = dmub_abm_create(ctx, - &abm_regs[i], - &abm_shift, - &abm_mask); - if (pool->base.multiple_abms[j] == NULL) { - dm_error("DC: failed to create abm for pipe %d!\n", i); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* index for resource pool arrays for next valid pipe */ - j++; - } - - /* PSR */ - pool->base.psr = dmub_psr_create(ctx); - if (pool->base.psr == NULL) { - dm_error("DC: failed to create psr obj!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* MPCCs */ - pool->base.mpc = dcn321_mpc_create(ctx, pool->base.res_cap->num_timing_generator, pool->base.res_cap->num_mpc_3dlut); - if (pool->base.mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - /* DSCs */ - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - pool->base.dscs[i] = dcn321_dsc_create(ctx, i); - if (pool->base.dscs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create display stream compressor %d!\n", i); - goto create_fail; - } - } - - /* DWB */ - if (!dcn321_dwbc_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dwbc!\n"); - goto create_fail; - } - - /* MMHUBBUB */ - if (!dcn321_mmhubbub_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mcif_wb!\n"); - goto create_fail; - } - - /* AUX and I2C */ - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dcn321_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto create_fail; - } - pool->base.hw_i2cs[i] = dcn321_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == 
NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create hw i2c!!\n"); - goto create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - /* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */ - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto create_fail; - - /* HW Sequencer init functions and Plane caps */ - dcn32_hw_sequencer_init_functions(dc); - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { - ddc_init_data.ctx = dc->ctx; - ddc_init_data.link = NULL; - ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; - ddc_init_data.id.enum_id = 0; - ddc_init_data.id.type = OBJECT_TYPE_GENERIC; - pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); - } else { - pool->base.oem_device = NULL; - } - - dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; - dc->dml2_options.use_native_soc_bb_construction = true; - dc->dml2_options.minimize_dispclk_using_odm = true; - - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; - - dc->dml2_options.svp_pstate.callbacks.dc = dc; - dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context; - dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx; - dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state; - dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context; - dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx; - dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink; - dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release; - dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release; - dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; - - dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; - dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us; - dc->dml2_options.svp_pstate.subvp_pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us; - dc->dml2_options.svp_pstate.subvp_swath_height_margin_lines = dc->caps.subvp_swath_height_margin_lines; - - dc->dml2_options.svp_pstate.force_disable_subvp = dc->debug.force_disable_subvp; - dc->dml2_options.svp_pstate.force_enable_subvp = 
dc->debug.force_subvp_mclk_switch; - - dc->dml2_options.mall_cfg.cache_line_size_bytes = dc->caps.cache_line_size; - dc->dml2_options.mall_cfg.cache_num_ways = dc->caps.cache_num_ways; - dc->dml2_options.mall_cfg.max_cab_allocation_bytes = dc->caps.max_cab_allocation_bytes; - dc->dml2_options.mall_cfg.mblk_height_4bpe_pixels = DCN3_2_MBLK_HEIGHT_4BPE; - dc->dml2_options.mall_cfg.mblk_height_8bpe_pixels = DCN3_2_MBLK_HEIGHT_8BPE; - dc->dml2_options.mall_cfg.mblk_size_bytes = DCN3_2_MALL_MBLK_SIZE_BYTES; - dc->dml2_options.mall_cfg.mblk_width_pixels = DCN3_2_MBLK_WIDTH; - - dc->dml2_options.max_segments_per_hubp = 18; - dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE; - - return true; - -create_fail: - - dcn321_resource_destruct(pool); - - return false; -} - -struct resource_pool *dcn321_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc) -{ - struct dcn321_resource_pool *pool = - kzalloc(sizeof(struct dcn321_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dcn321_resource_construct(init_data->num_virtual_links, dc, pool)) - return &pool->base; - - BREAK_TO_DEBUGGER(); - kfree(pool); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h deleted file mode 100644 index 82cbf009f2..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef _DCN321_RESOURCE_H_ -#define _DCN321_RESOURCE_H_ - -#include "core_types.h" - -#define TO_DCN321_RES_POOL(pool)\ - container_of(pool, struct dcn321_resource_pool, base) - -extern struct _vcs_dpi_ip_params_st dcn3_21_ip; -extern struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc; - -struct dcn321_resource_pool { - struct resource_pool base; -}; - -struct resource_pool *dcn321_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); - -#endif /* _DCN321_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile index 20d0eef1a1..0e317e0c36 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile @@ -10,9 +10,9 @@ # # Makefile for DCN35. 
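For reference, dcn321_create_resource_pool() and dcn321_resource_construct() above follow DC's usual allocate-construct-or-unwind idiom: zero-allocate the pool wrapper, run the construct routine, and on any partial failure jump to a single create_fail label that destructs whatever was built before the caller frees the wrapper. A minimal, self-contained sketch of that idiom in plain C (the pool_* names are illustrative stand-ins, not DC symbols):

#include <stdbool.h>
#include <stdlib.h>

struct pool {
	int *clock_sources;	/* stands in for the many sub-objects */
	int *hubps;
};

static void pool_destruct(struct pool *p)
{
	/* Safe on partially constructed pools: free() tolerates NULL. */
	free(p->hubps);
	free(p->clock_sources);
}

static bool pool_construct(struct pool *p)
{
	p->clock_sources = calloc(4, sizeof(*p->clock_sources));
	if (!p->clock_sources)
		goto create_fail;
	p->hubps = calloc(4, sizeof(*p->hubps));
	if (!p->hubps)
		goto create_fail;
	return true;

create_fail:
	/* One exit path unwinds whatever was built so far. */
	pool_destruct(p);
	return false;
}

struct pool *pool_create(void)
{
	struct pool *p = calloc(1, sizeof(*p));	/* kzalloc analogue */

	if (!p)
		return NULL;
	if (pool_construct(p))
		return p;
	free(p);
	return NULL;
}

Centralizing the unwind in one destruct routine is what lets every sub-object allocation in the real construct function share the same failure label.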
-DCN35 = dcn35_resource.o dcn35_init.o dcn35_dio_stream_encoder.o \
-	dcn35_dio_link_encoder.o dcn35_dccg.o dcn35_optc.o \
-	dcn35_dsc.o dcn35_hubp.o dcn35_hubbub.o \
+DCN35 = dcn35_dio_stream_encoder.o \
+	dcn35_dio_link_encoder.o dcn35_dccg.o \
+	dcn35_hubp.o dcn35_hubbub.o \
 	dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o
 
 AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
index 479f3683c0..f1ba7bb792 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
@@ -256,6 +256,21 @@ static void dccg35_set_dtbclk_dto(
 	if (params->ref_dtbclk_khz && req_dtbclk_khz) {
 		uint32_t modulo, phase;
 
+		switch (params->otg_inst) {
+		case 0:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 1);
+			break;
+		case 1:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 1);
+			break;
+		case 2:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 1);
+			break;
+		case 3:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 1);
+			break;
+		}
+
 		// phase / modulo = dtbclk / dtbclk ref
 		modulo = params->ref_dtbclk_khz * 1000;
 		phase = req_dtbclk_khz * 1000;
@@ -280,6 +295,21 @@ static void dccg35_set_dtbclk_dto(
 		REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
 				PIPE_DTO_SRC_SEL[params->otg_inst], 2);
 	} else {
+		switch (params->otg_inst) {
+		case 0:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
+			break;
+		case 1:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 0);
+			break;
+		case 2:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 0);
+			break;
+		case 3:
+			REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 0);
+			break;
+		}
+
 		REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
 				DTBCLK_DTO_ENABLE[params->otg_inst], 0,
 				PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ?
0 : 1); @@ -476,6 +506,64 @@ static void dccg35_dpp_root_clock_control( dccg->dpp_clock_gated[dpp_inst] = !clock_on; } +static void dccg35_disable_symclk32_se( + struct dccg *dccg, + int hpo_se_inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* set refclk as the source for symclk32_se */ + switch (hpo_se_inst) { + case 0: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE0_SRC_SEL, 0, + SYMCLK32_SE0_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE0_GATE_DISABLE, 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, +// SYMCLK32_ROOT_SE0_GATE_DISABLE, 0); + } + break; + case 1: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE1_SRC_SEL, 0, + SYMCLK32_SE1_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE1_GATE_DISABLE, 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, +// SYMCLK32_ROOT_SE1_GATE_DISABLE, 0); + } + break; + case 2: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE2_SRC_SEL, 0, + SYMCLK32_SE2_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE2_GATE_DISABLE, 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, +// SYMCLK32_ROOT_SE2_GATE_DISABLE, 0); + } + break; + case 3: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE3_SRC_SEL, 0, + SYMCLK32_SE3_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE3_GATE_DISABLE, 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, +// SYMCLK32_ROOT_SE3_GATE_DISABLE, 0); + } + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + void dccg35_init(struct dccg *dccg) { int otg_inst; @@ -484,7 +572,7 @@ void dccg35_init(struct dccg *dccg) * will cause DCN to hang. 
*/ for (otg_inst = 0; otg_inst < 4; otg_inst++) - dccg31_disable_symclk32_se(dccg, otg_inst); + dccg35_disable_symclk32_se(dccg, otg_inst); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) for (otg_inst = 0; otg_inst < 2; otg_inst++) @@ -758,7 +846,7 @@ static const struct dccg_funcs dccg35_funcs = { .dccg_init = dccg35_init, .set_dpstreamclk = dccg35_set_dpstreamclk, .enable_symclk32_se = dccg31_enable_symclk32_se, - .disable_symclk32_se = dccg31_disable_symclk32_se, + .disable_symclk32_se = dccg35_disable_symclk32_se, .enable_symclk32_le = dccg31_enable_symclk32_le, .disable_symclk32_le = dccg31_disable_symclk32_le, .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h index 423feb4c2f..1586a45ca3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h @@ -34,6 +34,8 @@ #define DCCG_REG_LIST_DCN35() \ DCCG_REG_LIST_DCN314(),\ SR(DPPCLK_CTRL),\ + SR(DCCG_GATE_DISABLE_CNTL4),\ + SR(DCCG_GATE_DISABLE_CNTL5),\ SR(DCCG_GATE_DISABLE_CNTL6),\ SR(DCCG_GLOBAL_FGCG_REP_CNTL),\ SR(SYMCLKA_CLOCK_ENABLE),\ @@ -174,7 +176,61 @@ DCCG_SF(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_SRC_SEL, mask_sh),\ DCCG_SF(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_SRC_SEL, mask_sh),\ DCCG_SF(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_SRC_SEL, mask_sh),\ - DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh) + DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, HDMICHARCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL6, HDMISTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, 
mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYA_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYB_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYC_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYD_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYE_REFCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\ + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL, DISPCLK_DCCG_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, HDMISTREAMCLK0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\ struct dccg *dccg35_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c index 81e349d583..da94e5309f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c @@ -184,6 +184,8 @@ void dcn35_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; + if (enc10->base.connector.id == CONNECTOR_ID_USBC) + enc10->base.features.flags.bits.DP_IS_USB_C = 1; enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; @@ -238,8 +240,6 @@ void dcn35_link_encoder_construct( } enc10->base.features.flags.bits.HDMI_6GB_EN = 1; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; if (bp_funcs->get_connector_speed_cap_info) result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c deleted file mode 100644 index 71d2dff998..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c +++ /dev/null @@ -1,60 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. 
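The dccg35_set_dtbclk_dto() change earlier in this patch brackets the DTO programming with per-OTG DTBCLK gate toggles and derives the divider pair from the comment "phase / modulo = dtbclk / dtbclk ref". A small sketch of that arithmetic, assuming kHz inputs small enough that the *1000 scaling fits in 32 bits (struct dto_params and the helper name are invented for illustration):

#include <stdint.h>
#include <stdio.h>

struct dto_params {
	uint32_t phase;		/* numerator programmed into the DTO */
	uint32_t modulo;	/* denominator programmed into the DTO */
};

/*
 * phase / modulo = dtbclk / dtbclk_ref, so the generated clock is
 * ref * phase / modulo; scaling both terms by 1000 keeps kHz inputs
 * integral without changing the ratio.
 */
static struct dto_params dtbclk_dto_params(uint32_t ref_dtbclk_khz,
					   uint32_t req_dtbclk_khz)
{
	struct dto_params p = {
		.phase = req_dtbclk_khz * 1000u,
		.modulo = ref_dtbclk_khz * 1000u,
	};

	return p;
}

int main(void)
{
	/* e.g. a 600 MHz reference generating a 297 MHz DTB clock */
	struct dto_params p = dtbclk_dto_params(600000, 297000);

	printf("phase=%u modulo=%u -> %.1f kHz\n",
	       (unsigned)p.phase, (unsigned)p.modulo,
	       600000.0 * p.phase / p.modulo);
	return 0;
}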
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dcn35_dsc.h" -#include "reg_helper.h" - -/* Macro definitios for REG_SET macros*/ -#define CTX \ - dsc20->base.ctx - -#define REG(reg)\ - dsc20->dsc_regs->reg - -#undef FN -#define FN(reg_name, field_name) \ - ((const struct dcn35_dsc_shift *)(dsc20->dsc_shift))->field_name, \ - ((const struct dcn35_dsc_mask *)(dsc20->dsc_mask))->field_name - -#define DC_LOGGER \ - dsc->ctx->logger - -void dsc35_construct(struct dcn20_dsc *dsc, - struct dc_context *ctx, - int inst, - const struct dcn20_dsc_registers *dsc_regs, - const struct dcn35_dsc_shift *dsc_shift, - const struct dcn35_dsc_mask *dsc_mask) -{ - dsc2_construct(dsc, ctx, inst, dsc_regs, - (const struct dcn20_dsc_shift *)(dsc_shift), - (const struct dcn20_dsc_mask *)(dsc_mask)); -} - -void dsc35_set_fgcg(struct dcn20_dsc *dsc20, bool enable) -{ - REG_UPDATE(DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, !enable); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h deleted file mode 100644 index 133ad38842..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h +++ /dev/null @@ -1,59 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
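The CTX/REG/FN macro block in the deleted dcn35_dsc.c above is DC's standard register-access plumbing: FN() expands a field name into its (shift, mask) pair so that REG_UPDATE() can perform a read-modify-write on just that field. A sketch of what one such update reduces to (reg_read/reg_write and the bit-4 field are made-up stand-ins for the real MMIO accessors and DSC_FGCG_REP_DIS):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0x00c0ffee;	/* stands in for an MMIO register */

static uint32_t reg_read(void)		{ return fake_reg; }
static void reg_write(uint32_t v)	{ fake_reg = v; }

/* What REG_UPDATE(reg, FIELD, value) boils down to for a single field. */
static void reg_update_field(unsigned int shift, uint32_t mask, uint32_t value)
{
	uint32_t v = reg_read();

	v = (v & ~mask) | ((value << shift) & mask);	/* read-modify-write */
	reg_write(v);
}

int main(void)
{
	/* an invented 1-bit field at bit 4, e.g. a *_REP_DIS-style disable */
	reg_update_field(4, 1u << 4, 1);
	printf("reg = 0x%08x\n", (unsigned)fake_reg);
	return 0;
}

Note how dsc35_set_fgcg() in the hunk above writes !enable into a *_DIS field; the helper keeps that inversion at the call site rather than in the plumbing.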
- * - * Authors: AMD - * - */ - -#ifndef __DCN35_DSC_H__ -#define __DCN35_DSC_H__ - -#include "dcn20/dcn20_dsc.h" - -#define DSC_REG_LIST_SH_MASK_DCN35(mask_sh) \ - DSC_REG_LIST_SH_MASK_DCN20(mask_sh), \ - DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, mask_sh) - -#define DSC_FIELD_LIST_DCN35(type) \ - struct { \ - DSC_FIELD_LIST_DCN20(type); \ - type DSC_FGCG_REP_DIS; \ - } - -struct dcn35_dsc_shift { - DSC_FIELD_LIST_DCN35(uint8_t); -}; - -struct dcn35_dsc_mask { - DSC_FIELD_LIST_DCN35(uint32_t); -}; - -void dsc35_construct(struct dcn20_dsc *dsc, - struct dc_context *ctx, - int inst, - const struct dcn20_dsc_registers *dsc_regs, - const struct dcn35_dsc_shift *dsc_shift, - const struct dcn35_dsc_mask *dsc_mask); - -void dsc35_set_fgcg(struct dcn20_dsc *dsc20, bool enable); - -#endif /* __DCN35_DSC_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c deleted file mode 100644 index 296bf3a38c..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c +++ /dev/null @@ -1,171 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
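dcn35_dsc.h above grows the DCN20 DSC field list by one entry (DSC_FGCG_REP_DIS) through macro composition, then instantiates the same list twice: with uint8_t members for the shift table and uint32_t members for the mask table. A compact sketch of that pattern with a toy two-field list (the GEN_A/GEN_B generation names are invented; anonymous struct members as in the real header require C11):

#include <stdint.h>
#include <stdio.h>

/* Base-generation field list (toy version of the DCN20 list). */
#define DSC_FIELD_LIST_GEN_A(type) \
	type ICH_RESET_AT_END_OF_LINE

/* Next generation: the base list plus exactly one new field. */
#define DSC_FIELD_LIST_GEN_B(type) \
	struct { \
		DSC_FIELD_LIST_GEN_A(type); \
		type DSC_FGCG_REP_DIS; \
	}

/* One layout, two element widths: shifts are small, masks full width. */
struct dsc_shift { DSC_FIELD_LIST_GEN_B(uint8_t); };
struct dsc_mask  { DSC_FIELD_LIST_GEN_B(uint32_t); };

int main(void)
{
	struct dsc_shift s = { .DSC_FGCG_REP_DIS = 4 };
	struct dsc_mask  m = { .DSC_FGCG_REP_DIS = 1u << 4 };

	printf("mask 0x%x at shift %u\n",
	       (unsigned)m.DSC_FGCG_REP_DIS, (unsigned)s.DSC_FGCG_REP_DIS);
	return 0;
}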
- * - * Authors: AMD - * - */ - -#include "dce110/dce110_hwseq.h" -#include "dcn10/dcn10_hwseq.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn21/dcn21_hwseq.h" -#include "dcn30/dcn30_hwseq.h" -#include "dcn301/dcn301_hwseq.h" -#include "dcn31/dcn31_hwseq.h" -#include "dcn32/dcn32_hwseq.h" -#include "dcn35/dcn35_hwseq.h" - -#include "dcn35_init.h" - -static const struct hw_sequencer_funcs dcn35_funcs = { - .program_gamut_remap = dcn30_program_gamut_remap, - .init_hw = dcn35_init_hw, - .power_down_on_boot = dcn35_power_down_on_boot, - .apply_ctx_to_hw = dce110_apply_ctx_to_hw, - .apply_ctx_for_surface = NULL, - .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, - .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, - .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, - .update_plane_addr = dcn20_update_plane_addr, - .update_dchub = dcn10_update_dchub, - .update_pending_status = dcn10_update_pending_status, - .program_output_csc = dcn20_program_output_csc, - .enable_accelerated_mode = dce110_enable_accelerated_mode, - .enable_timing_synchronization = dcn10_enable_timing_synchronization, - .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, - .update_info_frame = dcn31_update_info_frame, - .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, - .enable_stream = dcn20_enable_stream, - .disable_stream = dce110_disable_stream, - .unblank_stream = dcn32_unblank_stream, - .blank_stream = dce110_blank_stream, - .enable_audio_stream = dce110_enable_audio_stream, - .disable_audio_stream = dce110_disable_audio_stream, - .disable_plane = dcn35_disable_plane, - .disable_pixel_data = dcn20_disable_pixel_data, - .pipe_control_lock = dcn20_pipe_control_lock, - .interdependent_update_lock = dcn10_lock_all_pipes, - .cursor_lock = dcn10_cursor_lock, - .prepare_bandwidth = dcn35_prepare_bandwidth, - .optimize_bandwidth = dcn35_optimize_bandwidth, - .update_bandwidth = dcn20_update_bandwidth, - .set_drr = dcn10_set_drr, - .get_position = dcn10_get_position, - .set_static_screen_control = dcn30_set_static_screen_control, - .setup_stereo = dcn10_setup_stereo, - .set_avmute = dcn30_set_avmute, - .log_hw_state = dcn10_log_hw_state, - .get_hw_state = dcn10_get_hw_state, - .clear_status_bits = dcn10_clear_status_bits, - .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, - .edp_backlight_control = dce110_edp_backlight_control, - .edp_power_control = dce110_edp_power_control, - .edp_wait_for_T12 = dce110_edp_wait_for_T12, - .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, - .set_cursor_position = dcn10_set_cursor_position, - .set_cursor_attribute = dcn10_set_cursor_attribute, - .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, - .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, - .set_clock = dcn10_set_clock, - .get_clock = dcn10_get_clock, - .program_triplebuffer = dcn20_program_triple_buffer, - .enable_writeback = dcn30_enable_writeback, - .disable_writeback = dcn30_disable_writeback, - .update_writeback = dcn30_update_writeback, - .mmhubbub_warmup = dcn30_mmhubbub_warmup, - .dmdata_status_done = dcn20_dmdata_status_done, - .program_dmdata_engine = dcn30_program_dmdata_engine, - .set_dmdata_attributes = dcn20_set_dmdata_attributes, - .init_sys_ctx = dcn31_init_sys_ctx, - .init_vm_ctx = dcn20_init_vm_ctx, - .set_flip_control_gsl = dcn20_set_flip_control_gsl, - .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, - .calc_vupdate_position = dcn10_calc_vupdate_position, - .power_down = 
dce110_power_down, - .set_backlight_level = dcn21_set_backlight_level, - .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, - .set_pipe = dcn21_set_pipe, - .enable_lvds_link_output = dce110_enable_lvds_link_output, - .enable_tmds_link_output = dce110_enable_tmds_link_output, - .enable_dp_link_output = dce110_enable_dp_link_output, - .disable_link_output = dcn32_disable_link_output, - .z10_restore = dcn35_z10_restore, - .z10_save_init = dcn31_z10_save_init, - .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, - .optimize_pwr_state = dcn21_optimize_pwr_state, - .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, - .update_visual_confirm_color = dcn10_update_visual_confirm_color, - .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations, - .update_dsc_pg = dcn32_update_dsc_pg, - .calc_blocks_to_gate = dcn35_calc_blocks_to_gate, - .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, - .block_power_control = dcn35_block_power_control, - .root_clock_control = dcn35_root_clock_control, - .set_idle_state = dcn35_set_idle_state, - .get_idle_state = dcn35_get_idle_state -}; - -static const struct hwseq_private_funcs dcn35_private_funcs = { - .init_pipes = dcn35_init_pipes, - .update_plane_addr = dcn20_update_plane_addr, - .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, - .update_mpcc = dcn20_update_mpcc, - .set_input_transfer_func = dcn32_set_input_transfer_func, - .set_output_transfer_func = dcn32_set_output_transfer_func, - .power_down = dce110_power_down, - .enable_display_power_gating = dcn10_dummy_display_power_gating, - .blank_pixel_data = dcn20_blank_pixel_data, - .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap, - .enable_stream_timing = dcn20_enable_stream_timing, - .edp_backlight_control = dce110_edp_backlight_control, - .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, - .did_underflow_occur = dcn10_did_underflow_occur, - .init_blank = dcn20_init_blank, - .disable_vga = NULL, - .bios_golden_init = dcn10_bios_golden_init, - .plane_atomic_disable = dcn35_plane_atomic_disable, - //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/ - //.hubp_pg_control = dcn35_hubp_pg_control, - .enable_power_gating_plane = dcn35_enable_power_gating_plane, - .dpp_root_clock_control = dcn35_dpp_root_clock_control, - .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, - .update_odm = dcn35_update_odm, - .set_hdr_multiplier = dcn10_set_hdr_multiplier, - .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, - .wait_for_blank_complete = dcn20_wait_for_blank_complete, - .dccg_init = dcn20_dccg_init, - .set_mcm_luts = dcn32_set_mcm_luts, - .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, - .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, - .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, - .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, - .dsc_pg_control = dcn35_dsc_pg_control, - .dsc_pg_status = dcn32_dsc_pg_status, - .enable_plane = dcn35_enable_plane, -}; - -void dcn35_hw_sequencer_construct(struct dc *dc) -{ - dc->hwss = dcn35_funcs; - dc->hwseq->funcs = dcn35_private_funcs; - -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h deleted file mode 100644 index b67015032c..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. 
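The deleted dcn35_init.c above assembles the DCN35 hardware sequencer as one function table that mostly reuses dce110/dcn10/dcn20/dcn30 implementations and overrides only the DCN35-specific hooks; dcn35_hw_sequencer_construct() then simply installs the tables. A minimal sketch of that ops-table composition (the hooks and generation names here are invented):

#include <stdio.h>

struct hw_funcs {
	void (*init_hw)(void);
	void (*disable_plane)(void);
};

/* Older-generation implementations, shared as-is. */
static void gen1_init_hw(void)       { puts("gen1 init_hw"); }

/* Newer generation overrides only what changed. */
static void gen2_disable_plane(void) { puts("gen2 disable_plane"); }

static const struct hw_funcs gen2_funcs = {
	.init_hw       = gen1_init_hw,       /* reused from gen1 */
	.disable_plane = gen2_disable_plane, /* gen2-specific override */
};

struct dc { const struct hw_funcs *hwss; };

static void gen2_hw_sequencer_construct(struct dc *dc)
{
	dc->hwss = &gen2_funcs;	/* one assignment wires up the whole table */
}

int main(void)
{
	struct dc dc;

	gen2_hw_sequencer_construct(&dc);
	dc.hwss->init_hw();
	dc.hwss->disable_plane();
	return 0;
}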
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_DCN35_INIT_H__ -#define __DC_DCN35_INIT_H__ - -struct dc; - -void dcn35_hw_sequencer_construct(struct dc *dc); - -#endif /* __DC_DCN35_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c deleted file mode 100644 index 5b15475088..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c +++ /dev/null @@ -1,300 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dcn35_optc.h" - -#include "dcn30/dcn30_optc.h" -#include "dcn31/dcn31_optc.h" -#include "dcn32/dcn32_optc.h" -#include "reg_helper.h" -#include "dc.h" -#include "dcn_calc_math.h" - -#define REG(reg)\ - optc1->tg_regs->reg - -#define CTX \ - optc1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - optc1->tg_shift->field_name, optc1->tg_mask->field_name - -/** - * optc35_set_odm_combine() - Enable CRTC - call ASIC Control Object to enable Timing generator. - * - * @optc: Output Pipe Timing Combine instance reference. - * @opp_id: Output Plane Processor instance ID. - * @opp_cnt: Output Plane Processor count. - * @timing: Timing parameters used to configure DCN blocks. - * - * Return: void. 
- */ -static void optc35_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, - struct dc_crtc_timing *timing) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - uint32_t memory_mask = 0; - int h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; - int mpcc_hactive = h_active / opp_cnt; - /* Each memory instance is 2048x(314x2) bits to support half line of 4096 */ - int odm_mem_count = (h_active + 2047) / 2048; - - /* - * display <= 4k : 2 memories + 2 pipes - * 4k < display <= 8k : 4 memories + 2 pipes - * 8k < display <= 12k : 6 memories + 4 pipes - */ - if (opp_cnt == 4) { - if (odm_mem_count <= 2) - memory_mask = 0x3; - else if (odm_mem_count <= 4) - memory_mask = 0xf; - else - memory_mask = 0x3f; - } else { - if (odm_mem_count <= 2) - memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2); - else if (odm_mem_count <= 4) - memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); - else - memory_mask = 0x77; - } - - REG_SET(OPTC_MEMORY_CONFIG, 0, - OPTC_MEM_SEL, memory_mask); - - if (opp_cnt == 2) { - REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 1, - OPTC_SEG0_SRC_SEL, opp_id[0], - OPTC_SEG1_SRC_SEL, opp_id[1]); - } else if (opp_cnt == 4) { - REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, - OPTC_NUM_OF_INPUT_SEGMENT, 3, - OPTC_SEG0_SRC_SEL, opp_id[0], - OPTC_SEG1_SRC_SEL, opp_id[1], - OPTC_SEG2_SRC_SEL, opp_id[2], - OPTC_SEG3_SRC_SEL, opp_id[3]); - } - - REG_UPDATE(OPTC_WIDTH_CONTROL, - OPTC_SEGMENT_WIDTH, mpcc_hactive); - - REG_UPDATE(OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, opp_cnt - 1); - optc1->opp_count = opp_cnt; -} - -static bool optc35_enable_crtc(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - /* opp instance for OTG, 1 to 1 mapping and odm will adjust */ - REG_UPDATE(OPTC_DATA_SOURCE_SELECT, - OPTC_SEG0_SRC_SEL, optc->inst); - - /* VTG enable first is for HW workaround */ - REG_UPDATE(CONTROL, - VTG0_ENABLE, 1); - - REG_SEQ_START(); - - /* Enable CRTC */ - REG_UPDATE_2(OTG_CONTROL, - OTG_DISABLE_POINT_CNTL, 2, - OTG_MASTER_EN, 1); - - REG_SEQ_SUBMIT(); - REG_SEQ_WAIT_DONE(); - - return true; -} - -/* disable_crtc */ -static bool optc35_disable_crtc(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT, - OPTC_SEG0_SRC_SEL, 0xf, - OPTC_SEG1_SRC_SEL, 0xf, - OPTC_SEG2_SRC_SEL, 0xf, - OPTC_SEG3_SRC_SEL, 0xf, - OPTC_NUM_OF_INPUT_SEGMENT, 0); - - REG_UPDATE(OPTC_MEMORY_CONFIG, - OPTC_MEM_SEL, 0); - - /* disable otg request until end of the first line - * in the vertical blank region - */ - REG_UPDATE(OTG_CONTROL, - OTG_MASTER_EN, 0); - - REG_UPDATE(CONTROL, - VTG0_ENABLE, 0); - - /* CRTC disabled, so disable clock. */ - REG_WAIT(OTG_CLOCK_CONTROL, - OTG_BUSY, 0, - 1, 100000); - optc1_clear_optc_underflow(optc); - - return true; -} - -static void optc35_phantom_crtc_post_enable(struct timing_generator *optc) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - /* Disable immediately. */ - REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 0, OTG_MASTER_EN, 0); - - /* CRTC disabled, so disable clock. 
*/ - REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000); -} - -static bool optc35_configure_crc(struct timing_generator *optc, - const struct crc_params *params) -{ - struct optc *optc1 = DCN10TG_FROM_TG(optc); - - if (!optc1_is_tg_enabled(optc)) - return false; - REG_WRITE(OTG_CRC_CNTL, 0); - if (!params->enable) - return true; - REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL, - OTG_CRC0_WINDOWA_X_START, params->windowa_x_start, - OTG_CRC0_WINDOWA_X_END, params->windowa_x_end); - REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL, - OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start, - OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end); - REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL, - OTG_CRC0_WINDOWB_X_START, params->windowb_x_start, - OTG_CRC0_WINDOWB_X_END, params->windowb_x_end); - REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL, - OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start, - OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end); - if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0) { - REG_UPDATE_4(OTG_CRC_CNTL, - OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0, - OTG_CRC0_SELECT, params->selection, - OTG_CRC_EN, 1, - OTG_CRC_WINDOW_DB_EN, 1); - } else - REG_UPDATE_3(OTG_CRC_CNTL, - OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0, - OTG_CRC0_SELECT, params->selection, - OTG_CRC_EN, 1); - return true; -} - -static struct timing_generator_funcs dcn35_tg_funcs = { - .validate_timing = optc1_validate_timing, - .program_timing = optc1_program_timing, - .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, - .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, - .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, - .program_global_sync = optc1_program_global_sync, - .enable_crtc = optc35_enable_crtc, - .disable_crtc = optc35_disable_crtc, - .immediate_disable_crtc = optc31_immediate_disable_crtc, - .phantom_crtc_post_enable = optc35_phantom_crtc_post_enable, - /* used by enable_timing_synchronization. Not need for FPGA */ - .is_counter_moving = optc1_is_counter_moving, - .get_position = optc1_get_position, - .get_frame_count = optc1_get_vblank_counter, - .get_scanoutpos = optc1_get_crtc_scanoutpos, - .get_otg_active_size = optc1_get_otg_active_size, - .set_early_control = optc1_set_early_control, - /* used by enable_timing_synchronization. 
Not need for FPGA */ - .wait_for_state = optc1_wait_for_state, - .set_blank_color = optc3_program_blank_color, - .did_triggered_reset_occur = optc1_did_triggered_reset_occur, - .triplebuffer_lock = optc3_triplebuffer_lock, - .triplebuffer_unlock = optc2_triplebuffer_unlock, - .enable_reset_trigger = optc1_enable_reset_trigger, - .enable_crtc_reset = optc1_enable_crtc_reset, - .disable_reset_trigger = optc1_disable_reset_trigger, - .lock = optc3_lock, - .unlock = optc1_unlock, - .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, - .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, - .enable_optc_clock = optc1_enable_optc_clock, - .set_drr = optc31_set_drr, - .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, - .set_vtotal_min_max = optc1_set_vtotal_min_max, - .set_static_screen_control = optc1_set_static_screen_control, - .program_stereo = optc1_program_stereo, - .is_stereo_left_eye = optc1_is_stereo_left_eye, - .tg_init = optc3_tg_init, - .is_tg_enabled = optc1_is_tg_enabled, - .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, - .clear_optc_underflow = optc1_clear_optc_underflow, - .setup_global_swap_lock = NULL, - .get_crc = optc1_get_crc, - .configure_crc = optc35_configure_crc, - .set_dsc_config = optc3_set_dsc_config, - .get_dsc_status = optc2_get_dsc_status, - .set_dwb_source = NULL, - .set_odm_bypass = optc32_set_odm_bypass, - .set_odm_combine = optc35_set_odm_combine, - .get_optc_source = optc2_get_optc_source, - .set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode, - .set_out_mux = optc3_set_out_mux, - .set_drr_trigger_window = optc3_set_drr_trigger_window, - .set_vtotal_change_limit = optc3_set_vtotal_change_limit, - .set_gsl = optc2_set_gsl, - .set_gsl_source_select = optc2_set_gsl_source_select, - .set_vtg_params = optc1_set_vtg_params, - .program_manual_trigger = optc2_program_manual_trigger, - .setup_manual_trigger = optc2_setup_manual_trigger, - .get_hw_timing = optc1_get_hw_timing, - .init_odm = optc3_init_odm, -}; - -void dcn35_timing_generator_init(struct optc *optc1) -{ - optc1->base.funcs = &dcn35_tg_funcs; - - optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; - optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; - - optc1->min_h_blank = 32; - optc1->min_v_blank = 3; - optc1->min_v_blank_interlace = 5; - optc1->min_h_sync_width = 4; - optc1->min_v_sync_width = 1; - - dcn35_timing_generator_set_fgcg( - optc1, CTX->dc->debug.enable_fine_grain_clock_gating.bits.optc); -} - -void dcn35_timing_generator_set_fgcg(struct optc *optc1, bool enable) -{ - REG_UPDATE(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, !enable); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h deleted file mode 100644 index 1f422e4c46..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h +++ /dev/null @@ -1,74 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. 
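optc35_set_odm_combine() above sizes the OPTC segment memories from the active width: each instance covers 2048 pixels ("2048x(314x2) bits to support half line of 4096"), so the instance count is a round-up division of h_active, and a bitmask then selects memories per OPP. A sketch of the two-OPP branch of that calculation (the helper names are illustrative; the four-OPP and overflow cases in the patch follow the same shape):

#include <stdint.h>
#include <stdio.h>

/* Each OPTC segment memory covers 2048 horizontal pixels. */
static int odm_mem_count(int h_active)
{
	return (h_active + 2047) / 2048;	/* DIV_ROUND_UP(h_active, 2048) */
}

/*
 * Two-OPP branch from the patch: each OPP gets a block of memory-select
 * bits at opp_id * 2, one or two memories wide, with 0x77 as the
 * widest-mode fallback.
 */
static uint32_t odm_memory_mask(int mem_count, int opp0, int opp1)
{
	if (mem_count <= 2)
		return (0x1u << (opp0 * 2)) | (0x1u << (opp1 * 2));
	if (mem_count <= 4)
		return (0x3u << (opp0 * 2)) | (0x3u << (opp1 * 2));
	return 0x77;
}

int main(void)
{
	int h_active = 3840;	/* a 4k-wide timing, no borders */
	int n = odm_mem_count(h_active);

	printf("mem_count=%d mask=0x%x\n", n,
	       (unsigned)odm_memory_mask(n, 0, 1));
	return 0;
}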
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_OPTC_DCN35_H__ -#define __DC_OPTC_DCN35_H__ - -#include "dcn10/dcn10_optc.h" -#include "dcn32/dcn32_optc.h" -#define OPTC_COMMON_MASK_SH_LIST_DCN3_5(mask_sh)\ - OPTC_COMMON_MASK_SH_LIST_DCN3_2(mask_sh),\ - SF(OTG0_OTG_CRC_CNTL, OTG_CRC_WINDOW_DB_EN, mask_sh),\ - SF(OTG0_OTG_CRC1_DATA_RG, CRC1_R_CR, mask_sh),\ - SF(OTG0_OTG_CRC1_DATA_RG, CRC1_G_Y, mask_sh),\ - SF(OTG0_OTG_CRC1_DATA_B, CRC1_B_CB, mask_sh),\ - SF(OTG0_OTG_CRC2_DATA_RG, CRC2_R_CR, mask_sh),\ - SF(OTG0_OTG_CRC2_DATA_RG, CRC2_G_Y, mask_sh),\ - SF(OTG0_OTG_CRC2_DATA_B, CRC2_B_CB, mask_sh),\ - SF(OTG0_OTG_CRC3_DATA_RG, CRC3_R_CR, mask_sh),\ - SF(OTG0_OTG_CRC3_DATA_RG, CRC3_G_Y, mask_sh),\ - SF(OTG0_OTG_CRC3_DATA_B, CRC3_B_CB, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL, OTG_CRC1_WINDOWA_X_START, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL, OTG_CRC1_WINDOWA_X_END, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL, OTG_CRC1_WINDOWA_Y_START, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL, OTG_CRC1_WINDOWA_Y_END, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL, OTG_CRC1_WINDOWB_X_START, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL, OTG_CRC1_WINDOWB_X_END, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL, OTG_CRC1_WINDOWB_Y_START, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL, OTG_CRC1_WINDOWB_Y_END, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK, OTG_CRC0_WINDOWA_X_START_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK, OTG_CRC0_WINDOWA_X_END_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK, OTG_CRC0_WINDOWA_Y_START_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK, OTG_CRC0_WINDOWA_Y_END_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK, OTG_CRC0_WINDOWB_X_START_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK, OTG_CRC0_WINDOWB_X_END_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK, OTG_CRC0_WINDOWB_Y_START_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK, OTG_CRC0_WINDOWB_Y_END_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK, OTG_CRC1_WINDOWA_X_START_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK, OTG_CRC1_WINDOWA_X_END_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK, OTG_CRC1_WINDOWA_Y_START_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK, OTG_CRC1_WINDOWA_Y_END_READBACK, mask_sh),\ - 
SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK, OTG_CRC1_WINDOWB_X_START_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK, OTG_CRC1_WINDOWB_X_END_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_START_READBACK, mask_sh),\ - SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\ - SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh) - -void dcn35_timing_generator_init(struct optc *optc1); - -void dcn35_timing_generator_set_fgcg(struct optc *optc1, bool enable); - -#endif /* __DC_OPTC_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c index d19db8e9b8..53bd0ae4ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c @@ -342,13 +342,6 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on) pg_cntl->pg_res_enable[PG_DCIO] = power_on; } -void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on) -{ - struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl); - - REG_UPDATE(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, power_on ? 1 : 0); -} - static bool pg_cntl35_plane_otg_status(struct pg_cntl *pg_cntl) { struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl); @@ -518,8 +511,7 @@ static const struct pg_cntl_funcs pg_cntl35_funcs = { .mpcc_pg_control = pg_cntl35_mpcc_pg_control, .opp_pg_control = pg_cntl35_opp_pg_control, .optc_pg_control = pg_cntl35_optc_pg_control, - .dwb_pg_control = pg_cntl35_dwb_pg_control, - .set_force_poweron_domain22 = pg_cntl35_set_force_poweron_domain22 + .dwb_pg_control = pg_cntl35_dwb_pg_control }; struct pg_cntl *pg_cntl35_create( diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h index 069dae08e2..3de240884d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h @@ -183,7 +183,6 @@ void pg_cntl35_optc_pg_control(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on); void pg_cntl35_dwb_pg_control(struct pg_cntl *pg_cntl, bool power_on); void pg_cntl35_init_pg_status(struct pg_cntl *pg_cntl); -void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on); struct pg_cntl *pg_cntl35_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c deleted file mode 100644 index 70ef1e7ff8..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c +++ /dev/null @@ -1,2148 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
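The dcn35_pg_cntl.c hunk above removes set_force_poweron_domain22 wholesale: the function body, its entry in pg_cntl35_funcs, and the prototype in dcn35_pg_cntl.h all go together, leaving that ops-table slot absent. A sketch of the null-checked optional-hook convention that makes such removals safe for callers (names simplified; whether any callers remain is not shown in this hunk):

#include <stdbool.h>
#include <stdio.h>

struct pg_cntl;

struct pg_cntl_funcs {
	void (*dwb_pg_control)(struct pg_cntl *pg, bool power_on);
	/* Optional hook: removed implementations simply leave it NULL. */
	void (*set_force_poweron)(struct pg_cntl *pg, bool power_on);
};

struct pg_cntl { const struct pg_cntl_funcs *funcs; };

static void dwb_pg_control(struct pg_cntl *pg, bool on)
{
	(void)pg;
	printf("dwb power %s\n", on ? "on" : "off");
}

static const struct pg_cntl_funcs funcs = {
	.dwb_pg_control = dwb_pg_control,
	/* .set_force_poweron intentionally absent after the removal */
};

int main(void)
{
	struct pg_cntl pg = { .funcs = &funcs };

	pg.funcs->dwb_pg_control(&pg, true);
	if (pg.funcs->set_force_poweron)	/* caller-side null check */
		pg.funcs->set_force_poweron(&pg, true);
	return 0;
}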
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "dc.h" - -#include "dcn31/dcn31_init.h" -#include "dcn35/dcn35_init.h" - -#include "resource.h" -#include "include/irq_service_interface.h" -#include "dcn35_resource.h" -#include "dml2/dml2_wrapper.h" - -#include "dcn20/dcn20_resource.h" -#include "dcn30/dcn30_resource.h" -#include "dcn31/dcn31_resource.h" -#include "dcn32/dcn32_resource.h" - -#include "dcn10/dcn10_ipp.h" -#include "dcn30/dcn30_hubbub.h" -#include "dcn31/dcn31_hubbub.h" -#include "dcn35/dcn35_hubbub.h" -#include "dcn32/dcn32_mpc.h" -#include "dcn35/dcn35_hubp.h" -#include "irq/dcn35/irq_service_dcn35.h" -#include "dcn35/dcn35_dpp.h" -#include "dcn35/dcn35_optc.h" -#include "dcn20/dcn20_hwseq.h" -#include "dcn30/dcn30_hwseq.h" -#include "dce110/dce110_hwseq.h" -#include "dcn35/dcn35_opp.h" -#include "dcn35/dcn35_dsc.h" -#include "dcn30/dcn30_vpg.h" -#include "dcn30/dcn30_afmt.h" -#include "dcn31/dcn31_dio_link_encoder.h" -#include "dcn35/dcn35_dio_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_stream_encoder.h" -#include "dcn31/dcn31_hpo_dp_link_encoder.h" -#include "dcn32/dcn32_hpo_dp_link_encoder.h" -#include "link.h" -#include "dcn31/dcn31_apg.h" -#include "dcn32/dcn32_dio_link_encoder.h" -#include "dcn31/dcn31_vpg.h" -#include "dcn31/dcn31_afmt.h" -#include "dce/dce_clock_source.h" -#include "dce/dce_audio.h" -#include "dce/dce_hwseq.h" -#include "clk_mgr.h" -#include "virtual/virtual_stream_encoder.h" -#include "dce110/dce110_resource.h" -#include "dml/display_mode_vba.h" -#include "dcn35/dcn35_dccg.h" -#include "dcn35/dcn35_pg_cntl.h" -#include "dcn10/dcn10_resource.h" -#include "dcn31/dcn31_panel_cntl.h" -#include "dcn35/dcn35_hwseq.h" -#include "dcn35_dio_link_encoder.h" -#include "dml/dcn31/dcn31_fpu.h" /*todo*/ -#include "dml/dcn35/dcn35_fpu.h" -#include "dcn35/dcn35_dwb.h" -#include "dcn35/dcn35_mmhubbub.h" - -#include "dcn/dcn_3_5_0_offset.h" -#include "dcn/dcn_3_5_0_sh_mask.h" -#include "nbio/nbio_7_11_0_offset.h" -#include "mmhub/mmhub_3_3_0_offset.h" -#include "mmhub/mmhub_3_3_0_sh_mask.h" - -#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0 -#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL - -#include "reg_helper.h" -#include "dce/dmub_abm.h" -#include "dce/dmub_psr.h" -#include "dce/dce_aux.h" -#include "dce/dce_i2c.h" -#include "dml/dcn31/display_mode_vba_31.h" /*temp*/ -#include "vm_helper.h" -#include "dcn20/dcn20_vmid.h" - -#include "link_enc_cfg.h" -#define DC_LOGGER_INIT(logger) - -enum dcn35_clk_src_array_id { - DCN35_CLK_SRC_PLL0, - DCN35_CLK_SRC_PLL1, - DCN35_CLK_SRC_PLL2, - DCN35_CLK_SRC_PLL3, - DCN35_CLK_SRC_PLL4, - DCN35_CLK_SRC_TOTAL -}; - -/* begin ********************* - * macros to expend register list macro defined in HW object header file - */ - -/* DCN */ -/* TODO awful hack. 
fixup dcn20_dwb.h */ -#undef BASE_INNER -#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] - -#define BASE(seg) BASE_INNER(seg) - -#define SR(reg_name)\ - REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SR_ARR(reg_name, id) \ - REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name - -#define SR_ARR_INIT(reg_name, id, value) \ - REG_STRUCT[id].reg_name = value - -#define SRI(reg_name, block, id)\ - REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI_ARR(reg_name, block, id)\ - REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SR_ARR_I2C(reg_name, id) \ - REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name - -#define SRI_ARR_I2C(reg_name, block, id)\ - REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI_ARR_ALPHABET(reg_name, block, index, id)\ - REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRI2(reg_name, block, id)\ - .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SRI2_ARR(reg_name, block, id)\ - REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ - reg ## reg_name - -#define SRIR(var_name, reg_name, block, id)\ - .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII(reg_name, block, id)\ - REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_ARR_2(reg_name, block, id, inst)\ - REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_MPC_RMU(reg_name, block, id)\ - .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define SRII_DWB(reg_name, temp_name, block, id)\ - REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## temp_name - -#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define DCCG_SRII(reg_name, block, id)\ - REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - reg ## block ## id ## _ ## reg_name - -#define VUPDATE_SRII(reg_name, block, id)\ - REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ - reg ## reg_name ## _ ## block ## id - -/* NBIO */ -#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg] - -#define NBIO_BASE(seg) \ - NBIO_BASE_INNER(seg) - -#define NBIO_SR(reg_name)\ - REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX2_ ## reg_name - -#define NBIO_SR_ARR(reg_name, id)\ - REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ - regBIF_BX2_ ## reg_name - -#define bios_regs_init() \ - ( \ - NBIO_SR(BIOS_SCRATCH_3),\ - NBIO_SR(BIOS_SCRATCH_6)\ - ) - -static struct bios_registers bios_regs; - -#define clk_src_regs_init(index, pllid)\ - CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) - -static struct dce110_clk_src_regs clk_src_regs[5]; - -static const struct dce110_clk_src_shift cs_shift = { - 
CS_COMMON_MASK_SH_LIST_DCN3_1_4(__SHIFT) -}; - -static const struct dce110_clk_src_mask cs_mask = { - CS_COMMON_MASK_SH_LIST_DCN3_1_4(_MASK) -}; - -#define abm_regs_init(id)\ - ABM_DCN32_REG_LIST_RI(id) - -static struct dce_abm_registers abm_regs[4]; - -static const struct dce_abm_shift abm_shift = { - ABM_MASK_SH_LIST_DCN35(__SHIFT) -}; - -static const struct dce_abm_mask abm_mask = { - ABM_MASK_SH_LIST_DCN35(_MASK) -}; - -#define audio_regs_init(id)\ - AUD_COMMON_REG_LIST_RI(id) - -static struct dce_audio_registers audio_regs[7]; - - -#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ - SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ - AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) - -static const struct dce_audio_shift audio_shift = { - DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_audio_mask audio_mask = { - DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) -}; - -#define vpg_regs_init(id)\ - VPG_DCN31_REG_LIST_RI(id) - -static struct dcn31_vpg_registers vpg_regs[10]; - -static const struct dcn31_vpg_shift vpg_shift = { - DCN31_VPG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_vpg_mask vpg_mask = { - DCN31_VPG_MASK_SH_LIST(_MASK) -}; - -#define afmt_regs_init(id)\ - AFMT_DCN31_REG_LIST_RI(id) - -static struct dcn31_afmt_registers afmt_regs[6]; - -static const struct dcn31_afmt_shift afmt_shift = { - DCN31_AFMT_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_afmt_mask afmt_mask = { - DCN31_AFMT_MASK_SH_LIST(_MASK) -}; - -#define apg_regs_init(id)\ - APG_DCN31_REG_LIST_RI(id) - -static struct dcn31_apg_registers apg_regs[4]; - -static const struct dcn31_apg_shift apg_shift = { - DCN31_APG_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_apg_mask apg_mask = { - DCN31_APG_MASK_SH_LIST(_MASK) -}; - -#define stream_enc_regs_init(id)\ - SE_DCN35_REG_LIST_RI(id) - -static struct dcn10_stream_enc_registers stream_enc_regs[5]; - -static const struct dcn10_stream_encoder_shift se_shift = { - SE_COMMON_MASK_SH_LIST_DCN35(__SHIFT) -}; - -static const struct dcn10_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN35(_MASK) -}; - -#define aux_regs_init(id)\ - DCN2_AUX_REG_LIST_RI(id) - -static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5]; - -#define hpd_regs_init(id)\ - HPD_REG_LIST_RI(id) - -static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5]; - - -static const struct dce110_aux_registers_shift aux_shift = { - DCN_AUX_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce110_aux_registers_mask aux_mask = { - DCN_AUX_MASK_SH_LIST(_MASK) -}; - -#define link_regs_init(id, phyid)\ - ( \ - LE_DCN35_REG_LIST_RI(id), \ - UNIPHY_DCN2_REG_LIST_RI(id, phyid)\ - ) - -static struct dcn10_link_enc_registers link_enc_regs[5]; - -static const struct dcn10_link_enc_shift le_shift = { - LINK_ENCODER_MASK_SH_LIST_DCN35(__SHIFT), \ - //DPCS_DCN31_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn10_link_enc_mask le_mask = { - LINK_ENCODER_MASK_SH_LIST_DCN35(_MASK), \ - //DPCS_DCN31_MASK_SH_LIST(_MASK) -}; - -#define hpo_dp_stream_encoder_reg_init(id)\ - DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) - -static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4]; - -static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { - DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) -}; - -#define 
hpo_dp_link_encoder_reg_init(id)\ - DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) - /*DCN3_1_RDPCSTX_REG_LIST(0),*/ - /*DCN3_1_RDPCSTX_REG_LIST(1),*/ - /*DCN3_1_RDPCSTX_REG_LIST(2),*/ - /*DCN3_1_RDPCSTX_REG_LIST(3),*/ - -static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2]; - -static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { - DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { - DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(_MASK) -}; - -#define dpp_regs_init(id)\ - DPP_REG_LIST_DCN35_RI(id) - -static struct dcn3_dpp_registers dpp_regs[4]; - -static const struct dcn35_dpp_shift tf_shift = { - DPP_REG_LIST_SH_MASK_DCN35(__SHIFT) -}; - -static const struct dcn35_dpp_mask tf_mask = { - DPP_REG_LIST_SH_MASK_DCN35(_MASK) -}; - -#define opp_regs_init(id)\ - OPP_REG_LIST_DCN35_RI(id) - -static struct dcn35_opp_registers opp_regs[4]; - -static const struct dcn35_opp_shift opp_shift = { - OPP_MASK_SH_LIST_DCN35(__SHIFT) -}; - -static const struct dcn35_opp_mask opp_mask = { - OPP_MASK_SH_LIST_DCN35(_MASK) -}; - -#define aux_engine_regs_init(id)\ - ( \ - AUX_COMMON_REG_LIST0_RI(id), \ - SR_ARR_INIT(AUXN_IMPCAL, id, 0), \ - SR_ARR_INIT(AUXP_IMPCAL, id, 0), \ - SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK) \ - ) - -static struct dce110_aux_registers aux_engine_regs[5]; - -#define dwbc_regs_dcn3_init(id)\ - DWBC_COMMON_REG_LIST_DCN30_RI(id) - -static struct dcn30_dwbc_registers dwbc35_regs[1]; - -static const struct dcn35_dwbc_shift dwbc35_shift = { - DWBC_COMMON_MASK_SH_LIST_DCN35(__SHIFT) -}; - -static const struct dcn35_dwbc_mask dwbc35_mask = { - DWBC_COMMON_MASK_SH_LIST_DCN35(_MASK) -}; - -#define mcif_wb_regs_dcn3_init(id)\ - MCIF_WB_COMMON_REG_LIST_DCN3_5_RI(id) - -static struct dcn35_mmhubbub_registers mcif_wb35_regs[1]; - -static const struct dcn35_mmhubbub_shift mcif_wb35_shift = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT) -}; - -static const struct dcn35_mmhubbub_mask mcif_wb35_mask = { - MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(_MASK) -}; - -#define dsc_regsDCN35_init(id)\ - DSC_REG_LIST_DCN20_RI(id) - -static struct dcn20_dsc_registers dsc_regs[4]; - -static const struct dcn35_dsc_shift dsc_shift = { - DSC_REG_LIST_SH_MASK_DCN35(__SHIFT) -}; - -static const struct dcn35_dsc_mask dsc_mask = { - DSC_REG_LIST_SH_MASK_DCN35(_MASK) -}; - -static struct dcn30_mpc_registers mpc_regs; - -#define dcn_mpc_regs_init() \ - MPC_REG_LIST_DCN3_2_RI(0),\ - MPC_REG_LIST_DCN3_2_RI(1),\ - MPC_REG_LIST_DCN3_2_RI(2),\ - MPC_REG_LIST_DCN3_2_RI(3),\ - MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\ - MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\ - MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\ - MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\ - MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0) - -static const struct dcn30_mpc_shift mpc_shift = { - MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT) -}; - -static const struct dcn30_mpc_mask mpc_mask = { - MPC_COMMON_MASK_SH_LIST_DCN32(_MASK) -}; - -#define optc_regs_init(id)\ - OPTC_COMMON_REG_LIST_DCN3_5_RI(id) - -static struct dcn_optc_registers optc_regs[4]; - -static const struct dcn_optc_shift optc_shift = { - OPTC_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT) -}; - -static const struct dcn_optc_mask optc_mask = { - OPTC_COMMON_MASK_SH_LIST_DCN3_5(_MASK) -}; - -#define hubp_regs_init(id)\ - HUBP_REG_LIST_DCN30_RI(id) - -static struct dcn_hubp2_registers hubp_regs[4]; - - -static const struct dcn35_hubp2_shift hubp_shift = { - HUBP_MASK_SH_LIST_DCN35(__SHIFT) -}; - -static const struct dcn35_hubp2_mask 
hubp_mask = { - HUBP_MASK_SH_LIST_DCN35(_MASK) -}; - -static struct dcn_hubbub_registers hubbub_reg; - -#define hubbub_reg_init()\ - HUBBUB_REG_LIST_DCN35(0) - -static const struct dcn_hubbub_shift hubbub_shift = { - HUBBUB_MASK_SH_LIST_DCN35(__SHIFT) -}; - -static const struct dcn_hubbub_mask hubbub_mask = { - HUBBUB_MASK_SH_LIST_DCN35(_MASK) -}; - -static struct dccg_registers dccg_regs; - -#define dccg_regs_init()\ - DCCG_REG_LIST_DCN35() - -static const struct dccg_shift dccg_shift = { - DCCG_MASK_SH_LIST_DCN35(__SHIFT) -}; - -static const struct dccg_mask dccg_mask = { - DCCG_MASK_SH_LIST_DCN35(_MASK) -}; - -static struct pg_cntl_registers pg_cntl_regs; - -#define pg_cntl_dcn35_regs_init() \ - PG_CNTL_REG_LIST_DCN35() - -static const struct pg_cntl_shift pg_cntl_shift = { - PG_CNTL_MASK_SH_LIST_DCN35(__SHIFT) -}; - -static const struct pg_cntl_mask pg_cntl_mask = { - PG_CNTL_MASK_SH_LIST_DCN35(_MASK) -}; - -#define SRII2(reg_name_pre, reg_name_post, id)\ - .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ - ## id ## _ ## reg_name_post ## _BASE_IDX) + \ - reg ## reg_name_pre ## id ## _ ## reg_name_post - -static struct dce_hwseq_registers hwseq_reg; - -#define hwseq_reg_init()\ - HWSEQ_DCN35_REG_LIST() - -#define HWSEQ_DCN35_MASK_SH_LIST(mask_sh)\ - HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ - HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ - HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ - HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ - HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - 
HWS_SF(, DOMAIN22_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN23_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN24_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DOMAIN25_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ - HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ - HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ - HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ - HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ - HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ - HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, DISPCLK_R_DMU_GATE_DIS, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, DISPCLK_G_RBBMIF_GATE_DIS, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, RBBMIF_FGCG_REP_DIS, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, DPREFCLK_ALLOW_DS_CLKSTOP, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, DISPCLK_ALLOW_DS_CLKSTOP, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, DPPCLK_ALLOW_DS_CLKSTOP, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, DTBCLK_ALLOW_DS_CLKSTOP, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, DCFCLK_ALLOW_DS_CLKSTOP, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, DPIACLK_ALLOW_DS_CLKSTOP, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, LONO_FGCG_REP_DIS, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE, mask_sh),\ - HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh),\ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_FE_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_FE_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_FE_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_FE_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ - HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh) - -static const struct dce_hwseq_shift hwseq_shift = { - HWSEQ_DCN35_MASK_SH_LIST(__SHIFT) -}; - -static const struct dce_hwseq_mask hwseq_mask = { - HWSEQ_DCN35_MASK_SH_LIST(_MASK) -}; - -#define vmid_regs_init(id)\ - DCN20_VMID_REG_LIST_RI(id) - -static struct dcn_vmid_registers vmid_regs[16]; - -static const struct dcn20_vmid_shift vmid_shifts = { - DCN20_VMID_MASK_SH_LIST(__SHIFT) -}; - -static const struct dcn20_vmid_mask vmid_masks = { - DCN20_VMID_MASK_SH_LIST(_MASK) -}; - -static const struct resource_caps res_cap_dcn35 = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 5, - .num_stream_encoder = 5, - .num_dig_link_enc = 5, - .num_hpo_dp_stream_encoder = 4, - .num_hpo_dp_link_encoder = 2, - .num_pll = 4,/*1 c10 edp, 3xc20 combo PHY*/ - .num_dwb = 1, - .num_ddc = 5, - .num_vmid = 16, - .num_mpc_3dlut = 
2, - .num_dsc = 4, -}; - -static const struct dc_plane_cap plane_cap = { - .type = DC_PLANE_TYPE_DCN_UNIVERSAL, - .per_pixel_alpha = true, - - .pixel_format_support = { - .argb8888 = true, - .nv12 = true, - .fp16 = true, - .p010 = true, - .ayuv = false, - }, - - .max_upscale_factor = { - .argb8888 = 16000, - .nv12 = 16000, - .fp16 = 16000 - }, - - // 6:1 downscaling ratio: 1000/6 = 166.666 - .max_downscale_factor = { - .argb8888 = 167, - .nv12 = 167, - .fp16 = 167 - }, - 64, /* min_width */ - 64 /* min_height */ -}; - -static const struct dc_debug_options debug_defaults_drv = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = false, - .clock_trace = true, - .disable_pplib_clock_request = false, - .pipe_split_policy = MPC_SPLIT_AVOID, - .force_single_disp_pipe_split = false, - .disable_dcc = DCC_ENABLE, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_dsc_power_gate = true, - .vsr_support = true, - .performance_trace = false, - .max_downscale_src_width = 4096,/*up to true 4k*/ - .disable_pplib_wm_range = false, - .scl_reset_length10 = true, - .sanity_checks = false, - .underflow_assert_delay_us = 0xFFFFFFFF, - .dwb_fi_phase = -1, // -1 = disable, - .dmub_command_table = true, - .pstate_enabled = true, - .use_max_lb = true, - .enable_mem_low_power = { - .bits = { - .vga = false, - .i2c = true, - .dmcu = false, // This was previously known to cause a hang on S3 cycles if enabled - .dscl = true, - .cm = false, - .mpc = true, - .optc = true, - .vpg = true, - .afmt = true, - } - }, - .root_clock_optimization = { - .bits = { - .dpp = true, - .dsc = true,/*dscclk and dsc pg*/ - .hdmistream = true, - .hdmichar = true, - .dpstream = true, - .symclk32_se = true, - .symclk32_le = true, - .symclk_fe = true, - .physymclk = true, - .dpiasymclk = true, - } - }, - .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT, - .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disabling Z9 */ - .using_dml2 = true, - .support_eDP1_5 = true, - .enable_hpo_pg_support = false, - .enable_legacy_fast_update = true, - .enable_single_display_2to1_odm_policy = false, - .disable_idle_power_optimizations = true, - .dmcub_emulation = false, - .disable_boot_optimizations = false, - .disable_unbounded_requesting = false, - .disable_mem_low_power = false, - // must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions - .enable_double_buffered_dsc_pg_support = true, - .enable_dp_dig_pixel_rate_div_policy = 1, - .disable_z10 = false, - .ignore_pg = true, - .psp_disabled_wa = true, - .ips2_eval_delay_us = 200, - .ips2_entry_delay_us = 400 -}; - -static const struct dc_panel_config panel_config_defaults = { - .psr = { - .disable_psr = false, - .disallow_psrsu = false, - }, - .ilr = { - .optimize_edp_link_rate = true, - }, -}; - -static void dcn35_dpp_destroy(struct dpp **dpp) -{ - kfree(TO_DCN20_DPP(*dpp)); - *dpp = NULL; -} - -static struct dpp *dcn35_dpp_create(struct dc_context *ctx, uint32_t inst) -{ - struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); - bool success = (dpp != NULL); - - if (!success) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT dpp_regs - dpp_regs_init(0), - dpp_regs_init(1), - dpp_regs_init(2), - dpp_regs_init(3); - - success = dpp35_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, - &tf_mask); - if (success) { - dpp35_set_fgcg( - dpp, - ctx->dc->debug.enable_fine_grain_clock_gating.bits.dpp); - return &dpp->base; - } - - BREAK_TO_DEBUGGER(); - kfree(dpp); - return NULL; -} -
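Each create() helper in this file relies on the same runtime register-initialization idiom seen in dcn35_dpp_create() above: a static per-block array of register structs, filled in by *_regs_init(id) macros that expand (through the shared REG_STRUCT token and SRI_ARR()-style helpers) into plain assignments. A minimal, self-contained sketch of the idiom follows; every name and offset in it is an illustrative stand-in, not a real DCN macro or register.

#include <stdint.h>
#include <stdio.h>

struct demo_dpp_registers {
	uint32_t DPP_CONTROL;
	uint32_t CM_CONTROL;
};

static struct demo_dpp_registers demo_dpp_regs[4];

/* Stand-in for the BASE(seg) + reg##block##id##_##field arithmetic
 * that the real SRI_ARR()/SR_ARR() helpers perform. */
#define DEMO_BASE(id)	(0x1000u + 0x100u * (id))

#define DEMO_SRI_ARR(field, id, off) \
	(REG_STRUCT[id].field = DEMO_BASE(id) + (off))

#define demo_dpp_regs_init(id) \
	(DEMO_SRI_ARR(DPP_CONTROL, id, 0x0u), \
	 DEMO_SRI_ARR(CM_CONTROL, id, 0x4u))

static void demo_dpp_create(uint32_t inst)
{
/* Point the shared REG_STRUCT token at this block's array, exactly as
 * the driver's "#undef REG_STRUCT / #define REG_STRUCT dpp_regs" does. */
#undef REG_STRUCT
#define REG_STRUCT demo_dpp_regs
	demo_dpp_regs_init(0),
	demo_dpp_regs_init(1),
	demo_dpp_regs_init(2),
	demo_dpp_regs_init(3);

	printf("dpp%u: DPP_CONTROL at 0x%x\n",
	       inst, demo_dpp_regs[inst].DPP_CONTROL);
}

int main(void)
{
	demo_dpp_create(2);	/* prints "dpp2: DPP_CONTROL at 0x1200" */
	return 0;
}

The payoff is that one statically sized array serves all instances of a block: each create(inst) can repopulate every entry idempotently and then hand out &regs[inst] to the construct() call, which is exactly what dcn35_dpp_create() does with dpp_regs.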
-static struct output_pixel_processor *dcn35_opp_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_opp *opp = - kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); - - if (!opp) { - BREAK_TO_DEBUGGER(); - return NULL; - } - -#undef REG_STRUCT -#define REG_STRUCT opp_regs - opp_regs_init(0), - opp_regs_init(1), - opp_regs_init(2), - opp_regs_init(3); - - dcn35_opp_construct(opp, ctx, inst, - &opp_regs[inst], &opp_shift, &opp_mask); - - dcn35_opp_set_fgcg(opp, ctx->dc->debug.enable_fine_grain_clock_gating.bits.opp); - - return &opp->base; -} - -static struct dce_aux *dcn31_aux_engine_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); - - if (!aux_engine) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT aux_engine_regs - aux_engine_regs_init(0), - aux_engine_regs_init(1), - aux_engine_regs_init(2), - aux_engine_regs_init(3), - aux_engine_regs_init(4); - - dce110_aux_engine_construct(aux_engine, ctx, inst, - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst], - &aux_mask, - &aux_shift, - ctx->dc->caps.extended_aux_timeout_support); - - return &aux_engine->base; -} - -#define i2c_inst_regs_init(id)\ - I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) - -static struct dce_i2c_registers i2c_hw_regs[5]; - -static const struct dce_i2c_shift i2c_shifts = { - I2C_COMMON_MASK_SH_LIST_DCN35(__SHIFT) -}; - -static const struct dce_i2c_mask i2c_masks = { - I2C_COMMON_MASK_SH_LIST_DCN35(_MASK) -}; - -/* ========================================================== */ - -/* - * DPIA index | Preferred Encoder | Host Router - * 0 | C | 0 - * 1 | First Available | 0 - * 2 | D | 1 - * 3 | First Available | 1 - */ -/* ========================================================== */ -static const enum engine_id dpia_to_preferred_enc_id_table[] = { - ENGINE_ID_DIGC, - ENGINE_ID_DIGC, - ENGINE_ID_DIGD, - ENGINE_ID_DIGD -}; - -static enum engine_id dcn35_get_preferred_eng_id_dpia(unsigned int dpia_index) -{ - return dpia_to_preferred_enc_id_table[dpia_index]; -} - -static struct dce_i2c_hw *dcn31_i2c_hw_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); - - if (!dce_i2c_hw) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT i2c_hw_regs - i2c_inst_regs_init(1), - i2c_inst_regs_init(2), - i2c_inst_regs_init(3), - i2c_inst_regs_init(4), - i2c_inst_regs_init(5); - - dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, - &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); - - return dce_i2c_hw; -} -static struct mpc *dcn35_mpc_create( - struct dc_context *ctx, - int num_mpcc, - int num_rmu) -{ - struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); - - if (!mpc30) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT mpc_regs - dcn_mpc_regs_init(); - - dcn32_mpc_construct(mpc30, ctx, - &mpc_regs, - &mpc_shift, - &mpc_mask, - num_mpcc, - num_rmu); - - return &mpc30->base; -} - -static struct hubbub *dcn35_hubbub_create(struct dc_context *ctx) -{ - int i; - - struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), - GFP_KERNEL); - - if (!hubbub3) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT hubbub_reg - hubbub_reg_init(); - -#undef REG_STRUCT -#define REG_STRUCT vmid_regs - vmid_regs_init(0), - vmid_regs_init(1), - vmid_regs_init(2), - vmid_regs_init(3), - vmid_regs_init(4), - vmid_regs_init(5), - vmid_regs_init(6), - vmid_regs_init(7), - vmid_regs_init(8), - 
vmid_regs_init(9), - vmid_regs_init(10), - vmid_regs_init(11), - vmid_regs_init(12), - vmid_regs_init(13), - vmid_regs_init(14), - vmid_regs_init(15); - - hubbub35_construct(hubbub3, ctx, - &hubbub_reg, - &hubbub_shift, - &hubbub_mask, - 384,/*ctx->dc->dml.ip.det_buffer_size_kbytes,*/ - 8, /*ctx->dc->dml.ip.pixel_chunk_size_kbytes,*/ - 1792 /*ctx->dc->dml.ip.config_return_buffer_size_in_kbytes*/); - - - for (i = 0; i < res_cap_dcn35.num_vmid; i++) { - struct dcn20_vmid *vmid = &hubbub3->vmid[i]; - - vmid->ctx = ctx; - - vmid->regs = &vmid_regs[i]; - vmid->shifts = &vmid_shifts; - vmid->masks = &vmid_masks; - } - - return &hubbub3->base; -} - -static struct timing_generator *dcn35_timing_generator_create( - struct dc_context *ctx, - uint32_t instance) -{ - struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_KERNEL); - - if (!tgn10) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT optc_regs - optc_regs_init(0), - optc_regs_init(1), - optc_regs_init(2), - optc_regs_init(3); - - tgn10->base.inst = instance; - tgn10->base.ctx = ctx; - - tgn10->tg_regs = &optc_regs[instance]; - tgn10->tg_shift = &optc_shift; - tgn10->tg_mask = &optc_mask; - - dcn35_timing_generator_init(tgn10); - - return &tgn10->base; -} - -static const struct encoder_feature_support link_enc_feature = { - .max_hdmi_deep_color = COLOR_DEPTH_121212, - .max_hdmi_pixel_clock = 600000, - .hdmi_ycbcr420_supported = true, - .dp_ycbcr420_supported = true, - .fec_supported = true, - .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_HBR3_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true -}; - -static struct link_encoder *dcn35_link_encoder_create( - struct dc_context *ctx, - const struct encoder_init_data *enc_init_data) -{ - struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - - if (!enc20) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT link_enc_aux_regs - aux_regs_init(0), - aux_regs_init(1), - aux_regs_init(2), - aux_regs_init(3), - aux_regs_init(4); - -#undef REG_STRUCT -#define REG_STRUCT link_enc_hpd_regs - hpd_regs_init(0), - hpd_regs_init(1), - hpd_regs_init(2), - hpd_regs_init(3), - hpd_regs_init(4); - -#undef REG_STRUCT -#define REG_STRUCT link_enc_regs - link_regs_init(0, A), - link_regs_init(1, B), - link_regs_init(2, C), - link_regs_init(3, D), - link_regs_init(4, E); - - dcn35_link_encoder_construct(enc20, - enc_init_data, - &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], - &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source], - &le_shift, - &le_mask); - - return &enc20->enc10.base; -} - -/* Create a minimal link encoder object not associated with a particular - * physical connector. 
- * resource_funcs.link_enc_create_minimal - */ -static struct link_encoder *dcn31_link_enc_create_minimal( - struct dc_context *ctx, enum engine_id eng_id) -{ - struct dcn20_link_encoder *enc20; - - if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) - return NULL; - - enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); - if (!enc20) - return NULL; - - dcn31_link_encoder_construct_minimal( - enc20, - ctx, - &link_enc_feature, - &link_enc_regs[eng_id - ENGINE_ID_DIGA], - eng_id); - - return &enc20->enc10.base; -} - -static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) -{ - struct dcn31_panel_cntl *panel_cntl = - kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); - - if (!panel_cntl) - return NULL; - - dcn31_panel_cntl_construct(panel_cntl, init_data); - - return &panel_cntl->base; -} - -static void read_dce_straps( - struct dc_context *ctx, - struct resource_straps *straps) -{ - generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), - FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); - -} - -static struct audio *dcn31_create_audio( - struct dc_context *ctx, unsigned int inst) -{ - -#undef REG_STRUCT -#define REG_STRUCT audio_regs - audio_regs_init(0), - audio_regs_init(1), - audio_regs_init(2), - audio_regs_init(3), - audio_regs_init(4); - audio_regs_init(5); - audio_regs_init(6); - - return dce_audio_create(ctx, inst, - &audio_regs[inst], &audio_shift, &audio_mask); -} - -static struct vpg *dcn31_vpg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); - - if (!vpg31) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT vpg_regs - vpg_regs_init(0), - vpg_regs_init(1), - vpg_regs_init(2), - vpg_regs_init(3), - vpg_regs_init(4), - vpg_regs_init(5), - vpg_regs_init(6), - vpg_regs_init(7), - vpg_regs_init(8), - vpg_regs_init(9); - - vpg31_construct(vpg31, ctx, inst, - &vpg_regs[inst], - &vpg_shift, - &vpg_mask); - - return &vpg31->base; -} - -static struct afmt *dcn31_afmt_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); - - if (!afmt31) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT afmt_regs - afmt_regs_init(0), - afmt_regs_init(1), - afmt_regs_init(2), - afmt_regs_init(3), - afmt_regs_init(4), - afmt_regs_init(5); - - afmt31_construct(afmt31, ctx, inst, - &afmt_regs[inst], - &afmt_shift, - &afmt_mask); - - // Light sleep by default, no need to power down here - - return &afmt31->base; -} - -static struct apg *dcn31_apg_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); - - if (!apg31) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT apg_regs - apg_regs_init(0), - apg_regs_init(1), - apg_regs_init(2), - apg_regs_init(3); - - apg31_construct(apg31, ctx, inst, - &apg_regs[inst], - &apg_shift, - &apg_mask); - - return &apg31->base; -} - -static struct stream_encoder *dcn35_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn10_stream_encoder *enc1; - struct vpg *vpg; - struct afmt *afmt; - int vpg_inst; - int afmt_inst; - - /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ - if (eng_id <= ENGINE_ID_DIGF) { - vpg_inst = eng_id; - afmt_inst = eng_id; - } else - return NULL; - - enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - vpg = dcn31_vpg_create(ctx, 
vpg_inst); - afmt = dcn31_afmt_create(ctx, afmt_inst); - - if (!enc1 || !vpg || !afmt) { - kfree(enc1); - kfree(vpg); - kfree(afmt); - return NULL; - } - -#undef REG_STRUCT -#define REG_STRUCT stream_enc_regs - stream_enc_regs_init(0), - stream_enc_regs_init(1), - stream_enc_regs_init(2), - stream_enc_regs_init(3), - stream_enc_regs_init(4); - - dcn35_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, - eng_id, vpg, afmt, - &stream_enc_regs[eng_id], - &se_shift, &se_mask); - - return &enc1->base; -} - -static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( - enum engine_id eng_id, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; - struct vpg *vpg; - struct apg *apg; - uint32_t hpo_dp_inst; - uint32_t vpg_inst; - uint32_t apg_inst; - - ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); - hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; - - /* Mapping of VPG register blocks to HPO DP block instance: - * VPG[6] -> HPO_DP[0] - * VPG[7] -> HPO_DP[1] - * VPG[8] -> HPO_DP[2] - * VPG[9] -> HPO_DP[3] - */ - vpg_inst = hpo_dp_inst + 6; - - /* Mapping of APG register blocks to HPO DP block instance: - * APG[0] -> HPO_DP[0] - * APG[1] -> HPO_DP[1] - * APG[2] -> HPO_DP[2] - * APG[3] -> HPO_DP[3] - */ - apg_inst = hpo_dp_inst; - - /* allocate HPO stream encoder and create VPG sub-block */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); - vpg = dcn31_vpg_create(ctx, vpg_inst); - apg = dcn31_apg_create(ctx, apg_inst); - - if (!hpo_dp_enc31 || !vpg || !apg) { - kfree(hpo_dp_enc31); - kfree(vpg); - kfree(apg); - return NULL; - } - -#undef REG_STRUCT -#define REG_STRUCT hpo_dp_stream_enc_regs - hpo_dp_stream_encoder_reg_init(0), - hpo_dp_stream_encoder_reg_init(1), - hpo_dp_stream_encoder_reg_init(2), - hpo_dp_stream_encoder_reg_init(3); - - dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, - hpo_dp_inst, eng_id, vpg, apg, - &hpo_dp_stream_enc_regs[hpo_dp_inst], - &hpo_dp_se_shift, &hpo_dp_se_mask); - - return &hpo_dp_enc31->base; -} - -static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( - uint8_t inst, - struct dc_context *ctx) -{ - struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; - - /* allocate HPO link encoder */ - hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); - -#undef REG_STRUCT -#define REG_STRUCT hpo_dp_link_enc_regs - hpo_dp_link_encoder_reg_init(0), - hpo_dp_link_encoder_reg_init(1); - - hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, - &hpo_dp_link_enc_regs[inst], - &hpo_dp_le_shift, &hpo_dp_le_mask); - - return &hpo_dp_enc31->base; -} - -static struct dce_hwseq *dcn35_hwseq_create( - struct dc_context *ctx) -{ - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); - -#undef REG_STRUCT -#define REG_STRUCT hwseq_reg - hwseq_reg_init(); - - if (hws) { - hws->ctx = ctx; - hws->regs = &hwseq_reg; - hws->shifts = &hwseq_shift; - hws->masks = &hwseq_mask; - } - return hws; -} -static const struct resource_create_funcs res_create_funcs = { - .read_dce_straps = read_dce_straps, - .create_audio = dcn31_create_audio, - .create_stream_encoder = dcn35_stream_encoder_create, - .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, - .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, - .create_hwseq = dcn35_hwseq_create, -}; - -static void dcn35_resource_destruct(struct dcn35_resource_pool *pool) -{ - unsigned int i; - - for (i = 0; i < pool->base.stream_enc_count; i++) { - if 
(pool->base.stream_enc[i] != NULL) { - if (pool->base.stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); - pool->base.stream_enc[i]->vpg = NULL; - } - if (pool->base.stream_enc[i]->afmt != NULL) { - kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); - pool->base.stream_enc[i]->afmt = NULL; - } - kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); - pool->base.stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { - if (pool->base.hpo_dp_stream_enc[i] != NULL) { - if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { - kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); - pool->base.hpo_dp_stream_enc[i]->vpg = NULL; - } - if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { - kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); - pool->base.hpo_dp_stream_enc[i]->apg = NULL; - } - kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); - pool->base.hpo_dp_stream_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { - if (pool->base.hpo_dp_link_enc[i] != NULL) { - kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); - pool->base.hpo_dp_link_enc[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - if (pool->base.dscs[i] != NULL) - dcn20_dsc_destroy(&pool->base.dscs[i]); - } - - if (pool->base.mpc != NULL) { - kfree(TO_DCN20_MPC(pool->base.mpc)); - pool->base.mpc = NULL; - } - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } - for (i = 0; i < pool->base.pipe_count; i++) { - if (pool->base.dpps[i] != NULL) - dcn35_dpp_destroy(&pool->base.dpps[i]); - - if (pool->base.ipps[i] != NULL) - pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); - - if (pool->base.hubps[i] != NULL) { - kfree(TO_DCN20_HUBP(pool->base.hubps[i])); - pool->base.hubps[i] = NULL; - } - - if (pool->base.irqs != NULL) { - dal_irq_service_destroy(&pool->base.irqs); - } - } - - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - if (pool->base.opps[i] != NULL) - pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.timing_generators[i] != NULL) { - kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); - pool->base.timing_generators[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_dwb; i++) { - if (pool->base.dwbc[i] != NULL) { - kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); - pool->base.dwbc[i] = NULL; - } - if (pool->base.mcif_wb[i] != NULL) { - kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); - pool->base.mcif_wb[i] = NULL; - } - } - - for (i = 0; i < pool->base.audio_count; i++) { - if (pool->base.audios[i]) - dce_aud_destroy(&pool->base.audios[i]); - } - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] != NULL) { - dcn20_clock_source_destroy(&pool->base.clock_sources[i]); - pool->base.clock_sources[i] = NULL; - } - } - - for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { - if (pool->base.mpc_lut[i] != NULL) { - 
dc_3dlut_func_release(pool->base.mpc_lut[i]); - pool->base.mpc_lut[i] = NULL; - } - if (pool->base.mpc_shaper[i] != NULL) { - dc_transfer_func_release(pool->base.mpc_shaper[i]); - pool->base.mpc_shaper[i] = NULL; - } - } - - if (pool->base.dp_clock_source != NULL) { - dcn20_clock_source_destroy(&pool->base.dp_clock_source); - pool->base.dp_clock_source = NULL; - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - if (pool->base.multiple_abms[i] != NULL) - dce_abm_destroy(&pool->base.multiple_abms[i]); - } - - if (pool->base.psr != NULL) - dmub_psr_destroy(&pool->base.psr); - - if (pool->base.pg_cntl != NULL) - dcn_pg_cntl_destroy(&pool->base.pg_cntl); - - if (pool->base.dccg != NULL) - dcn_dccg_destroy(&pool->base.dccg); -} - -static struct hubp *dcn35_hubp_create( - struct dc_context *ctx, - uint32_t inst) -{ - struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); - - if (!hubp2) - return NULL; - -#undef REG_STRUCT -#define REG_STRUCT hubp_regs - hubp_regs_init(0), - hubp_regs_init(1), - hubp_regs_init(2), - hubp_regs_init(3); - - if (hubp35_construct(hubp2, ctx, inst, - &hubp_regs[inst], &hubp_shift, &hubp_mask)) - return &hubp2->base; - - BREAK_TO_DEBUGGER(); - kfree(hubp2); - return NULL; -} - -static void dcn35_dwbc_init(struct dcn30_dwbc *dwbc30, struct dc_context *ctx) -{ - dcn35_dwbc_set_fgcg( - dwbc30, ctx->dc->debug.enable_fine_grain_clock_gating.bits.dwb); -} - -static bool dcn35_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), - GFP_KERNEL); - - if (!dwbc30) { - dm_error("DC: failed to create dwbc30!\n"); - return false; - } - -#undef REG_STRUCT -#define REG_STRUCT dwbc35_regs - dwbc_regs_dcn3_init(0); - - dcn35_dwbc_construct(dwbc30, ctx, - &dwbc35_regs[i], - &dwbc35_shift, - &dwbc35_mask, - i); - - pool->dwbc[i] = &dwbc30->base; - - dcn35_dwbc_init(dwbc30, ctx); - } - return true; -} - -static void dcn35_mmhubbub_init(struct dcn30_mmhubbub *mcif_wb30, - struct dc_context *ctx) -{ - dcn35_mmhubbub_set_fgcg( - mcif_wb30, - ctx->dc->debug.enable_fine_grain_clock_gating.bits.mmhubbub); -} - -static bool dcn35_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) -{ - int i; - uint32_t pipe_count = pool->res_cap->num_dwb; - - for (i = 0; i < pipe_count; i++) { - struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), - GFP_KERNEL); - - if (!mcif_wb30) { - dm_error("DC: failed to create mcif_wb30!\n"); - return false; - } - -#undef REG_STRUCT -#define REG_STRUCT mcif_wb35_regs - mcif_wb_regs_dcn3_init(0); - - dcn35_mmhubbub_construct(mcif_wb30, ctx, - &mcif_wb35_regs[i], - &mcif_wb35_shift, - &mcif_wb35_mask, - i); - - dcn35_mmhubbub_init(mcif_wb30, ctx); - - pool->mcif_wb[i] = &mcif_wb30->base; - } - return true; -} - -static struct display_stream_compressor *dcn35_dsc_create( - struct dc_context *ctx, uint32_t inst) -{ - struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); - - if (!dsc) { - BREAK_TO_DEBUGGER(); - return NULL; - } - -#undef REG_STRUCT -#define REG_STRUCT dsc_regs - dsc_regsDCN35_init(0), - dsc_regsDCN35_init(1), - dsc_regsDCN35_init(2), - dsc_regsDCN35_init(3); - - dsc35_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); - dsc35_set_fgcg(dsc, - ctx->dc->debug.enable_fine_grain_clock_gating.bits.dsc); - return &dsc->base; -} - -static void dcn35_destroy_resource_pool(struct 
resource_pool **pool) -{ - struct dcn35_resource_pool *dcn35_pool = TO_DCN35_RES_POOL(*pool); - - dcn35_resource_destruct(dcn35_pool); - kfree(dcn35_pool); - *pool = NULL; -} - -static struct clock_source *dcn35_clock_source_create( - struct dc_context *ctx, - struct dc_bios *bios, - enum clock_source_id id, - const struct dce110_clk_src_regs *regs, - bool dp_clk_src) -{ - struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); - - if (!clk_src) - return NULL; - - if (dcn31_clk_src_construct(clk_src, ctx, bios, id, - regs, &cs_shift, &cs_mask)) { - clk_src->base.dp_clk_src = dp_clk_src; - return &clk_src->base; - } - - BREAK_TO_DEBUGGER(); - return NULL; -} - -static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap -}; - -static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config) -{ - *panel_config = panel_config_defaults; -} - - -static bool dcn35_validate_bandwidth(struct dc *dc, - struct dc_state *context, - bool fast_validate) -{ - bool out = false; - - out = dml2_validate(dc, context, fast_validate); - - if (fast_validate) - return out; - - DC_FP_START(); - dcn35_decide_zstate_support(dc, context); - DC_FP_END(); - - return out; -} - - -static struct resource_funcs dcn35_res_pool_funcs = { - .destroy = dcn35_destroy_resource_pool, - .link_enc_create = dcn35_link_encoder_create, - .link_enc_create_minimal = dcn31_link_enc_create_minimal, - .link_encs_assign = link_enc_cfg_link_encs_assign, - .link_enc_unassign = link_enc_cfg_link_enc_unassign, - .panel_cntl_create = dcn31_panel_cntl_create, - .validate_bandwidth = dcn35_validate_bandwidth, - .calculate_wm_and_dlg = NULL, - .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, - .populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, - .release_pipe = dcn20_release_pipe, - .add_stream_to_ctx = dcn30_add_stream_to_ctx, - .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, - .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, - .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, - .set_mcif_arb_params = dcn30_set_mcif_arb_params, - .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, - .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, - .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, - .update_bw_bounding_box = dcn35_update_bw_bounding_box_fpu, - .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, - .get_panel_config_defaults = dcn35_get_panel_config_defaults, - .get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia, -}; - -static bool dcn35_resource_construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dcn35_resource_pool *pool) -{ - int i; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data init_data; - -#undef REG_STRUCT -#define REG_STRUCT bios_regs - bios_regs_init(); - -#undef REG_STRUCT -#define REG_STRUCT clk_src_regs - clk_src_regs_init(0, A), - clk_src_regs_init(1, B), - clk_src_regs_init(2, C), - clk_src_regs_init(3, D), - clk_src_regs_init(4, E); - -#undef REG_STRUCT -#define REG_STRUCT abm_regs - abm_regs_init(0), - abm_regs_init(1), - abm_regs_init(2), - abm_regs_init(3); - -#undef REG_STRUCT -#define REG_STRUCT dccg_regs - dccg_regs_init(); - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap_dcn35; - - pool->base.funcs = &dcn35_res_pool_funcs; - - 
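/* dcn35_res_pool_funcs above follows the usual DC vtable pattern: only the
 * hooks that actually changed for DCN3.5 point at dcn35_* implementations,
 * while the rest reuse dcn10/dcn20/dcn30/dcn31 helpers unchanged, so a new
 * ASIC is brought up largely as a new table plus a handful of overrides.
 */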
/************************************************* - * Resource + asic cap hardcoding * - *************************************************/ - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = pool->base.res_cap->num_timing_generator; - pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; - dc->caps.max_downscale_ratio = 600; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.i2c_speed_in_khz_hdcp = 100; - dc->caps.max_cursor_size = 256; - dc->caps.min_horizontal_blanking_period = 80; - dc->caps.dmdata_alloc_size = 2048; - dc->caps.max_slave_planes = 2; - dc->caps.max_slave_yuv_planes = 2; - dc->caps.max_slave_rgb_planes = 2; - dc->caps.post_blend_color_processing = true; - dc->caps.force_dp_tps4_for_cp2520 = true; - if (dc->config.forceHBR2CP2520) - dc->caps.force_dp_tps4_for_cp2520 = false; - dc->caps.dp_hpo = true; - dc->caps.dp_hdmi21_pcon_support = true; - - dc->caps.edp_dsc_support = true; - dc->caps.extended_aux_timeout_support = true; - dc->caps.dmcub_support = true; - dc->caps.is_apu = true; - dc->caps.seamless_odm = true; - - dc->caps.zstate_support = true; - dc->caps.ips_support = true; - dc->caps.max_v_total = (1 << 15) - 1; - - /* Color pipeline capabilities */ - dc->caps.color.dpp.dcn_arch = 1; - dc->caps.color.dpp.input_lut_shared = 0; - dc->caps.color.dpp.icsc = 1; - dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr - dc->caps.color.dpp.dgam_rom_caps.srgb = 1; - dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; - dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; - dc->caps.color.dpp.dgam_rom_caps.pq = 1; - dc->caps.color.dpp.dgam_rom_caps.hlg = 1; - dc->caps.color.dpp.post_csc = 1; - dc->caps.color.dpp.gamma_corr = 1; - dc->caps.color.dpp.dgam_rom_for_yuv = 0; - - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 - // no OGAM ROM on DCN3.5 - dc->caps.color.dpp.ogam_rom_caps.srgb = 0; - dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; - dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.dpp.ogam_rom_caps.pq = 0; - dc->caps.color.dpp.ogam_rom_caps.hlg = 0; - dc->caps.color.dpp.ocsc = 0; - - dc->caps.color.mpc.gamut_remap = 1; - dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 - dc->caps.color.mpc.ogam_ram = 1; - dc->caps.color.mpc.ogam_rom_caps.srgb = 0; - dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; - dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; - dc->caps.color.mpc.ogam_rom_caps.pq = 0; - dc->caps.color.mpc.ogam_rom_caps.hlg = 0; - dc->caps.color.mpc.ocsc = 1; - - /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order - * to provide some margin. - * It's expected for future ASICs to have an equal or higher value, in order to - * have a deterministic power improvement from generation to generation.
- * (i.e., we should not expect a new ASIC generation with a lower vmin rate) - */ - dc->caps.max_disp_clock_khz_at_vmin = 650000; - - /* Use pipe-context-based OTG sync logic */ - dc->config.use_pipe_ctx_sync_logic = true; - - /* read VBIOS LTTPR caps */ - { - if (ctx->dc_bios->funcs->get_lttpr_caps) { - enum bp_result bp_query_result; - uint8_t is_vbios_lttpr_enable = 0; - - bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); - dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; - } - - /* interop bit is implicit */ - { - dc->caps.vbios_lttpr_aware = true; - } - } - - if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) - dc->debug = debug_defaults_drv; - - // Init the vm_helper - if (dc->vm_helper) - vm_helper_init(dc->vm_helper, 16); - - /************************************************* - * Create resources * - *************************************************/ - - /* Clock Sources for Pixel Clock */ - pool->base.clock_sources[DCN35_CLK_SRC_PLL0] = - dcn35_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCN35_CLK_SRC_PLL1] = - dcn35_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCN35_CLK_SRC_PLL2] = - dcn35_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCN35_CLK_SRC_PLL3] = - dcn35_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - pool->base.clock_sources[DCN35_CLK_SRC_PLL4] = - dcn35_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - - pool->base.clk_src_count = DCN35_CLK_SRC_TOTAL; - - /* todo: do not reuse phy_pll registers */ - pool->base.dp_clock_source = - dcn35_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - /* temporary till dml2 fully works without dml1 */ - dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip, DML_PROJECT_DCN31); - - /* TODO: DCCG */ - pool->base.dccg = dccg35_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); - if (pool->base.dccg == NULL) { - dm_error("DC: failed to create dccg!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - -#undef REG_STRUCT -#define REG_STRUCT pg_cntl_regs - pg_cntl_dcn35_regs_init(); - - pool->base.pg_cntl = pg_cntl35_create(ctx, &pg_cntl_regs, &pg_cntl_shift, &pg_cntl_mask); - if (pool->base.pg_cntl == NULL) { - dm_error("DC: failed to create power gate control!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* TODO: IRQ */ - init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dcn35_create(&init_data); - if (!pool->base.irqs) - goto create_fail; - - /* HUBBUB */ - pool->base.hubbub = dcn35_hubbub_create(ctx); - if (pool->base.hubbub == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create hubbub!\n"); - goto create_fail; - } - - /* HUBPs, DPPs, OPPs and TGs */ - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.hubps[i] = dcn35_hubp_create(ctx, i); - if (pool->base.hubps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create hubps!\n"); - goto create_fail; - } - - pool->base.dpps[i] = dcn35_dpp_create(ctx, i); - if (pool->base.dpps[i] == 
NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create dpps!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - pool->base.opps[i] = dcn35_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.timing_generators[i] = dcn35_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - pool->base.timing_generator_count = i; - - /* PSR */ - pool->base.psr = dmub_psr_create(ctx); - if (pool->base.psr == NULL) { - dm_error("DC: failed to create psr obj!\n"); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - - /* ABM */ - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.multiple_abms[i] = dmub_abm_create(ctx, - &abm_regs[i], - &abm_shift, - &abm_mask); - if (pool->base.multiple_abms[i] == NULL) { - dm_error("DC: failed to create abm for pipe %d!\n", i); - BREAK_TO_DEBUGGER(); - goto create_fail; - } - } - - /* MPC and DSC */ - pool->base.mpc = dcn35_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); - if (pool->base.mpc == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mpc!\n"); - goto create_fail; - } - - for (i = 0; i < pool->base.res_cap->num_dsc; i++) { - pool->base.dscs[i] = dcn35_dsc_create(ctx, i); - if (pool->base.dscs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create display stream compressor %d!\n", i); - goto create_fail; - } - } - - /* DWB and MMHUBBUB */ - if (!dcn35_dwbc_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create dwbc!\n"); - goto create_fail; - } - - if (!dcn35_mmhubbub_create(ctx, &pool->base)) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create mcif_wb!\n"); - goto create_fail; - } - - /* AUX and I2C */ - for (i = 0; i < pool->base.res_cap->num_ddc; i++) { - pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); - if (pool->base.engines[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create aux engine!!\n"); - goto create_fail; - } - pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); - if (pool->base.hw_i2cs[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC:failed to create hw i2c!!\n"); - goto create_fail; - } - pool->base.sw_i2cs[i] = NULL; - } - - /* DCN3.5 has 6 DPIA */ - pool->base.usb4_dpia_count = 4; - if (dc->debug.dpia_debug.bits.disable_dpia) - pool->base.usb4_dpia_count = 0; - - /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto create_fail; - - /* HW Sequencer and Plane caps */ - dcn35_hw_sequencer_construct(dc); - - dc->caps.max_planes = pool->base.pipe_count; - - for (i = 0; i < dc->caps.max_planes; ++i) - dc->caps.planes[i] = plane_cap; - - dc->cap_funcs = cap_funcs; - - dc->dcn_ip->max_num_dpp = pool->base.pipe_count; - - dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; - dc->dml2_options.use_native_soc_bb_construction = true; - if (dc->config.EnableMinDispClkODM) - dc->dml2_options.minimize_dispclk_using_odm = true; - dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; - - dc->dml2_options.callbacks.dc = dc; - 
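/* The dml2_options callbacks below hand the shared DML2 core function
 * pointers back into DC's resource layer (scaling parameters, ODM/MPC
 * slice queries, OPP head lookup), keeping DML2 decoupled from resource
 * internals.
 */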
dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; - dc->dml2_options.max_segments_per_hubp = 24; - - dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ - - if (dc->config.sdpif_request_limit_words_per_umc == 0) - dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/ - - return true; - -create_fail: - - dcn35_resource_destruct(pool); - - return false; -} - -struct resource_pool *dcn35_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc) -{ - struct dcn35_resource_pool *pool = - kzalloc(sizeof(struct dcn35_resource_pool), GFP_KERNEL); - - if (!pool) - return NULL; - - if (dcn35_resource_construct(init_data->num_virtual_links, dc, pool)) - return &pool->base; - - BREAK_TO_DEBUGGER(); - kfree(pool); - return NULL; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h deleted file mode 100644 index 99aea102e3..0000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h +++ /dev/null @@ -1,310 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef _DCN35_RESOURCE_H_ -#define _DCN35_RESOURCE_H_ - -#include "core_types.h" - -#define DCN3_5_VMIN_DISPCLK_HZ 717000000 -#define TO_DCN35_RES_POOL(pool)\ - container_of(pool, struct dcn35_resource_pool, base) - -extern struct _vcs_dpi_ip_params_st dcn3_5_ip; -extern struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc; - -struct dcn35_resource_pool { - struct resource_pool base; -}; - -struct resource_pool *dcn35_create_resource_pool( - const struct dc_init_data *init_data, - struct dc *dc); - -/* Defs for runtime init of registers */ - -#define OPP_REG_LIST_DCN20_RI(id) \ - OPP_REG_LIST_DCN10_RI(id), \ - OPP_DPG_REG_LIST_RI(id), \ - SRI_ARR(FMT_422_CONTROL, FMT, id), \ - SRI_ARR(OPPBUF_CONTROL1, OPPBUF, id) - -#define OPP_REG_LIST_DCN35_RI(id) \ - OPP_REG_LIST_DCN20_RI(id), \ - SRI2_ARR(OPP_TOP_CLK_CONTROL, OPP, id) - -#define VPG_DCN31_REG_LIST_RI(id) \ - SRI_ARR(VPG_GENERIC_STATUS, VPG, id), \ - SRI_ARR(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \ - SRI_ARR(VPG_GENERIC_PACKET_DATA, VPG, id), \ - SRI_ARR(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \ - SRI_ARR(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id), \ - SRI_ARR(VPG_MEM_PWR, VPG, id) - -#define AFMT_DCN31_REG_LIST_RI(id) \ - SRI_ARR(AFMT_INFOFRAME_CONTROL0, AFMT, id), \ - SRI_ARR(AFMT_VBI_PACKET_CONTROL, AFMT, id), \ - SRI_ARR(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \ - SRI_ARR(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \ - SRI_ARR(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \ - SRI_ARR(AFMT_60958_0, AFMT, id), \ - SRI_ARR(AFMT_60958_1, AFMT, id), \ - SRI_ARR(AFMT_60958_2, AFMT, id), \ - SRI_ARR(AFMT_MEM_PWR, AFMT, id) - -/* Stream encoder */ -#define SE_DCN35_REG_LIST_RI(id) \ - SRI_ARR(AFMT_CNTL, DIG, id), \ - SRI_ARR(DIG_FE_CNTL, DIG, id), \ - SRI_ARR(HDMI_CONTROL, DIG, id), \ - SRI_ARR(HDMI_DB_CONTROL, DIG, id), \ - SRI_ARR(HDMI_GC, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \ - SRI_ARR(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \ - SRI_ARR(HDMI_INFOFRAME_CONTROL0, DIG, id), \ - SRI_ARR(HDMI_INFOFRAME_CONTROL1, DIG, id), \ - SRI_ARR(HDMI_VBI_PACKET_CONTROL, DIG, id), \ - SRI_ARR(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\ - SRI_ARR(HDMI_ACR_PACKET_CONTROL, DIG, id),\ - SRI_ARR(HDMI_ACR_32_0, DIG, id),\ - SRI_ARR(HDMI_ACR_32_1, DIG, id),\ - SRI_ARR(HDMI_ACR_44_0, DIG, id),\ - SRI_ARR(HDMI_ACR_44_1, DIG, id),\ - SRI_ARR(HDMI_ACR_48_0, DIG, id),\ - SRI_ARR(HDMI_ACR_48_1, DIG, id),\ - SRI_ARR(DP_DB_CNTL, DP, id), \ - SRI_ARR(DP_MSA_MISC, DP, id), \ - SRI_ARR(DP_MSA_VBID_MISC, DP, id), \ - SRI_ARR(DP_MSA_COLORIMETRY, DP, id), \ - SRI_ARR(DP_MSA_TIMING_PARAM1, DP, id), \ - SRI_ARR(DP_MSA_TIMING_PARAM2, DP, id), \ - SRI_ARR(DP_MSA_TIMING_PARAM3, DP, id), \ - SRI_ARR(DP_MSA_TIMING_PARAM4, DP, id), \ - SRI_ARR(DP_MSE_RATE_CNTL, DP, id), \ - SRI_ARR(DP_MSE_RATE_UPDATE, DP, id), \ - SRI_ARR(DP_PIXEL_FORMAT, DP, id), \ - SRI_ARR(DP_SEC_CNTL, DP, id), \ - SRI_ARR(DP_SEC_CNTL1, DP, id), \ - SRI_ARR(DP_SEC_CNTL2, DP, id), \ - SRI_ARR(DP_SEC_CNTL5, DP, id), \ - SRI_ARR(DP_SEC_CNTL6, DP, id), \ - SRI_ARR(DP_STEER_FIFO, DP, id), \ - SRI_ARR(DP_VID_M, DP, id), \ - 
SRI_ARR(DP_VID_N, DP, id), \ - SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \ - SRI_ARR(DP_VID_TIMING, DP, id), \ - SRI_ARR(DP_SEC_AUD_N, DP, id), \ - SRI_ARR(DP_SEC_TIMESTAMP, DP, id), \ - SRI_ARR(DP_DSC_CNTL, DP, id), \ - SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \ - SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ - SRI_ARR(DP_SEC_FRAMING4, DP, id), \ - SRI_ARR(DP_GSP11_CNTL, DP, id), \ - SRI_ARR(DME_CONTROL, DME, id),\ - SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \ - SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ - SRI_ARR(DIG_FE_CNTL, DIG, id), \ - SRI_ARR(DIG_FE_EN_CNTL, DIG, id), \ - SRI_ARR(DIG_FE_CLK_CNTL, DIG, id), \ - SRI_ARR(DIG_CLOCK_PATTERN, DIG, id), \ - SRI_ARR(DIG_FIFO_CTRL0, DIG, id), \ - SRI_ARR(STREAM_MAPPER_CONTROL, DIG, id) - -#define LE_DCN35_REG_LIST_RI(id)\ - LE_DCN3_REG_LIST_RI(id),\ - SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \ - SR_ARR(DIO_LINKA_CNTL, id), \ - SR_ARR(DIO_LINKB_CNTL, id), \ - SR_ARR(DIO_LINKC_CNTL, id), \ - SR_ARR(DIO_LINKD_CNTL, id), \ - SR_ARR(DIO_LINKE_CNTL, id), \ - SR_ARR(DIO_LINKF_CNTL, id),\ - SRI_ARR(DIG_BE_CLK_CNTL, DIG, id),\ - SR_ARR(DIO_CLK_CNTL, id) - -#define MCIF_WB_COMMON_REG_LIST_DCN3_5_RI(inst) \ - MCIF_WB_COMMON_REG_LIST_DCN32_RI(inst), \ - SRI2_ARR(MMHUBBUB_CLOCK_CNTL, MMHUBBUB, inst) - -#define HWSEQ_DCN35_REG_LIST()\ - SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ - SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ - SR(DIO_MEM_PWR_CTRL), \ - SR(ODM_MEM_PWR_CTRL3), \ - SR(MMHUBBUB_MEM_PWR_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL), \ - SR(DCCG_GATE_DISABLE_CNTL2), \ - SR(DCCG_GATE_DISABLE_CNTL5), \ - SR(DCFCLK_CNTL),\ - SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ - SRII(PIXEL_RATE_CNTL, OTG, 0), \ - SRII(PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PIXEL_RATE_CNTL, OTG, 3),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ - SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ - SR(MICROSECOND_TIME_BASE_DIV), \ - SR(MILLISECOND_TIME_BASE_DIV), \ - SR(DISPCLK_FREQ_CHANGE_CNTL), \ - SR(RBBMIF_TIMEOUT_DIS), \ - SR(RBBMIF_TIMEOUT_DIS_2), \ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_CTRL), \ - SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ - SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ - SR(MPC_CRC_CTRL), \ - SR(MPC_CRC_RESULT_GB), \ - SR(MPC_CRC_RESULT_C), \ - SR(MPC_CRC_RESULT_AR), \ - SR(DOMAIN0_PG_CONFIG), \ - SR(DOMAIN1_PG_CONFIG), \ - SR(DOMAIN2_PG_CONFIG), \ - SR(DOMAIN3_PG_CONFIG), \ - SR(DOMAIN16_PG_CONFIG), \ - SR(DOMAIN17_PG_CONFIG), \ - SR(DOMAIN18_PG_CONFIG), \ - SR(DOMAIN19_PG_CONFIG), \ - SR(DOMAIN0_PG_STATUS), \ - SR(DOMAIN1_PG_STATUS), \ - SR(DOMAIN2_PG_STATUS), \ - SR(DOMAIN3_PG_STATUS), \ - SR(DOMAIN16_PG_STATUS), \ - SR(DOMAIN17_PG_STATUS), \ - SR(DOMAIN18_PG_STATUS), \ - SR(DOMAIN19_PG_STATUS), \ - SR(DC_IP_REQUEST_CNTL), \ - SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING), \ - SR(HPO_TOP_HW_CONTROL),\ - SR(DMU_CLK_CNTL) - -/* OPTC */ -#define OPTC_COMMON_REG_LIST_DCN3_5_RI(inst) \ - SRI_ARR(OTG_VSTARTUP_PARAM, OTG, inst),\ - SRI_ARR(OTG_VUPDATE_PARAM, OTG, inst),\ - SRI_ARR(OTG_VREADY_PARAM, OTG, inst),\ - SRI_ARR(OTG_MASTER_UPDATE_LOCK, OTG, inst),\ - SRI_ARR(OTG_GLOBAL_CONTROL0, OTG, inst),\ - SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst),\ - SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst),\ - SRI_ARR(OTG_GLOBAL_CONTROL4, OTG, inst),\ - SRI_ARR(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\ - SRI_ARR(OTG_H_TOTAL, OTG, inst),\ - SRI_ARR(OTG_H_BLANK_START_END, OTG, inst),\ - SRI_ARR(OTG_H_SYNC_A, OTG, inst),\ - SRI_ARR(OTG_H_SYNC_A_CNTL, OTG, inst),\ - SRI_ARR(OTG_H_TIMING_CNTL, OTG, inst),\ - 
SRI_ARR(OTG_V_TOTAL, OTG, inst),\ - SRI_ARR(OTG_V_BLANK_START_END, OTG, inst),\ - SRI_ARR(OTG_V_SYNC_A, OTG, inst),\ - SRI_ARR(OTG_V_SYNC_A_CNTL, OTG, inst),\ - SRI_ARR(OTG_CONTROL, OTG, inst),\ - SRI_ARR(OTG_STEREO_CONTROL, OTG, inst),\ - SRI_ARR(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\ - SRI_ARR(OTG_STEREO_STATUS, OTG, inst),\ - SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst),\ - SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst),\ - SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst),\ - SRI_ARR(OTG_TRIGA_CNTL, OTG, inst),\ - SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ - SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ - SRI_ARR(OTG_STATUS_FRAME_COUNT, OTG, inst),\ - SRI_ARR(OTG_STATUS, OTG, inst),\ - SRI_ARR(OTG_STATUS_POSITION, OTG, inst),\ - SRI_ARR(OTG_NOM_VERT_POSITION, OTG, inst),\ - SRI_ARR(OTG_M_CONST_DTO0, OTG, inst),\ - SRI_ARR(OTG_M_CONST_DTO1, OTG, inst),\ - SRI_ARR(OTG_CLOCK_CONTROL, OTG, inst),\ - SRI_ARR(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ - SRI_ARR(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ - SRI_ARR(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\ - SRI_ARR(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\ - SRI_ARR(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ - SRI_ARR(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ - SRI_ARR(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ - SRI_ARR(OPTC_DATA_SOURCE_SELECT, ODM, inst),\ - SRI_ARR(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\ - SRI_ARR(CONTROL, VTG, inst),\ - SRI_ARR(OTG_VERT_SYNC_CONTROL, OTG, inst),\ - SRI_ARR(OTG_GSL_CONTROL, OTG, inst),\ - SRI_ARR(OTG_CRC_CNTL, OTG, inst),\ - SRI_ARR(OTG_CRC0_DATA_RG, OTG, inst),\ - SRI_ARR(OTG_CRC0_DATA_B, OTG, inst),\ - SRI_ARR(OTG_CRC1_DATA_RG, OTG, inst),\ - SRI_ARR(OTG_CRC1_DATA_B, OTG, inst),\ - SRI_ARR(OTG_CRC2_DATA_RG, OTG, inst),\ - SRI_ARR(OTG_CRC2_DATA_B, OTG, inst),\ - SRI_ARR(OTG_CRC3_DATA_RG, OTG, inst),\ - SRI_ARR(OTG_CRC3_DATA_B, OTG, inst),\ - SRI_ARR(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\ - SRI_ARR(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\ - SRI_ARR(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\ - SRI_ARR(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\ - SRI_ARR(OTG_CRC1_WINDOWA_X_CONTROL, OTG, inst),\ - SRI_ARR(OTG_CRC1_WINDOWA_Y_CONTROL, OTG, inst),\ - SRI_ARR(OTG_CRC1_WINDOWB_X_CONTROL, OTG, inst),\ - SRI_ARR(OTG_CRC1_WINDOWB_Y_CONTROL, OTG, inst),\ - SRI_ARR(OTG_CRC0_WINDOWA_X_CONTROL_READBACK, OTG, inst),\ - SRI_ARR(OTG_CRC0_WINDOWA_Y_CONTROL_READBACK, OTG, inst),\ - SRI_ARR(OTG_CRC0_WINDOWB_X_CONTROL_READBACK, OTG, inst),\ - SRI_ARR(OTG_CRC0_WINDOWB_Y_CONTROL_READBACK, OTG, inst),\ - SRI_ARR(OTG_CRC1_WINDOWA_X_CONTROL_READBACK, OTG, inst),\ - SRI_ARR(OTG_CRC1_WINDOWA_Y_CONTROL_READBACK, OTG, inst),\ - SRI_ARR(OTG_CRC1_WINDOWB_X_CONTROL_READBACK, OTG, inst),\ - SRI_ARR(OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG, inst),\ - SR_ARR(GSL_SOURCE_SELECT, inst),\ - SRI_ARR(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\ - SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst),\ - SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst),\ - SRI_ARR(OTG_GSL_WINDOW_X, OTG, inst),\ - SRI_ARR(OTG_GSL_WINDOW_Y, OTG, inst),\ - SRI_ARR(OTG_VUPDATE_KEEPOUT, OTG, inst),\ - SRI_ARR(OTG_DSC_START_POSITION, OTG, inst),\ - SRI_ARR(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\ - SRI_ARR(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\ - SRI_ARR(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ - SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst),\ - SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst),\ - SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst),\ - SRI_ARR(OTG_DRR_CONTROL, OTG, inst),\ - SRI2_ARR(OPTC_CLOCK_CONTROL, OPTC, inst) - -/* DPP */ -#define DPP_REG_LIST_DCN35_RI(id)\ - DPP_REG_LIST_DCN30_COMMON_RI(id) - -#endif /* 
_DCN35_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 7ce9a5b6c3..6d7a15dcf8 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -103,10 +103,16 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( /* * Sends ALLOCATE_PAYLOAD message. */ -bool dm_helpers_dp_mst_send_payload_allocation( +void dm_helpers_dp_mst_send_payload_allocation( struct dc_context *ctx, - const struct dc_stream_state *stream, - bool enable); + const struct dc_stream_state *stream); + +/* + * Update mst manager relevant variables + */ +void dm_helpers_dp_mst_update_mst_mgr_for_deallocation( + struct dc_context *ctx, + const struct dc_stream_state *stream); bool dm_helpers_dp_mst_start_top_mgr( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index 4440d08743..bd7ba0a251 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -247,6 +247,7 @@ struct pp_smu_funcs_nv { #define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4 #define PP_SMU_NUM_DCLK_DPM_LEVELS 8 #define PP_SMU_NUM_VCLK_DPM_LEVELS 8 +#define PP_SMU_NUM_VPECLK_DPM_LEVELS 8 struct dpm_clock { uint32_t Freq; // In MHz @@ -262,6 +263,7 @@ struct dpm_clocks { struct dpm_clock MemClocks[PP_SMU_NUM_MEMCLK_DPM_LEVELS]; struct dpm_clock VClocks[PP_SMU_NUM_VCLK_DPM_LEVELS]; struct dpm_clock DClocks[PP_SMU_NUM_DCLK_DPM_LEVELS]; + struct dpm_clock VPEClocks[PP_SMU_NUM_VPECLK_DPM_LEVELS]; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c index 50b0434354..0c4a8fe8e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c @@ -30,7 +30,7 @@ #include "dcn_calc_auto.h" #include "dal_asic_id.h" #include "resource.h" -#include "dcn10/dcn10_resource.h" +#include "resource/dcn10/dcn10_resource.h" #include "dcn10/dcn10_hubbub.h" #include "dml/dml1_display_rq_dlg_calc.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index d2271e308f..38ab9ad60e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -33,6 +33,7 @@ #include "link.h" #include "dcn20_fpu.h" +#include "dc_state_priv.h" #define DC_LOGGER \ dc->ctx->logger @@ -1182,7 +1183,7 @@ void dcn20_calculate_dlg_params(struct dc *dc, pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; context->res_ctx.pipe_ctx[i].unbounded_req = false; @@ -1532,7 +1533,7 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc, */ if (res_ctx->pipe_ctx[i].plane_state && (res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE || - res_ctx->pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM)) + dc_state_get_pipe_subvp_type(context, &res_ctx->pipe_ctx[i]) == SUBVP_PHANTOM)) 
pipes[pipe_cnt].pipe.src.num_cursors = 0; else pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 3686f1e7de..63c48c29ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -3542,7 +3542,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l { struct vba_vars_st *v = &mode_lib->vba; int MinPrefetchMode, MaxPrefetchMode; - int i; + int i, start_state; unsigned int j, k, m; bool EnoughWritebackUnits = true; bool WritebackModeSupport = true; @@ -3553,6 +3553,11 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/ + if (mode_lib->validate_max_state) + start_state = v->soc.num_states - 1; + else + start_state = 0; + CalculateMinAndMaxPrefetchMode( mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, &MinPrefetchMode, &MaxPrefetchMode); @@ -3851,7 +3856,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->SingleDPPViewportSizeSupportPerPlane, &v->ViewportSizeSupport[0][0]); - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { v->MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDispclk[i], v->DISPCLKDPPCLKVCOSpeed); v->MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDppclk[i], v->DISPCLKDPPCLKVCOSpeed); @@ -4007,7 +4012,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*Total Available Pipes Support Check*/ - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { if (v->TotalNumberOfActiveDPP[i][j] <= v->MaxNumDPP) { v->TotalAvailablePipesSupport[i][j] = true; @@ -4046,7 +4051,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { v->RequiresDSC[i][k] = false; v->RequiresFEC[i][k] = false; @@ -4174,7 +4179,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } } - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { v->DIOSupport[i] = true; for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi) @@ -4185,7 +4190,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } } - for (i = 0; i < v->soc.num_states; ++i) { + for (i = start_state; i < v->soc.num_states; ++i) { v->ODMCombine4To1SupportCheckOK[i] = true; for (k = 0; k < v->NumberOfActivePlanes; ++k) { if (v->BlendingAndTiming[k] == k && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1 @@ -4197,7 +4202,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */ - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { v->NotEnoughDSCUnits[i] = false; v->TotalDSCUnitsRequired = 0.0; for (k = 0; k <= 
v->NumberOfActivePlanes - 1; k++) { @@ -4217,7 +4222,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } /*DSC Delay per state*/ - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { if (v->OutputBppPerState[i][k] == BPP_INVALID) { v->BPP = 0.0; @@ -4333,7 +4338,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0 / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k]; } - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k]; @@ -5075,7 +5080,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l /*PTE Buffer Size Check*/ - for (i = 0; i < v->soc.num_states; i++) { + for (i = start_state; i < v->soc.num_states; i++) { for (j = 0; j < 2; j++) { v->PTEBufferSizeNotExceeded[i][j] = true; for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) { @@ -5136,7 +5141,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } /*Mode Support, Voltage State and SOC Configuration*/ - for (i = v->soc.num_states - 1; i >= 0; i--) { + for (i = v->soc.num_states - 1; i >= start_state; i--) { for (j = 0; j < 2; j++) { if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1 && v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1 @@ -5158,7 +5163,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } { unsigned int MaximumMPCCombine = 0; - for (i = v->soc.num_states; i >= 0; i--) { + for (i = v->soc.num_states; i >= start_state; i--) { if (i == v->soc.num_states || v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true) { v->VoltageLevel = i; v->ModeIsSupported = v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index b315ca6f1c..ba76dd4a2c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -32,6 +32,7 @@ #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h" #include "dcn30/dcn30_resource.h" #include "link.h" +#include "dc_state_priv.h" #define DC_LOGGER_INIT(logger) @@ -45,6 +46,14 @@ static const struct subvp_high_refresh_list subvp_high_refresh_list = { {.width = 1920, .height = 1080, }}, }; +static const struct subvp_active_margin_list subvp_active_margin_list = { + .min_refresh = 55, + .max_refresh = 65, + .res = { + {.width = 2560, .height = 1440, }, + {.width = 1920, .height = 1080, }}, +}; + struct _vcs_dpi_ip_params_st dcn3_2_ip = { .gpuvm_enable = 0, .gpuvm_max_page_table_levels = 4, @@ -333,7 +342,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc, if (!pipe->stream) continue; - if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); pipes[pipe_idx].pipe.dest.vupdate_offset = @@ -616,7 +625,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, if 
(pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && - pipe->stream->mall_stream_config.type == SUBVP_NONE && + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && !pipe->plane_state->address.tmz_surface && (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 || @@ -674,7 +683,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context // Find the minimum pipe split count for non SubVP pipes if (resource_is_pipe_type(pipe, OPP_HEAD) && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE) { split_cnt = 0; while (pipe) { split_cnt++; @@ -727,8 +736,8 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context) * and also to store the two main SubVP pipe pointers in subvp_pipes[2]. */ if (pipe->stream && pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - phantom = pipe->stream->mall_stream_config.paired_stream; + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { + phantom = dc_state_get_paired_subvp_stream(context, pipe->stream); microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) + phantom->timing.v_addressable; @@ -796,6 +805,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) int16_t stretched_drr_us = 0; int16_t drr_stretched_vblank_us = 0; int16_t max_vblank_mallregion = 0; + struct dc_stream_state *phantom_stream; + bool subvp_found = false; + bool drr_found = false; // Find SubVP pipe for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -808,8 +820,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) continue; // Find the SubVP pipe - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { + subvp_found = true; break; + } } // Find the DRR pipe @@ -817,32 +831,37 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context) drr_pipe = &context->res_ctx.pipe_ctx[i]; // We check for master pipe only - if (!resource_is_pipe_type(pipe, OTG_MASTER) || - !resource_is_pipe_type(pipe, DPP_PIPE)) + if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) || + !resource_is_pipe_type(drr_pipe, DPP_PIPE)) continue; - if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param && - (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) + if (dc_state_get_pipe_subvp_type(context, drr_pipe) == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param && + (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) { + drr_found = true; break; + } } - main_timing = &pipe->stream->timing; - phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing; - drr_timing = &drr_pipe->stream->timing; - prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / - (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + - dc->caps.subvp_prefetch_end_to_mall_start_us; - subvp_active_us = main_timing->v_addressable * 
main_timing->h_total / - (double)(main_timing->pix_clk_100hz * 100) * 1000000; - drr_frame_us = drr_timing->v_total * drr_timing->h_total / - (double)(drr_timing->pix_clk_100hz * 100) * 1000000; - // P-State allow width and FW delays already included phantom_timing->v_addressable - mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total / - (double)(phantom_timing->pix_clk_100hz * 100) * 1000000; - stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US; - drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total / - (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us); - max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us; + if (subvp_found && drr_found) { + phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream); + main_timing = &pipe->stream->timing; + phantom_timing = &phantom_stream->timing; + drr_timing = &drr_pipe->stream->timing; + prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / + (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + + dc->caps.subvp_prefetch_end_to_mall_start_us; + subvp_active_us = main_timing->v_addressable * main_timing->h_total / + (double)(main_timing->pix_clk_100hz * 100) * 1000000; + drr_frame_us = drr_timing->v_total * drr_timing->h_total / + (double)(drr_timing->pix_clk_100hz * 100) * 1000000; + // P-State allow width and FW delays already included phantom_timing->v_addressable + mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total / + (double)(phantom_timing->pix_clk_100hz * 100) * 1000000; + stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US; + drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total / + (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us); + max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us; + } /* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the * highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis @@ -887,6 +906,8 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) struct dc_crtc_timing *main_timing = NULL; struct dc_crtc_timing *phantom_timing = NULL; struct dc_crtc_timing *vblank_timing = NULL; + struct dc_stream_state *phantom_stream; + enum mall_stream_type pipe_mall_type; /* For SubVP + VBLANK/DRR cases, we assume there can only be * a single VBLANK/DRR display. If DML outputs SubVP + VBLANK @@ -896,6 +917,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) */ for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; + pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) @@ -903,18 +925,19 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) !resource_is_pipe_type(pipe, DPP_PIPE)) continue; - if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (!found && pipe_mall_type == SUBVP_NONE) { // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe). 
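/*
 * Illustrative sketch of the line-time arithmetic repeated throughout these
 * SubVP schedulability checks (the helper name lines_to_us is hypothetical,
 * not from this driver): a window of N lines lasts
 * N * h_total / pixel_clock_hz seconds, with pixel_clock_hz = pix_clk_100hz * 100.
 */
static double lines_to_us(unsigned int num_lines, unsigned int h_total,
			  unsigned int pix_clk_100hz)
{
	/* One line takes h_total pixel clocks; scale the result to microseconds. */
	return num_lines * h_total / (double)(pix_clk_100hz * 100) * 1000000;
}
/* e.g. mall_region_us is lines_to_us(phantom_timing->v_addressable,
 * phantom_timing->h_total, phantom_timing->pix_clk_100hz), because the
 * P-state allow width and FW delays are already folded into v_addressable. */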
vblank_index = i; found = true; } - if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN) subvp_pipe = pipe; } if (found) { + phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); main_timing = &subvp_pipe->stream->timing; - phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; + phantom_timing = &phantom_stream->timing; vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing; // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe // Also include the prefetch end to mallstart delay time @@ -969,7 +992,7 @@ static bool subvp_subvp_admissable(struct dc *dc, continue; if (pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); @@ -1018,23 +1041,23 @@ static bool subvp_validate_static_schedulability(struct dc *dc, for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (!pipe->stream) continue; if (pipe->plane_state && !pipe->top_pipe) { - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (pipe_mall_type == SUBVP_MAIN) subvp_count++; - if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (pipe_mall_type == SUBVP_NONE) non_subvp_pipes++; - } } // Count how many planes that aren't SubVP/phantom are capable of VACTIVE // switching (SubVP + VACTIVE unsupported). In situations where we force // SubVP for a VACTIVE plane, we don't want to increment the vactive_count. 
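/*
 * Sketch of the rounded-up refresh-rate computation used by
 * subvp_subvp_admissable() above (and by dcn32_allow_subvp_with_active_margin()
 * further down): ceil(pixel_clock_hz / (h_total * v_total)), kept in 64-bit
 * integer math. Dividing by v_total and then h_total is exact because
 * floor(floor(x / a) / b) == floor(x / (a * b)). The wrapper name is
 * hypothetical.
 */
static unsigned int timing_to_refresh_hz(const struct dc_crtc_timing *timing)
{
	uint64_t hz = timing->pix_clk_100hz * (uint64_t)100 +
		      (uint64_t)timing->h_total * timing->v_total - 1;

	hz = div_u64(hz, timing->v_total);
	hz = div_u64(hz, timing->h_total);
	return (unsigned int)hz; /* e.g. a 59.95 Hz timing reports 60 */
}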
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + pipe_mall_type == SUBVP_NONE) { vactive_count++; } pipe_idx++; @@ -1070,7 +1093,7 @@ static void assign_subvp_index(struct dc *dc, struct dc_state *context) struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && - pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { + dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) { pipe_ctx->subvp_index = index++; } else { pipe_ctx->subvp_index = 0; @@ -1414,6 +1437,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, unsigned int dc_pipe_idx = 0; int i = 0; bool found_supported_config = false; + int vlevel_temp = 0; dc_assert_fp_enabled(); @@ -1446,13 +1470,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, */ if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) && !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) && !is_test_pattern_enabled(context) && - (*vlevel == context->bw_ctx.dml.soc.num_states || + (*vlevel == context->bw_ctx.dml.soc.num_states || (vba->DRAMSpeedPerState[*vlevel] != vba->DRAMSpeedPerState[0] && + vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) || vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported || dc->debug.force_subvp_mclk_switch)) { dcn32_merge_pipes_for_subvp(dc, context); memset(merge, 0, MAX_PIPES * sizeof(bool)); + vlevel_temp = *vlevel; /* to re-initialize viewport after the pipe merge */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; @@ -1521,10 +1547,14 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, } } + if (vba->DRAMSpeedPerState[*vlevel] >= vba->DRAMSpeedPerState[vlevel_temp]) + found_supported_config = false; + // If SubVP pipe config is unsupported (or cannot be used for UCLK switching) // remove phantom pipes and repopulate dml pipes if (!found_supported_config) { - dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); + dc_state_remove_phantom_streams_and_planes(dc, context); + dc_state_release_phantom_streams_and_planes(dc, context); vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported; *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); @@ -1676,7 +1706,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; context->res_ctx.pipe_ctx[i].unbounded_req = false; @@ -1708,7 +1738,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) && context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { /* SS: all active surfaces stored in MALL */ - if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, 
&context->res_ctx.pipe_ctx[i]) != SUBVP_PHANTOM) { context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) { @@ -1922,7 +1952,8 @@ bool dcn32_internal_validate_bw(struct dc *dc, return false; // For each full update, remove all existing phantom pipes first - dc->res_pool->funcs->remove_phantom_pipes(dc, context, fast_validate); + dc_state_remove_phantom_streams_and_planes(dc, context); + dc_state_release_phantom_streams_and_planes(dc, context); dc->res_pool->funcs->update_soc_for_wm_a(dc, context); @@ -2729,7 +2760,7 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk struct _vcs_dpi_voltage_scaling_st entry = {0}; struct clk_limit_table_entry max_clk_data = {0}; - unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299; + unsigned int min_dcfclk_mhz = 399, min_fclk_mhz = 599; static const unsigned int num_dcfclk_stas = 5; unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564}; @@ -3305,25 +3336,24 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) { bool allow = false; uint32_t refresh_rate = 0; + uint32_t min_refresh = subvp_active_margin_list.min_refresh; + uint32_t max_refresh = subvp_active_margin_list.max_refresh; + uint32_t i; - /* Allow subvp on displays that have active margin for 2560x1440@60hz displays - * only for now. There must be no scaling as well. - * - * For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60 configs - * for p-state switching. - */ - if (pipe->stream && pipe->plane_state) { - refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + - pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) - / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); - if (pipe->stream->timing.v_addressable == 1440 && - pipe->stream->timing.h_addressable == 2560 && - refresh_rate >= 55 && refresh_rate <= 65 && - pipe->plane_state->src_rect.height == 1440 && - pipe->plane_state->src_rect.width == 2560 && - pipe->plane_state->dst_rect.height == 1440 && - pipe->plane_state->dst_rect.width == 2560) + for (i = 0; i < SUBVP_ACTIVE_MARGIN_LIST_LEN; i++) { + uint32_t width = subvp_active_margin_list.res[i].width; + uint32_t height = subvp_active_margin_list.res[i].height; + + refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); + refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); + + if (refresh_rate >= min_refresh && refresh_rate <= max_refresh && + dcn32_check_native_scaling_for_res(pipe, width, height)) { allow = true; + break; + } } return allow; } @@ -3442,7 +3472,15 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (!pipe->stream) + /* In DCN32/321, FPO uses per-pipe P-State force. + * If there's no planes, HUBP is power gated and + * therefore programming UCLK_PSTATE_FORCE does + * nothing (P-State will always be asserted naturally + * on a pipe that has HUBP power gated. Therefore we + * only want to enable FPO if the FPO pipe has both + * a stream and a plane. 
+ */ + if (!pipe->stream || !pipe->plane_state) continue; if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index f154a3eb1d..7ea2bd5374 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -164,11 +164,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { }, }, .num_states = 5, - .sr_exit_time_us = 14.0, - .sr_enter_plus_exit_time_us = 16.0, - .sr_exit_z8_time_us = 525.0, - .sr_enter_plus_exit_z8_time_us = 715.0, - .fclk_change_latency_us = 20.0, + .sr_exit_time_us = 28.0, + .sr_enter_plus_exit_time_us = 30.0, + .sr_exit_z8_time_us = 210.0, + .sr_enter_plus_exit_z8_time_us = 320.0, + .fclk_change_latency_us = 24.0, .usr_retraining_latency_us = 2, .writeback_latency_us = 12.0, @@ -326,6 +326,25 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc, dcn3_5_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000.0; } + + if (dc->bb_overrides.dram_clock_change_latency_ns > 0) + dcn3_5_soc.dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + + if (dc->bb_overrides.sr_exit_time_ns > 0) + dcn3_5_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + + if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0) + dcn3_5_soc.sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + + if (dc->bb_overrides.sr_exit_z8_time_ns > 0) + dcn3_5_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0; + + if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0) + dcn3_5_soc.sr_enter_plus_exit_z8_time_us = + dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0; + /*temp till dml2 fully work without dml1*/ dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip, DML_PROJECT_DCN31); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index 1a2b24cc6b..0baf39d64a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -772,18 +772,29 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx, const struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, - const struct dc_stream_status *status, unsigned int stream_id, + const struct dc_stream_status *status, + const struct dc_stream_state *stream, int plane_idx) { unsigned int plane_id; unsigned int cfg_idx; + unsigned int mpc_factor; - get_plane_id(ctx, state, status->plane_states[plane_idx], stream_id, plane_idx, &plane_id); + get_plane_id(ctx, state, status->plane_states[plane_idx], + stream->stream_id, plane_idx, &plane_id); cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); - if (ctx->architecture == dml2_architecture_20) - return (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx]; - ASSERT(false); - return 1; + if (ctx->architecture == dml2_architecture_20) { + mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx]; + } else { + mpc_factor = 1; + ASSERT(false); + } + + /* For stereo timings, we need to pipe split */ + if (dml2_is_stereo_timing(stream)) + mpc_factor = 2; + + return mpc_factor; } static unsigned int get_odm_factor( @@ -820,14 +831,13 @@ static void populate_mpc_factors_for_stream( unsigned int mpc_factors[MAX_PIPES]) { const 
struct dc_stream_status *status = &state->stream_status[stream_idx]; - unsigned int stream_id = state->streams[stream_idx]->stream_id; int i; for (i = 0; i < status->plane_count; i++) if (odm_factor == 1) mpc_factors[i] = get_mpc_factor( ctx, state, disp_cfg, mapping, status, - stream_id, i); + state->streams[stream_idx], i); else mpc_factors[i] = 1; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h index e85866db80..7ca7f2a743 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h @@ -38,5 +38,6 @@ #include "core_types.h" #include "dsc.h" #include "clk_mgr.h" +#include "dc_state_priv.h" #endif //__DML2_DC_TYPES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c index 32f8a43af3..282d70e2b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c @@ -51,7 +51,7 @@ unsigned int dml2_helper_calculate_num_ways_for_subvp(struct dml2_context *ctx, // Find the phantom pipes if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; mblk_width = ctx->config.mall_cfg.mblk_width_pixels; mblk_height = bytes_per_pixel == 4 ? mblk_width = ctx->config.mall_cfg.mblk_height_4bpe_pixels : ctx->config.mall_cfg.mblk_height_8bpe_pixels; @@ -253,7 +253,7 @@ static bool assign_subvp_pipe(struct dml2_context *ctx, struct dc_state *context * to combine this with SubVP can cause issues with the scheduling). */ if (pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_NONE && refresh_rate < 120 && vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { while (pipe) { num_pipes++; @@ -317,7 +317,7 @@ static bool enough_pipes_for_subvp(struct dml2_context *ctx, struct dc_state *st // Find the minimum pipe split count for non SubVP pipes if (pipe->stream && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_NONE) { split_cnt = 0; while (pipe) { split_cnt++; @@ -372,8 +372,8 @@ static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *c * and also to store the two main SubVP pipe pointers in subvp_pipes[2]. 
*/ if (pipe->stream && pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - phantom = pipe->stream->mall_stream_config.paired_stream; + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { + phantom = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream); microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) + phantom->timing.v_addressable; @@ -435,6 +435,7 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context struct pipe_ctx *pipe = NULL; struct dc_crtc_timing *main_timing = NULL; struct dc_crtc_timing *phantom_timing = NULL; + struct dc_stream_state *phantom_stream; int16_t prefetch_us = 0; int16_t mall_region_us = 0; int16_t drr_frame_us = 0; // nominal frame time @@ -453,12 +454,13 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context continue; // Find the SubVP pipe - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) break; } + phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream); main_timing = &pipe->stream->timing; - phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing; + phantom_timing = &phantom_stream->timing; prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + ctx->config.svp_pstate.subvp_prefetch_end_to_mall_start_us; @@ -519,6 +521,8 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state * struct dc_crtc_timing *main_timing = NULL; struct dc_crtc_timing *phantom_timing = NULL; struct dc_crtc_timing *vblank_timing = NULL; + struct dc_stream_state *phantom_stream; + enum mall_stream_type pipe_mall_type; /* For SubVP + VBLANK/DRR cases, we assume there can only be * a single VBLANK/DRR display. If DML outputs SubVP + VBLANK @@ -528,19 +532,20 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state * */ for (i = 0; i < ctx->config.dcn_pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; + pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe); // We check for master pipe, but it shouldn't matter since we only need // the pipe for timing info (stream should be same for any pipe splits) if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) { + if (!found && pipe_mall_type == SUBVP_NONE) { // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe). 
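/*
 * Sketch of how a resource pool might wire the new state-owned SubVP
 * accessors into the DML2 SVP callback table; the surrounding function and
 * the dml2_configuration_options layout are assumptions inferred from the
 * ctx->config.svp_pstate.callbacks accesses above.
 */
static void init_svp_state_callbacks(struct dml2_configuration_options *config)
{
	config->svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
	config->svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
	config->svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
}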
vblank_index = i; found = true; } - if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) + if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN) subvp_pipe = pipe; } // Use ignore_msa_timing_param flag to identify as DRR @@ -548,8 +553,9 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state * // SUBVP + DRR case schedulable = dml2_svp_drr_schedulable(ctx, context, &context->res_ctx.pipe_ctx[vblank_index].stream->timing); } else if (found) { + phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, subvp_pipe->stream); main_timing = &subvp_pipe->stream->timing; - phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; + phantom_timing = &phantom_stream->timing; vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing; // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe // Also include the prefetch end to mallstart delay time @@ -602,19 +608,20 @@ bool dml2_svp_validate_static_schedulability(struct dml2_context *ctx, struct dc for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + enum mall_stream_type pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe); if (!pipe->stream) continue; if (pipe->plane_state && !pipe->top_pipe && - pipe->stream->mall_stream_config.type == SUBVP_MAIN) + pipe_mall_type == SUBVP_MAIN) subvp_count++; // Count how many planes that aren't SubVP/phantom are capable of VACTIVE // switching (SubVP + VACTIVE unsupported). In situations where we force // SubVP for a VACTIVE plane, we don't want to increment the vactive_count. if (vba->ActiveDRAMClockChangeLatencyMargin[vba->pipe_plane[pipe_idx]] > 0 && - pipe->stream->mall_stream_config.type == SUBVP_NONE) { + pipe_mall_type == SUBVP_NONE) { vactive_count++; } pipe_idx++; @@ -708,14 +715,10 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, struct dc_state *state, unsigned int dc_pipe_idx, unsigned int svp_height, unsigned int vstartup) { struct pipe_ctx *ref_pipe = &state->res_ctx.pipe_ctx[dc_pipe_idx]; - struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_stream_for_sink(ref_pipe->stream->sink); - - phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; - phantom_stream->dpms_off = true; - phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; - phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; - ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; - ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; + struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_phantom_stream( + ctx->config.svp_pstate.callbacks.dc, + state, + ref_pipe->stream); /* stream has limited viewport and small timing */ memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing)); @@ -723,7 +726,10 @@ static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, s memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst)); set_phantom_stream_timing(ctx, state, ref_pipe, phantom_stream, dc_pipe_idx, svp_height, vstartup); - ctx->config.svp_pstate.callbacks.add_stream_to_ctx(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream); + ctx->config.svp_pstate.callbacks.add_phantom_stream(ctx->config.svp_pstate.callbacks.dc, + state, + phantom_stream, + 
ref_pipe->stream); return phantom_stream; } @@ -740,7 +746,10 @@ static void enable_phantom_plane(struct dml2_context *ctx, if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) { phantom_plane = prev_phantom_plane; } else { - phantom_plane = ctx->config.svp_pstate.callbacks.create_plane(ctx->config.svp_pstate.callbacks.dc); + phantom_plane = ctx->config.svp_pstate.callbacks.create_phantom_plane( + ctx->config.svp_pstate.callbacks.dc, + state, + curr_pipe->plane_state); } memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); @@ -763,9 +772,7 @@ static void enable_phantom_plane(struct dml2_context *ctx, phantom_plane->clip_rect.y = 0; phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable; - phantom_plane->is_phantom = true; - - ctx->config.svp_pstate.callbacks.add_plane_to_context(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state); + ctx->config.svp_pstate.callbacks.add_phantom_plane(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state); curr_pipe = curr_pipe->bottom_pipe; prev_phantom_plane = phantom_plane; @@ -790,7 +797,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_ // We determine which phantom pipes were added by comparing with // the phantom stream. if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream && - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) { pipe->stream->use_dynamic_meta = false; pipe->plane_state->flip_immediate = false; if (!ctx->config.svp_pstate.callbacks.build_scaling_params(pipe)) { @@ -800,7 +807,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_ } } -static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context) +static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context) { int i, old_plane_count; struct dc_stream_status *stream_status = NULL; @@ -821,9 +828,11 @@ static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_str for (i = 0; i < old_plane_count; i++) del_planes[i] = stream_status->plane_states[i]; - for (i = 0; i < old_plane_count; i++) - if (!ctx->config.svp_pstate.callbacks.remove_plane_from_context(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context)) + for (i = 0; i < old_plane_count; i++) { + if (!ctx->config.svp_pstate.callbacks.remove_phantom_plane(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context)) return false; + ctx->config.svp_pstate.callbacks.release_phantom_plane(ctx->config.svp_pstate.callbacks.dc, context, del_planes[i]); + } return true; } @@ -832,35 +841,21 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state { int i; bool removed_pipe = false; - struct dc_plane_state *phantom_plane = NULL; struct dc_stream_state *phantom_stream = NULL; for (i = 0; i < ctx->config.dcn_pipe_count; i++) { struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; // build scaling params for phantom pipes - if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - phantom_plane = pipe->plane_state; + if (pipe->plane_state && pipe->stream && ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) { phantom_stream = pipe->stream; - 
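/*
 * Sketch of the teardown ordering the new phantom callbacks expect (mirrors
 * the rewritten removal loop below; the wrapper name is hypothetical):
 * remove_* detaches the phantom object from the state, and release_* then
 * drops the reference taken when it was created.
 */
static void teardown_phantom_stream(struct dml2_context *ctx,
				    struct dc_state *state,
				    struct dc_stream_state *phantom_stream)
{
	struct dc *dc = ctx->config.svp_pstate.callbacks.dc;

	ctx->config.svp_pstate.callbacks.remove_phantom_stream(dc, state, phantom_stream);
	ctx->config.svp_pstate.callbacks.release_phantom_stream(dc, state, phantom_stream);
}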
remove_all_planes_for_stream(ctx, pipe->stream, state); - ctx->config.svp_pstate.callbacks.remove_stream_from_ctx(ctx->config.svp_pstate.callbacks.dc, state, pipe->stream); - - /* Ref count is incremented on allocation and also when added to the context. - * Therefore we must call release for the the phantom plane and stream once - * they are removed from the ctx to finally decrement the refcount to 0 to free. - */ - ctx->config.svp_pstate.callbacks.plane_state_release(phantom_plane); - ctx->config.svp_pstate.callbacks.stream_release(phantom_stream); + remove_all_phantom_planes_for_stream(ctx, phantom_stream, state); + ctx->config.svp_pstate.callbacks.remove_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream); + ctx->config.svp_pstate.callbacks.release_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream); removed_pipe = true; } - // Clear all phantom stream info - if (pipe->stream) { - pipe->stream->mall_stream_config.type = SUBVP_NONE; - pipe->stream->mall_stream_config.paired_stream = NULL; - } - if (pipe->plane_state) { pipe->plane_state->is_phantom = false; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index b6744ad778..a20f28a5d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -1055,8 +1055,10 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg) { - int i = 0, j = 0; + int i = 0, j = 0, k = 0; int disp_cfg_stream_location, disp_cfg_plane_location; + enum mall_stream_type stream_mall_type; + struct pipe_ctx *current_pipe_context; for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) { dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id_valid[i] = false; @@ -1076,7 +1078,17 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat dml2_populate_pipe_to_plane_index_mapping(dml2, context); for (i = 0; i < context->stream_count; i++) { + current_pipe_context = NULL; + for (k = 0; k < MAX_PIPES; k++) { + /* find one pipe allocated to this stream for the purpose of getting + info about the link later */ + if (context->streams[i] == context->res_ctx.pipe_ctx[k].stream) { + current_pipe_context = &context->res_ctx.pipe_ctx[k]; + break; + } + } disp_cfg_stream_location = map_stream_to_dml_display_cfg(dml2, context->streams[i], dml_dispcfg); + stream_mall_type = dc_state_get_stream_subvp_type(context, context->streams[i]); if (disp_cfg_stream_location < 0) disp_cfg_stream_location = dml_dispcfg->num_timings++; @@ -1084,7 +1096,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__); populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]); - populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], &context->res_ctx.pipe_ctx[i]); + populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context); switch (context->streams[i]->debug.force_odm_combine_segments) { case 2: dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_stream_location] = 
dml_odm_use_policy_combine_2to1; @@ -1121,10 +1133,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]); populate_dml_plane_cfg_from_plane_state(&dml_dispcfg->plane, disp_cfg_plane_location, context->stream_status[i].plane_states[j], context); - if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN) { + if (stream_mall_type == SUBVP_MAIN) { dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport; dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_optimize; - } else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) { + } else if (stream_mall_type == SUBVP_PHANTOM) { dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe; dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_disable; dml2->v20.dml_core_ctx.policy.ImmediateFlipRequirement[disp_cfg_plane_location] = dml_immediate_flip_not_required; @@ -1141,7 +1153,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat if (j >= 1) { populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_plane_location, context->streams[i]); - populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], &context->res_ctx.pipe_ctx[i]); + populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context); switch (context->streams[i]->debug.force_odm_combine_segments) { case 2: dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_plane_location] = dml_odm_use_policy_combine_2to1; @@ -1153,9 +1165,9 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat break; } - if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN) + if (stream_mall_type == SUBVP_MAIN) dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport; - else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) + else if (stream_mall_type == SUBVP_PHANTOM) dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe; dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[disp_cfg_plane_location] = context->streams[i]->stream_id; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index d6a6848415..1068b962d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -155,8 +155,12 @@ unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, e bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx) { + if (pipe_ctx == NULL || pipe_ctx->stream == NULL) + return false; + /* If this assert is hit then we have a link encoder dynamic management issue */ ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? 
pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); + /* Count MST hubs once by treating only 1st remote sink in topology as an encoder */ if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0]) { return (pipe_ctx->stream_res.hpo_dp_stream_enc && @@ -283,6 +287,7 @@ static void populate_pipe_ctx_dlg_params_from_dml(struct pipe_ctx *pipe_ctx, str void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, struct dml2_context *in_ctx, unsigned int pipe_cnt) { unsigned int dc_pipe_ctx_index, dml_pipe_idx, plane_id; + enum mall_stream_type pipe_mall_type; bool unbounded_req_enabled = false; struct dml2_calculate_rq_and_dlg_params_scratch *s = &in_ctx->v20.scratch.calculate_rq_and_dlg_params_scratch; @@ -330,7 +335,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont */ populate_pipe_ctx_dlg_params_from_dml(&context->res_ctx.pipe_ctx[dc_pipe_ctx_index], &context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx); - if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type == SUBVP_PHANTOM) { + pipe_mall_type = dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[dc_pipe_ctx_index]); + if (pipe_mall_type == SUBVP_PHANTOM) { // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb = 0; context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = false; @@ -357,7 +363,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state != context->res_ctx.pipe_ctx[dc_pipe_ctx_index].top_pipe->plane_state) && context->res_ctx.pipe_ctx[dc_pipe_ctx_index].prev_odm_pipe == NULL) { /* SS: all active surfaces stored in MALL */ - if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (pipe_mall_type != SUBVP_PHANTOM) { context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[dc_pipe_ctx_index].surface_size_in_mall_bytes; } else { /* SUBVP: phantom surfaces only stored in MALL */ @@ -476,7 +482,7 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc return need_recalculation; } -bool dml2_is_stereo_timing(struct dc_stream_state *stream) +bool dml2_is_stereo_timing(const struct dc_stream_state *stream) { bool is_stereo = false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h index 23b9028337..5842d6d3c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h @@ -42,7 +42,7 @@ void dml2_copy_clocks_to_dc_state(struct dml2_dcn_clocks *out_clks, struct dc_st void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx); int dml2_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id); bool is_dtbclk_required(const struct dc *dc, struct dc_state *context); -bool dml2_is_stereo_timing(struct dc_stream_state *stream); +bool dml2_is_stereo_timing(const struct dc_stream_state *stream); /* * dml2_dc_construct_pipes - This function will determine if we need additional pipes based diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 269bfb14c2..72cca36706 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ 
b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -423,7 +423,7 @@ static int find_drr_eligible_stream(struct dc_state *display_state) int i; for (i = 0; i < display_state->stream_count; i++) { - if (display_state->streams[i]->mall_stream_config.type == SUBVP_NONE + if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE && display_state->streams[i]->ignore_msa_timing_param) { // Use ignore_msa_timing_param flag to identify as DRR return i; @@ -639,6 +639,8 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx); memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c)); dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx); + //copy for deciding zstate use + context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod; } return result; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 548504d7de..cc662d682f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -93,15 +93,34 @@ struct dml2_dc_callbacks { struct dml2_dc_svp_callbacks { struct dc *dc; bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx); - struct dc_stream_state* (*create_stream_for_sink)(struct dc_sink *dc_sink_data); - struct dc_plane_state* (*create_plane)(struct dc *dc); - enum dc_status (*add_stream_to_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); - bool (*add_plane_to_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); - bool (*remove_plane_from_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); - enum dc_status (*remove_stream_from_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream); - void (*plane_state_release)(struct dc_plane_state *plane_state); - void (*stream_release)(struct dc_stream_state *stream); + struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *main_stream); + struct dc_plane_state* (*create_phantom_plane)(struct dc *dc, + struct dc_state *state, + struct dc_plane_state *main_plane); + enum dc_status (*add_phantom_stream)(struct dc *dc, + struct dc_state *state, + struct dc_stream_state *phantom_stream, + struct dc_stream_state *main_stream); + bool (*add_phantom_plane)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); + bool (*remove_phantom_plane)(const struct dc *dc, + struct dc_stream_state *stream, + struct dc_plane_state *plane_state, + struct dc_state *context); + enum dc_status (*remove_phantom_stream)(struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream); + void (*release_phantom_plane)(const struct dc *dc, + struct dc_state *state, + struct dc_plane_state *plane); + void (*release_phantom_stream)(const struct dc *dc, + struct dc_state *state, + struct dc_stream_state *stream); void (*release_dsc)(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc); + enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state, const struct 
pipe_ctx *pipe_ctx); + enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream); + struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream); }; struct dml2_clks_table_entry { diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index a2537229ee..b183ba5a69 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -1,8 +1,34 @@ # SPDX-License-Identifier: MIT # # Makefile for the 'dsc' sub-component of DAL. + +ifdef CONFIG_DRM_AMD_DC_FP + +############################################################################### +# DCN20 +############################################################################### +DSC_DCN20 = dcn20_dsc.o + +AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn20/,$(DSC_DCN20)) + + + + +############################################################################### +# DCN35 +############################################################################### + +DSC_DCN35 = dcn35_dsc.o + +AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn35/,$(DSC_DCN35)) + + + +endif + DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC)) AMD_DISPLAY_FILES += $(AMD_DAL_DSC) + diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index e8b5f17beb..0df6c55eb3 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -331,8 +331,9 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, int buff_block_size; int buff_size; - if (!dsc_buff_block_size_from_dpcd(dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT], - &buff_block_size)) + if (!dsc_buff_block_size_from_dpcd( + dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT] & 0x03, + &buff_block_size)) return false; buff_size = dpcd_dsc_basic_data[DP_DSC_RC_BUF_SIZE - DP_DSC_SUPPORT] + 1; @@ -357,10 +358,15 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, { int dpcd_throughput = dpcd_dsc_basic_data[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT]; + int dsc_throughput_granular_delta; + + dsc_throughput_granular_delta = dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT] >> 3; + dsc_throughput_granular_delta *= 2; if (!dsc_throughput_from_dpcd(dpcd_throughput & DP_DSC_THROUGHPUT_MODE_0_MASK, &dsc_sink_caps->throughput_mode_0_mps)) return false; + dsc_sink_caps->throughput_mode_0_mps += dsc_throughput_granular_delta; dpcd_throughput = (dpcd_throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >> DP_DSC_THROUGHPUT_MODE_1_SHIFT; if (!dsc_throughput_from_dpcd(dpcd_throughput, &dsc_sink_caps->throughput_mode_1_mps)) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c new file mode 100644 index 0000000000..c9ae2d8f00 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c @@ -0,0 +1,780 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/display/drm_dsc_helper.h>	/* drm_dsc_pps_payload_pack() and friends */
+
+#include "reg_helper.h"
+#include "dcn20_dsc.h"
+#include "dsc/dscc_types.h"
+#include "dsc/rc_calc.h"
+
+static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals);
+
+/* Object I/F functions */
+static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
+static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
+		struct dsc_optc_config *dsc_optc_cfg);
+static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe);
+static void dsc2_disable(struct display_stream_compressor *dsc);
+static void dsc2_disconnect(struct display_stream_compressor *dsc);
+
+static const struct dsc_funcs dcn20_dsc_funcs = {
+	.dsc_get_enc_caps = dsc2_get_enc_caps,
+	.dsc_read_state = dsc2_read_state,
+	.dsc_validate_stream = dsc2_validate_stream,
+	.dsc_set_config = dsc2_set_config,
+	.dsc_get_packed_pps = dsc2_get_packed_pps,
+	.dsc_enable = dsc2_enable,
+	.dsc_disable = dsc2_disable,
+	.dsc_disconnect = dsc2_disconnect,
+};
+
+/* Macro definitions for REG_SET macros */
+#define CTX \
+	dsc20->base.ctx
+
+#define REG(reg)\
+	dsc20->dsc_regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+	dsc20->dsc_shift->field_name, dsc20->dsc_mask->field_name
+#define DC_LOGGER \
+	dsc->ctx->logger
+
+enum dsc_bits_per_comp {
+	DSC_BPC_8 = 8,
+	DSC_BPC_10 = 10,
+	DSC_BPC_12 = 12,
+	DSC_BPC_UNKNOWN
+};
+
+/* API functions (external or via structure->function_pointer) */
+
+void dsc2_construct(struct dcn20_dsc *dsc,
+		struct dc_context *ctx,
+		int inst,
+		const struct dcn20_dsc_registers *dsc_regs,
+		const struct dcn20_dsc_shift *dsc_shift,
+		const struct dcn20_dsc_mask *dsc_mask)
+{
+	dsc->base.ctx = ctx;
+	dsc->base.inst = inst;
+	dsc->base.funcs = &dcn20_dsc_funcs;
+
+	dsc->dsc_regs = dsc_regs;
+	dsc->dsc_shift = dsc_shift;
+	dsc->dsc_mask = dsc_mask;
+
+	dsc->max_image_width = 5184;
+}
+
+
+#define DCN20_MAX_PIXEL_CLOCK_Mhz      1188
+#define DCN20_MAX_DISPLAY_CLOCK_Mhz    1200
+
+/* This returns the capabilities for a single DSC encoder engine. Number of
+ * slices and total throughput can be doubled, tripled etc. by using
+ * additional DSC engines.
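+ *
+ * Worked example (the numbers follow from the defines above and the checks
+ * in dsc2_get_enc_caps() below; illustrative only): a stream whose
+ * pixel_clock_100Hz is at least 1188 * 10000 (i.e. 1188 MHz) takes the
+ * two-engine path, so NUM_SLICES_1 is cleared, NUM_SLICES_8 becomes
+ * available and max_total_throughput_mps doubles to 2400 Mpix/s; beyond
+ * twice that clock the ODM 4:1 path allows 12/16 slices and raises the
+ * cap to 4800 Mpix/s.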
+ */
+void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
+{
+	dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
+
+	dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
+	dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
+	dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
+	dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
+
+	dsc_enc_caps->lb_bit_depth = 13;
+	dsc_enc_caps->is_block_pred_supported = true;
+
+	dsc_enc_caps->color_formats.bits.RGB = 1;
+	dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
+	dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
+	dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
+	dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
+
+	dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
+	dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
+	dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
+
+	/* Maximum total throughput with all the slices combined. This is different from how DP spec specifies it.
+	 * Our decoder's total throughput in Pix/s is equal to DISPCLK. This is then shared between slices.
+	 * The value below is the absolute maximum value. The actual throughput may be lower, but it'll always
+	 * be sufficient to process the input pixel rate fed into a single DSC engine.
+	 */
+	dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz;
+
+	/* For pixel clocks bigger than the single-pipe limit we'll need two engines, which then doubles our
+	 * throughput and number of slices, but also introduces a lower limit of 2 slices
+	 */
+	if (pixel_clock_100Hz >= DCN20_MAX_PIXEL_CLOCK_Mhz*10000) {
+		dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 0;
+		dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1;
+		dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz * 2;
+	}
+
+	/* For pixel clocks bigger than twice the single-pipe limit we'll need four engines (ODM 4:1),
+	 * which then quadruples our throughput and number of slices
+	 */
+	if (pixel_clock_100Hz > DCN20_MAX_PIXEL_CLOCK_Mhz*10000*2) {
+		dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1;
+		dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1;
+		dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz * 4;
+	}
+
+	dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
+	dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
+}
+
+
+/* This function reads DSC-related register fields, to be logged later by
+ * dcn10_log_hw_state into a dcn_dsc_state struct.
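+ *
+ * A note on units (a reading of the PPS handling elsewhere in this file,
+ * not an authoritative statement): dsc_bits_per_pixel is captured as the
+ * raw PPS field, i.e. the target rate in 1/16-bpp units for
+ * RGB/4:4:4/simple 4:2:2, so a readback of 128 would mean 8.0 bpp;
+ * native 4:2:0/4:2:2 streams store double that value, see
+ * dsc_prepare_config().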
+ */ +static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s) +{ + struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + + REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en); + REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width); + REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bits_per_pixel); + REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height); + REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size); + REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width); + REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height); + REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset); + REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en, + DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source); +} + + +static bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg) +{ + struct dsc_optc_config dsc_optc_cfg; + struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + + if (dsc_cfg->pic_width > dsc20->max_image_width) + return false; + + return dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, &dsc_optc_cfg); +} + + +void dsc_config_log(struct display_stream_compressor *dsc, const struct dsc_config *config) +{ + DC_LOG_DSC("\tnum_slices_h %d", config->dc_dsc_cfg.num_slices_h); + DC_LOG_DSC("\tnum_slices_v %d", config->dc_dsc_cfg.num_slices_v); + DC_LOG_DSC("\tbits_per_pixel %d (%d.%04d)", + config->dc_dsc_cfg.bits_per_pixel, + config->dc_dsc_cfg.bits_per_pixel / 16, + ((config->dc_dsc_cfg.bits_per_pixel % 16) * 10000) / 16); + DC_LOG_DSC("\tcolor_depth %d", config->color_depth); +} + +static void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, + struct dsc_optc_config *dsc_optc_cfg) +{ + bool is_config_ok; + struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + + DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst); + dsc_config_log(dsc, dsc_cfg); + is_config_ok = dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, dsc_optc_cfg); + ASSERT(is_config_ok); + DC_LOG_DSC("programming DSC Picture Parameter Set (PPS):"); + dsc_log_pps(dsc, &dsc20->reg_vals.pps); + dsc_write_to_registers(dsc, &dsc20->reg_vals); +} + + +bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps) +{ + bool is_config_ok; + struct dsc_reg_values dsc_reg_vals; + struct dsc_optc_config dsc_optc_cfg; + + memset(&dsc_reg_vals, 0, sizeof(dsc_reg_vals)); + memset(&dsc_optc_cfg, 0, sizeof(dsc_optc_cfg)); + + DC_LOG_DSC("Getting packed DSC PPS for DSC Config:"); + dsc_config_log(dsc, dsc_cfg); + DC_LOG_DSC("DSC Picture Parameter Set (PPS):"); + is_config_ok = dsc_prepare_config(dsc_cfg, &dsc_reg_vals, &dsc_optc_cfg); + ASSERT(is_config_ok); + drm_dsc_pps_payload_pack((struct drm_dsc_picture_parameter_set *)dsc_packed_pps, &dsc_reg_vals.pps); + dsc_log_pps(dsc, &dsc_reg_vals.pps); + + return is_config_ok; +} + + +static void dsc2_enable(struct display_stream_compressor *dsc, int opp_pipe) +{ + struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + int dsc_clock_en; + int dsc_fw_config; + int enabled_opp_pipe; + + DC_LOG_DSC("enable DSC %d at opp pipe %d", dsc->inst, opp_pipe); + + REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en); + REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe); + if ((dsc_clock_en || dsc_fw_config) && enabled_opp_pipe != opp_pipe) { + DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already enabled!", dsc->inst, enabled_opp_pipe); + ASSERT(0); + } + 
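+	/*
+	 * Ordering note (inferred from this file rather than from hardware
+	 * documentation): enable gates the DSC clock on before pointing the
+	 * DSCRM forwarding mux at the OPP pipe, and dsc2_disable() below
+	 * tears the two down in the reverse order.
+	 */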
+ REG_UPDATE(DSC_TOP_CONTROL, + DSC_CLOCK_EN, 1); + + REG_UPDATE_2(DSCRM_DSC_FORWARD_CONFIG, + DSCRM_DSC_FORWARD_EN, 1, + DSCRM_DSC_OPP_PIPE_SOURCE, opp_pipe); +} + + +static void dsc2_disable(struct display_stream_compressor *dsc) +{ + struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + int dsc_clock_en; + int dsc_fw_config; + int enabled_opp_pipe; + + DC_LOG_DSC("disable DSC %d", dsc->inst); + + REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &dsc_clock_en); + REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &dsc_fw_config, DSCRM_DSC_OPP_PIPE_SOURCE, &enabled_opp_pipe); + if (!dsc_clock_en || !dsc_fw_config) { + DC_LOG_DSC("ERROR: DSC %d at opp pipe %d already disabled!", dsc->inst, enabled_opp_pipe); + ASSERT(0); + } + + REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG, + DSCRM_DSC_FORWARD_EN, 0); + + REG_UPDATE(DSC_TOP_CONTROL, + DSC_CLOCK_EN, 0); +} + +static void dsc2_disconnect(struct display_stream_compressor *dsc) +{ + struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + + DC_LOG_DSC("disconnect DSC %d", dsc->inst); + + REG_UPDATE(DSCRM_DSC_FORWARD_CONFIG, + DSCRM_DSC_FORWARD_EN, 0); +} + +/* This module's internal functions */ +void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps) +{ + int i; + int bits_per_pixel = pps->bits_per_pixel; + + DC_LOG_DSC("\tdsc_version_major %d", pps->dsc_version_major); + DC_LOG_DSC("\tdsc_version_minor %d", pps->dsc_version_minor); + DC_LOG_DSC("\tbits_per_component %d", pps->bits_per_component); + DC_LOG_DSC("\tline_buf_depth %d", pps->line_buf_depth); + DC_LOG_DSC("\tblock_pred_enable %d", pps->block_pred_enable); + DC_LOG_DSC("\tconvert_rgb %d", pps->convert_rgb); + DC_LOG_DSC("\tsimple_422 %d", pps->simple_422); + DC_LOG_DSC("\tvbr_enable %d", pps->vbr_enable); + DC_LOG_DSC("\tbits_per_pixel %d (%d.%04d)", bits_per_pixel, bits_per_pixel / 16, ((bits_per_pixel % 16) * 10000) / 16); + DC_LOG_DSC("\tpic_height %d", pps->pic_height); + DC_LOG_DSC("\tpic_width %d", pps->pic_width); + DC_LOG_DSC("\tslice_height %d", pps->slice_height); + DC_LOG_DSC("\tslice_width %d", pps->slice_width); + DC_LOG_DSC("\tslice_chunk_size %d", pps->slice_chunk_size); + DC_LOG_DSC("\tinitial_xmit_delay %d", pps->initial_xmit_delay); + DC_LOG_DSC("\tinitial_dec_delay %d", pps->initial_dec_delay); + DC_LOG_DSC("\tinitial_scale_value %d", pps->initial_scale_value); + DC_LOG_DSC("\tscale_increment_interval %d", pps->scale_increment_interval); + DC_LOG_DSC("\tscale_decrement_interval %d", pps->scale_decrement_interval); + DC_LOG_DSC("\tfirst_line_bpg_offset %d", pps->first_line_bpg_offset); + DC_LOG_DSC("\tnfl_bpg_offset %d", pps->nfl_bpg_offset); + DC_LOG_DSC("\tslice_bpg_offset %d", pps->slice_bpg_offset); + DC_LOG_DSC("\tinitial_offset %d", pps->initial_offset); + DC_LOG_DSC("\tfinal_offset %d", pps->final_offset); + DC_LOG_DSC("\tflatness_min_qp %d", pps->flatness_min_qp); + DC_LOG_DSC("\tflatness_max_qp %d", pps->flatness_max_qp); + /* DC_LOG_DSC("\trc_parameter_set %d", pps->rc_parameter_set); */ + DC_LOG_DSC("\tnative_420 %d", pps->native_420); + DC_LOG_DSC("\tnative_422 %d", pps->native_422); + DC_LOG_DSC("\tsecond_line_bpg_offset %d", pps->second_line_bpg_offset); + DC_LOG_DSC("\tnsl_bpg_offset %d", pps->nsl_bpg_offset); + DC_LOG_DSC("\tsecond_line_offset_adj %d", pps->second_line_offset_adj); + DC_LOG_DSC("\trc_model_size %d", pps->rc_model_size); + DC_LOG_DSC("\trc_edge_factor %d", pps->rc_edge_factor); + DC_LOG_DSC("\trc_quant_incr_limit0 %d", pps->rc_quant_incr_limit0); + DC_LOG_DSC("\trc_quant_incr_limit1 %d", pps->rc_quant_incr_limit1); + 
DC_LOG_DSC("\trc_tgt_offset_high %d", pps->rc_tgt_offset_high); + DC_LOG_DSC("\trc_tgt_offset_low %d", pps->rc_tgt_offset_low); + + for (i = 0; i < NUM_BUF_RANGES - 1; i++) + DC_LOG_DSC("\trc_buf_thresh[%d] %d", i, pps->rc_buf_thresh[i]); + + for (i = 0; i < NUM_BUF_RANGES; i++) { + DC_LOG_DSC("\trc_range_parameters[%d].range_min_qp %d", i, pps->rc_range_params[i].range_min_qp); + DC_LOG_DSC("\trc_range_parameters[%d].range_max_qp %d", i, pps->rc_range_params[i].range_max_qp); + DC_LOG_DSC("\trc_range_parameters[%d].range_bpg_offset %d", i, pps->rc_range_params[i].range_bpg_offset); + } +} + +void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override) +{ + uint8_t i; + + rc->rc_model_size = override->rc_model_size; + for (i = 0; i < DC_DSC_RC_BUF_THRESH_SIZE; i++) + rc->rc_buf_thresh[i] = override->rc_buf_thresh[i]; + for (i = 0; i < DC_DSC_QP_SET_SIZE; i++) { + rc->qp_min[i] = override->rc_minqp[i]; + rc->qp_max[i] = override->rc_maxqp[i]; + rc->ofs[i] = override->rc_offset[i]; + } + + rc->rc_tgt_offset_hi = override->rc_tgt_offset_hi; + rc->rc_tgt_offset_lo = override->rc_tgt_offset_lo; + rc->rc_edge_factor = override->rc_edge_factor; + rc->rc_quant_incr_limit0 = override->rc_quant_incr_limit0; + rc->rc_quant_incr_limit1 = override->rc_quant_incr_limit1; + + rc->initial_fullness_offset = override->initial_fullness_offset; + rc->initial_xmit_delay = override->initial_delay; + + rc->flatness_min_qp = override->flatness_min_qp; + rc->flatness_max_qp = override->flatness_max_qp; + rc->flatness_det_thresh = override->flatness_det_thresh; +} + +bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals, + struct dsc_optc_config *dsc_optc_cfg) +{ + struct dsc_parameters dsc_params; + struct rc_params rc; + + /* Validate input parameters */ + ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_h); + ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_v); + ASSERT(dsc_cfg->dc_dsc_cfg.version_minor == 1 || dsc_cfg->dc_dsc_cfg.version_minor == 2); + ASSERT(dsc_cfg->pic_width); + ASSERT(dsc_cfg->pic_height); + ASSERT((dsc_cfg->dc_dsc_cfg.version_minor == 1 && + (8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 13)) || + (dsc_cfg->dc_dsc_cfg.version_minor == 2 && + ((8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 15) || + dsc_cfg->dc_dsc_cfg.linebuf_depth == 0))); + ASSERT(96 <= dsc_cfg->dc_dsc_cfg.bits_per_pixel && dsc_cfg->dc_dsc_cfg.bits_per_pixel <= 0x3ff); // 6.0 <= bits_per_pixel <= 63.9375 + + if (!dsc_cfg->dc_dsc_cfg.num_slices_v || !dsc_cfg->dc_dsc_cfg.num_slices_h || + !(dsc_cfg->dc_dsc_cfg.version_minor == 1 || dsc_cfg->dc_dsc_cfg.version_minor == 2) || + !dsc_cfg->pic_width || !dsc_cfg->pic_height || + !((dsc_cfg->dc_dsc_cfg.version_minor == 1 && // v1.1 line buffer depth range: + 8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 13) || + (dsc_cfg->dc_dsc_cfg.version_minor == 2 && // v1.2 line buffer depth range: + ((8 <= dsc_cfg->dc_dsc_cfg.linebuf_depth && dsc_cfg->dc_dsc_cfg.linebuf_depth <= 15) || + dsc_cfg->dc_dsc_cfg.linebuf_depth == 0))) || + !(96 <= dsc_cfg->dc_dsc_cfg.bits_per_pixel && dsc_cfg->dc_dsc_cfg.bits_per_pixel <= 0x3ff)) { + dm_output_to_console("%s: Invalid parameters\n", __func__); + return false; + } + + dsc_init_reg_values(dsc_reg_vals); + + /* Copy input config */ + dsc_reg_vals->pixel_format = dsc_dc_pixel_encoding_to_dsc_pixel_format(dsc_cfg->pixel_encoding, dsc_cfg->dc_dsc_cfg.ycbcr422_simple); + dsc_reg_vals->num_slices_h = 
dsc_cfg->dc_dsc_cfg.num_slices_h; + dsc_reg_vals->num_slices_v = dsc_cfg->dc_dsc_cfg.num_slices_v; + dsc_reg_vals->pps.dsc_version_minor = dsc_cfg->dc_dsc_cfg.version_minor; + dsc_reg_vals->pps.pic_width = dsc_cfg->pic_width; + dsc_reg_vals->pps.pic_height = dsc_cfg->pic_height; + dsc_reg_vals->pps.bits_per_component = dsc_dc_color_depth_to_dsc_bits_per_comp(dsc_cfg->color_depth); + dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable; + dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth; + dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1; + dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0; + + // TODO: in addition to validating slice height (pic height must be divisible by slice height), + // see what happens when the same condition doesn't apply for slice_width/pic_width. + dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h; + dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v; + + ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height); + if (!(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height)) { + dm_output_to_console("%s: pix height %d not divisible by num_slices_v %d\n\n", __func__, dsc_cfg->pic_height, dsc_cfg->dc_dsc_cfg.num_slices_v); + return false; + } + + dsc_reg_vals->bpp_x32 = dsc_cfg->dc_dsc_cfg.bits_per_pixel << 1; + if (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422) + dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32; + else + dsc_reg_vals->pps.bits_per_pixel = dsc_reg_vals->bpp_x32 >> 1; + + dsc_reg_vals->pps.convert_rgb = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB ? 
1 : 0; + dsc_reg_vals->pps.native_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422); + dsc_reg_vals->pps.native_420 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420); + dsc_reg_vals->pps.simple_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422); + + calc_rc_params(&rc, &dsc_reg_vals->pps); + + if (dsc_cfg->dc_dsc_cfg.rc_params_ovrd) + dsc_override_rc_params(&rc, dsc_cfg->dc_dsc_cfg.rc_params_ovrd); + + if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &rc, &dsc_params)) { + dm_output_to_console("%s: DSC config failed\n", __func__); + return false; + } + + dsc_update_from_dsc_parameters(dsc_reg_vals, &dsc_params); + + dsc_optc_cfg->bytes_per_pixel = dsc_params.bytes_per_pixel; + dsc_optc_cfg->slice_width = dsc_reg_vals->pps.slice_width; + dsc_optc_cfg->is_pixel_format_444 = dsc_reg_vals->pixel_format == DSC_PIXFMT_RGB || + dsc_reg_vals->pixel_format == DSC_PIXFMT_YCBCR444 || + dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422; + + return true; +} + + +enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, bool is_ycbcr422_simple) +{ + enum dsc_pixel_format dsc_pix_fmt = DSC_PIXFMT_UNKNOWN; + + /* NOTE: We don't support DSC_PIXFMT_SIMPLE_YCBCR422 */ + + switch (dc_pix_enc) { + case PIXEL_ENCODING_RGB: + dsc_pix_fmt = DSC_PIXFMT_RGB; + break; + case PIXEL_ENCODING_YCBCR422: + if (is_ycbcr422_simple) + dsc_pix_fmt = DSC_PIXFMT_SIMPLE_YCBCR422; + else + dsc_pix_fmt = DSC_PIXFMT_NATIVE_YCBCR422; + break; + case PIXEL_ENCODING_YCBCR444: + dsc_pix_fmt = DSC_PIXFMT_YCBCR444; + break; + case PIXEL_ENCODING_YCBCR420: + dsc_pix_fmt = DSC_PIXFMT_NATIVE_YCBCR420; + break; + default: + dsc_pix_fmt = DSC_PIXFMT_UNKNOWN; + break; + } + + ASSERT(dsc_pix_fmt != DSC_PIXFMT_UNKNOWN); + return dsc_pix_fmt; +} + + +enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth) +{ + enum dsc_bits_per_comp bpc = DSC_BPC_UNKNOWN; + + switch (dc_color_depth) { + case COLOR_DEPTH_888: + bpc = DSC_BPC_8; + break; + case COLOR_DEPTH_101010: + bpc = DSC_BPC_10; + break; + case COLOR_DEPTH_121212: + bpc = DSC_BPC_12; + break; + default: + bpc = DSC_BPC_UNKNOWN; + break; + } + + return bpc; +} + + +void dsc_init_reg_values(struct dsc_reg_values *reg_vals) +{ + int i; + + memset(reg_vals, 0, sizeof(struct dsc_reg_values)); + + /* Non-PPS values */ + reg_vals->dsc_clock_enable = 1; + reg_vals->dsc_clock_gating_disable = 0; + reg_vals->underflow_recovery_en = 0; + reg_vals->underflow_occurred_int_en = 0; + reg_vals->underflow_occurred_status = 0; + reg_vals->ich_reset_at_eol = 0; + reg_vals->alternate_ich_encoding_en = 0; + reg_vals->rc_buffer_model_size = 0; + /*reg_vals->disable_ich = 0;*/ + reg_vals->dsc_dbg_en = 0; + + for (i = 0; i < 4; i++) + reg_vals->rc_buffer_model_overflow_int_en[i] = 0; + + /* PPS values */ + reg_vals->pps.dsc_version_minor = 2; + reg_vals->pps.dsc_version_major = 1; + reg_vals->pps.line_buf_depth = 9; + reg_vals->pps.bits_per_component = 8; + reg_vals->pps.block_pred_enable = 1; + reg_vals->pps.slice_chunk_size = 0; + reg_vals->pps.pic_width = 0; + reg_vals->pps.pic_height = 0; + reg_vals->pps.slice_width = 0; + reg_vals->pps.slice_height = 0; + reg_vals->pps.initial_xmit_delay = 170; + reg_vals->pps.initial_dec_delay = 0; + reg_vals->pps.initial_scale_value = 0; + reg_vals->pps.scale_increment_interval = 0; + reg_vals->pps.scale_decrement_interval = 0; + reg_vals->pps.nfl_bpg_offset = 0; + reg_vals->pps.slice_bpg_offset = 0; + reg_vals->pps.nsl_bpg_offset = 0; + 
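	/*
+	 * The non-zero defaults in this function (e.g. rc_model_size 8192,
+	 * rc_edge_factor 6, rc_tgt_offset 3) follow the DSC spec's suggested
+	 * rate-control values; this is a reading of the constants, not a spec
+	 * citation, and they are recomputed via calc_rc_params() /
+	 * dscc_compute_dsc_parameters() before being programmed.
+	 */
+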
reg_vals->pps.initial_offset = 6144; + reg_vals->pps.final_offset = 0; + reg_vals->pps.flatness_min_qp = 3; + reg_vals->pps.flatness_max_qp = 12; + reg_vals->pps.rc_model_size = 8192; + reg_vals->pps.rc_edge_factor = 6; + reg_vals->pps.rc_quant_incr_limit0 = 11; + reg_vals->pps.rc_quant_incr_limit1 = 11; + reg_vals->pps.rc_tgt_offset_low = 3; + reg_vals->pps.rc_tgt_offset_high = 3; +} + +/* Updates dsc_reg_values::reg_vals::xxx fields based on the values from computed params. + * This is required because dscc_compute_dsc_parameters returns a modified PPS, which in turn + * affects non-PPS register values. + */ +void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params) +{ + int i; + + reg_vals->pps = dsc_params->pps; + + // pps_computed will have the "expanded" values; need to shift them to make them fit for regs. + for (i = 0; i < NUM_BUF_RANGES - 1; i++) + reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6; + + reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size; +} + +static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals) +{ + uint32_t temp_int; + struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + + REG_SET(DSC_DEBUG_CONTROL, 0, + DSC_DBG_EN, reg_vals->dsc_dbg_en); + + // dsccif registers + REG_SET_5(DSCCIF_CONFIG0, 0, + INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, reg_vals->underflow_recovery_en, + INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, reg_vals->underflow_occurred_int_en, + INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, reg_vals->underflow_occurred_status, + INPUT_PIXEL_FORMAT, reg_vals->pixel_format, + DSCCIF_CONFIG0__BITS_PER_COMPONENT, reg_vals->pps.bits_per_component); + + REG_SET_2(DSCCIF_CONFIG1, 0, + PIC_WIDTH, reg_vals->pps.pic_width, + PIC_HEIGHT, reg_vals->pps.pic_height); + + // dscc registers + if (dsc20->dsc_mask->ICH_RESET_AT_END_OF_LINE == 0) { + REG_SET_3(DSCC_CONFIG0, 0, + NUMBER_OF_SLICES_PER_LINE, reg_vals->num_slices_h - 1, + ALTERNATE_ICH_ENCODING_EN, reg_vals->alternate_ich_encoding_en, + NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, reg_vals->num_slices_v - 1); + } else { + REG_SET_4(DSCC_CONFIG0, 0, ICH_RESET_AT_END_OF_LINE, + reg_vals->ich_reset_at_eol, NUMBER_OF_SLICES_PER_LINE, + reg_vals->num_slices_h - 1, ALTERNATE_ICH_ENCODING_EN, + reg_vals->alternate_ich_encoding_en, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, + reg_vals->num_slices_v - 1); + } + + REG_SET(DSCC_CONFIG1, 0, + DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size); + /*REG_SET_2(DSCC_CONFIG1, 0, + DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size, + DSCC_DISABLE_ICH, reg_vals->disable_ich);*/ + + REG_SET_4(DSCC_INTERRUPT_CONTROL_STATUS, 0, + DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[0], + DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[1], + DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[2], + DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[3]); + + REG_SET_3(DSCC_PPS_CONFIG0, 0, + DSC_VERSION_MINOR, reg_vals->pps.dsc_version_minor, + LINEBUF_DEPTH, reg_vals->pps.line_buf_depth, + DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, reg_vals->pps.bits_per_component); + + if (reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420 || reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422) + temp_int = reg_vals->bpp_x32; + else + temp_int = 
reg_vals->bpp_x32 >> 1; + + REG_SET_7(DSCC_PPS_CONFIG1, 0, + BITS_PER_PIXEL, temp_int, + SIMPLE_422, reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422, + CONVERT_RGB, reg_vals->pixel_format == DSC_PIXFMT_RGB, + BLOCK_PRED_ENABLE, reg_vals->pps.block_pred_enable, + NATIVE_422, reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR422, + NATIVE_420, reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420, + CHUNK_SIZE, reg_vals->pps.slice_chunk_size); + + REG_SET_2(DSCC_PPS_CONFIG2, 0, + PIC_WIDTH, reg_vals->pps.pic_width, + PIC_HEIGHT, reg_vals->pps.pic_height); + + REG_SET_2(DSCC_PPS_CONFIG3, 0, + SLICE_WIDTH, reg_vals->pps.slice_width, + SLICE_HEIGHT, reg_vals->pps.slice_height); + + REG_SET(DSCC_PPS_CONFIG4, 0, + INITIAL_XMIT_DELAY, reg_vals->pps.initial_xmit_delay); + + REG_SET_2(DSCC_PPS_CONFIG5, 0, + INITIAL_SCALE_VALUE, reg_vals->pps.initial_scale_value, + SCALE_INCREMENT_INTERVAL, reg_vals->pps.scale_increment_interval); + + REG_SET_3(DSCC_PPS_CONFIG6, 0, + SCALE_DECREMENT_INTERVAL, reg_vals->pps.scale_decrement_interval, + FIRST_LINE_BPG_OFFSET, reg_vals->pps.first_line_bpg_offset, + SECOND_LINE_BPG_OFFSET, reg_vals->pps.second_line_bpg_offset); + + REG_SET_2(DSCC_PPS_CONFIG7, 0, + NFL_BPG_OFFSET, reg_vals->pps.nfl_bpg_offset, + SLICE_BPG_OFFSET, reg_vals->pps.slice_bpg_offset); + + REG_SET_2(DSCC_PPS_CONFIG8, 0, + NSL_BPG_OFFSET, reg_vals->pps.nsl_bpg_offset, + SECOND_LINE_OFFSET_ADJ, reg_vals->pps.second_line_offset_adj); + + REG_SET_2(DSCC_PPS_CONFIG9, 0, + INITIAL_OFFSET, reg_vals->pps.initial_offset, + FINAL_OFFSET, reg_vals->pps.final_offset); + + REG_SET_3(DSCC_PPS_CONFIG10, 0, + FLATNESS_MIN_QP, reg_vals->pps.flatness_min_qp, + FLATNESS_MAX_QP, reg_vals->pps.flatness_max_qp, + RC_MODEL_SIZE, reg_vals->pps.rc_model_size); + + REG_SET_5(DSCC_PPS_CONFIG11, 0, + RC_EDGE_FACTOR, reg_vals->pps.rc_edge_factor, + RC_QUANT_INCR_LIMIT0, reg_vals->pps.rc_quant_incr_limit0, + RC_QUANT_INCR_LIMIT1, reg_vals->pps.rc_quant_incr_limit1, + RC_TGT_OFFSET_LO, reg_vals->pps.rc_tgt_offset_low, + RC_TGT_OFFSET_HI, reg_vals->pps.rc_tgt_offset_high); + + REG_SET_4(DSCC_PPS_CONFIG12, 0, + RC_BUF_THRESH0, reg_vals->pps.rc_buf_thresh[0], + RC_BUF_THRESH1, reg_vals->pps.rc_buf_thresh[1], + RC_BUF_THRESH2, reg_vals->pps.rc_buf_thresh[2], + RC_BUF_THRESH3, reg_vals->pps.rc_buf_thresh[3]); + + REG_SET_4(DSCC_PPS_CONFIG13, 0, + RC_BUF_THRESH4, reg_vals->pps.rc_buf_thresh[4], + RC_BUF_THRESH5, reg_vals->pps.rc_buf_thresh[5], + RC_BUF_THRESH6, reg_vals->pps.rc_buf_thresh[6], + RC_BUF_THRESH7, reg_vals->pps.rc_buf_thresh[7]); + + REG_SET_4(DSCC_PPS_CONFIG14, 0, + RC_BUF_THRESH8, reg_vals->pps.rc_buf_thresh[8], + RC_BUF_THRESH9, reg_vals->pps.rc_buf_thresh[9], + RC_BUF_THRESH10, reg_vals->pps.rc_buf_thresh[10], + RC_BUF_THRESH11, reg_vals->pps.rc_buf_thresh[11]); + + REG_SET_5(DSCC_PPS_CONFIG15, 0, + RC_BUF_THRESH12, reg_vals->pps.rc_buf_thresh[12], + RC_BUF_THRESH13, reg_vals->pps.rc_buf_thresh[13], + RANGE_MIN_QP0, reg_vals->pps.rc_range_params[0].range_min_qp, + RANGE_MAX_QP0, reg_vals->pps.rc_range_params[0].range_max_qp, + RANGE_BPG_OFFSET0, reg_vals->pps.rc_range_params[0].range_bpg_offset); + + REG_SET_6(DSCC_PPS_CONFIG16, 0, + RANGE_MIN_QP1, reg_vals->pps.rc_range_params[1].range_min_qp, + RANGE_MAX_QP1, reg_vals->pps.rc_range_params[1].range_max_qp, + RANGE_BPG_OFFSET1, reg_vals->pps.rc_range_params[1].range_bpg_offset, + RANGE_MIN_QP2, reg_vals->pps.rc_range_params[2].range_min_qp, + RANGE_MAX_QP2, reg_vals->pps.rc_range_params[2].range_max_qp, + RANGE_BPG_OFFSET2, 
reg_vals->pps.rc_range_params[2].range_bpg_offset); + + REG_SET_6(DSCC_PPS_CONFIG17, 0, + RANGE_MIN_QP3, reg_vals->pps.rc_range_params[3].range_min_qp, + RANGE_MAX_QP3, reg_vals->pps.rc_range_params[3].range_max_qp, + RANGE_BPG_OFFSET3, reg_vals->pps.rc_range_params[3].range_bpg_offset, + RANGE_MIN_QP4, reg_vals->pps.rc_range_params[4].range_min_qp, + RANGE_MAX_QP4, reg_vals->pps.rc_range_params[4].range_max_qp, + RANGE_BPG_OFFSET4, reg_vals->pps.rc_range_params[4].range_bpg_offset); + + REG_SET_6(DSCC_PPS_CONFIG18, 0, + RANGE_MIN_QP5, reg_vals->pps.rc_range_params[5].range_min_qp, + RANGE_MAX_QP5, reg_vals->pps.rc_range_params[5].range_max_qp, + RANGE_BPG_OFFSET5, reg_vals->pps.rc_range_params[5].range_bpg_offset, + RANGE_MIN_QP6, reg_vals->pps.rc_range_params[6].range_min_qp, + RANGE_MAX_QP6, reg_vals->pps.rc_range_params[6].range_max_qp, + RANGE_BPG_OFFSET6, reg_vals->pps.rc_range_params[6].range_bpg_offset); + + REG_SET_6(DSCC_PPS_CONFIG19, 0, + RANGE_MIN_QP7, reg_vals->pps.rc_range_params[7].range_min_qp, + RANGE_MAX_QP7, reg_vals->pps.rc_range_params[7].range_max_qp, + RANGE_BPG_OFFSET7, reg_vals->pps.rc_range_params[7].range_bpg_offset, + RANGE_MIN_QP8, reg_vals->pps.rc_range_params[8].range_min_qp, + RANGE_MAX_QP8, reg_vals->pps.rc_range_params[8].range_max_qp, + RANGE_BPG_OFFSET8, reg_vals->pps.rc_range_params[8].range_bpg_offset); + + REG_SET_6(DSCC_PPS_CONFIG20, 0, + RANGE_MIN_QP9, reg_vals->pps.rc_range_params[9].range_min_qp, + RANGE_MAX_QP9, reg_vals->pps.rc_range_params[9].range_max_qp, + RANGE_BPG_OFFSET9, reg_vals->pps.rc_range_params[9].range_bpg_offset, + RANGE_MIN_QP10, reg_vals->pps.rc_range_params[10].range_min_qp, + RANGE_MAX_QP10, reg_vals->pps.rc_range_params[10].range_max_qp, + RANGE_BPG_OFFSET10, reg_vals->pps.rc_range_params[10].range_bpg_offset); + + REG_SET_6(DSCC_PPS_CONFIG21, 0, + RANGE_MIN_QP11, reg_vals->pps.rc_range_params[11].range_min_qp, + RANGE_MAX_QP11, reg_vals->pps.rc_range_params[11].range_max_qp, + RANGE_BPG_OFFSET11, reg_vals->pps.rc_range_params[11].range_bpg_offset, + RANGE_MIN_QP12, reg_vals->pps.rc_range_params[12].range_min_qp, + RANGE_MAX_QP12, reg_vals->pps.rc_range_params[12].range_max_qp, + RANGE_BPG_OFFSET12, reg_vals->pps.rc_range_params[12].range_bpg_offset); + + REG_SET_6(DSCC_PPS_CONFIG22, 0, + RANGE_MIN_QP13, reg_vals->pps.rc_range_params[13].range_min_qp, + RANGE_MAX_QP13, reg_vals->pps.rc_range_params[13].range_max_qp, + RANGE_BPG_OFFSET13, reg_vals->pps.rc_range_params[13].range_bpg_offset, + RANGE_MIN_QP14, reg_vals->pps.rc_range_params[14].range_min_qp, + RANGE_MAX_QP14, reg_vals->pps.rc_range_params[14].range_max_qp, + RANGE_BPG_OFFSET14, reg_vals->pps.rc_range_params[14].range_bpg_offset); + +} + diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h new file mode 100644 index 0000000000..ba869387c3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h @@ -0,0 +1,589 @@ +/* Copyright 2017 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __DCN20_DSC_H__ +#define __DCN20_DSC_H__ + +#include "dsc.h" +#include "dsc/dscc_types.h" +#include + +#define TO_DCN20_DSC(dsc)\ + container_of(dsc, struct dcn20_dsc, base) + +#define DSC_REG_LIST_DCN20(id) \ + SRI(DSC_TOP_CONTROL, DSC_TOP, id),\ + SRI(DSC_DEBUG_CONTROL, DSC_TOP, id),\ + SRI(DSCC_CONFIG0, DSCC, id),\ + SRI(DSCC_CONFIG1, DSCC, id),\ + SRI(DSCC_STATUS, DSCC, id),\ + SRI(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id),\ + SRI(DSCC_PPS_CONFIG0, DSCC, id),\ + SRI(DSCC_PPS_CONFIG1, DSCC, id),\ + SRI(DSCC_PPS_CONFIG2, DSCC, id),\ + SRI(DSCC_PPS_CONFIG3, DSCC, id),\ + SRI(DSCC_PPS_CONFIG4, DSCC, id),\ + SRI(DSCC_PPS_CONFIG5, DSCC, id),\ + SRI(DSCC_PPS_CONFIG6, DSCC, id),\ + SRI(DSCC_PPS_CONFIG7, DSCC, id),\ + SRI(DSCC_PPS_CONFIG8, DSCC, id),\ + SRI(DSCC_PPS_CONFIG9, DSCC, id),\ + SRI(DSCC_PPS_CONFIG10, DSCC, id),\ + SRI(DSCC_PPS_CONFIG11, DSCC, id),\ + SRI(DSCC_PPS_CONFIG12, DSCC, id),\ + SRI(DSCC_PPS_CONFIG13, DSCC, id),\ + SRI(DSCC_PPS_CONFIG14, DSCC, id),\ + SRI(DSCC_PPS_CONFIG15, DSCC, id),\ + SRI(DSCC_PPS_CONFIG16, DSCC, id),\ + SRI(DSCC_PPS_CONFIG17, DSCC, id),\ + SRI(DSCC_PPS_CONFIG18, DSCC, id),\ + SRI(DSCC_PPS_CONFIG19, DSCC, id),\ + SRI(DSCC_PPS_CONFIG20, DSCC, id),\ + SRI(DSCC_PPS_CONFIG21, DSCC, id),\ + SRI(DSCC_PPS_CONFIG22, DSCC, id),\ + SRI(DSCC_MEM_POWER_CONTROL, DSCC, id),\ + SRI(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id),\ + SRI(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id),\ + SRI(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id),\ + SRI(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id),\ + SRI(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id),\ + SRI(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id),\ + SRI(DSCC_MAX_ABS_ERROR0, DSCC, id),\ + SRI(DSCC_MAX_ABS_ERROR1, DSCC, id),\ + SRI(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\ + SRI(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\ + SRI(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\ + SRI(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\ + SRI(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\ + SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\ + SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\ + SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\ + SRI(DSCCIF_CONFIG0, DSCCIF, id),\ + SRI(DSCCIF_CONFIG1, DSCCIF, id),\ + SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id) + + +#define DSC_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +//Used in resolving the 
corner case with duplicate field name +#define DSC2_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## _ ## field_name ## post_fix + +#define DSC_REG_LIST_SH_MASK_DCN20(mask_sh)\ + DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \ + DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \ + DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \ + DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_CONFIG0, ICH_RESET_AT_END_OF_LINE, mask_sh), \ + DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \ + DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \ + DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \ + /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \ + DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG0, 
DSC_VERSION_MAJOR, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG0, PPS_IDENTIFIER, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG0, LINEBUF_DEPTH, mask_sh), \ + DSC2_SF(DSCC0, DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BITS_PER_PIXEL, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG1, VBR_ENABLE, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG1, SIMPLE_422, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CONVERT_RGB, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_422, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_420, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CHUNK_SIZE, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_WIDTH, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_HEIGHT, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_WIDTH, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_HEIGHT, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_XMIT_DELAY, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_DEC_DELAY, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG5, INITIAL_SCALE_VALUE, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG5, SCALE_INCREMENT_INTERVAL, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SCALE_DECREMENT_INTERVAL, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG6, FIRST_LINE_BPG_OFFSET, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SECOND_LINE_BPG_OFFSET, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG7, NFL_BPG_OFFSET, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG8, NSL_BPG_OFFSET, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG8, SECOND_LINE_OFFSET_ADJ, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG9, INITIAL_OFFSET, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG9, FINAL_OFFSET, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MIN_QP, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MAX_QP, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG10, RC_MODEL_SIZE, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_EDGE_FACTOR, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT0, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT1, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_LO, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_HI, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH0, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH1, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH2, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH3, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH4, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH5, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH6, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH7, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH8, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH9, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH10, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH11, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH12, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH13, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MIN_QP0, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MAX_QP0, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_BPG_OFFSET0, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP1, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP1, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET1, mask_sh), \ + 
DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP2, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP2, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET2, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP3, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP3, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET3, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP4, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP4, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET4, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP5, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP5, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET5, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP6, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP6, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET6, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP7, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP7, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET7, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP8, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP8, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET8, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP9, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP9, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET9, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP10, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP10, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET10, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP11, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP11, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET11, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP12, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP12, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET12, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP13, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP13, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET13, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP14, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP14, mask_sh), \ + DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET14, mask_sh), \ + DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \ + DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_FORCE, mask_sh), \ + DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_DIS, mask_sh), \ + DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_STATE, mask_sh), \ + DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_FORCE, mask_sh), \ + DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_DIS, mask_sh), \ + DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_STATE, mask_sh), \ + DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC_R_Y_SQUARED_ERROR_LOWER, mask_sh), \ + DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC_R_Y_SQUARED_ERROR_UPPER, mask_sh), \ + DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC_G_CB_SQUARED_ERROR_LOWER, mask_sh), \ + DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC_G_CB_SQUARED_ERROR_UPPER, mask_sh), \ + DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC_B_CR_SQUARED_ERROR_LOWER, mask_sh), \ + DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC_B_CR_SQUARED_ERROR_UPPER, mask_sh), \ + DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, 
DSCC_R_Y_MAX_ABS_ERROR, mask_sh), \ + DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_G_CB_MAX_ABS_ERROR, mask_sh), \ + DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR1, DSCC_B_CR_MAX_ABS_ERROR, mask_sh), \ + DSC_SF(DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \ + DSC_SF(DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \ + DSC_SF(DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \ + DSC_SF(DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \ + DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \ + DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \ + DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \ + DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \ + DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \ + DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \ + DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \ + DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_PIXEL_FORMAT, mask_sh), \ + DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \ + DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \ + DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_WIDTH, mask_sh), \ + DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_HEIGHT, mask_sh), \ + DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \ + DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh) + + + +#define DSC_FIELD_LIST_DCN20(type)\ + type DSC_CLOCK_EN; \ + type DSC_DISPCLK_R_GATE_DIS; \ + type DSC_DSCCLK_R_GATE_DIS; \ + type DSC_DBG_EN; \ + type DSC_TEST_CLOCK_MUX_SEL; \ + type ICH_RESET_AT_END_OF_LINE; \ + type NUMBER_OF_SLICES_PER_LINE; \ + type ALTERNATE_ICH_ENCODING_EN; \ + type NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION; \ + type DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE; \ + /*type DSCC_DISABLE_ICH;*/ \ + type DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \ + type DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED; \ + type DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED; \ + type DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED; \ + type DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED; \ + type DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED; \ + type DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED; \ + type DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED; \ + type DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED; \ + type DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED; \ + type DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED; \ + type DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED; \ + type DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED; \ + type DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN; \ + type DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN; \ + type DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN; \ + type DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN; \ + type DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN; \ + type DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN; \ + type DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN; \ + type DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN; \ + type DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN; \ + type DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN; \ + type DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN; \ + type 
DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN; \ + type DSC_VERSION_MINOR; \ + type DSC_VERSION_MAJOR; \ + type PPS_IDENTIFIER; \ + type LINEBUF_DEPTH; \ + type DSCC_PPS_CONFIG0__BITS_PER_COMPONENT; \ + type BITS_PER_PIXEL; \ + type VBR_ENABLE; \ + type SIMPLE_422; \ + type CONVERT_RGB; \ + type BLOCK_PRED_ENABLE; \ + type NATIVE_422; \ + type NATIVE_420; \ + type CHUNK_SIZE; \ + type PIC_WIDTH; \ + type PIC_HEIGHT; \ + type SLICE_WIDTH; \ + type SLICE_HEIGHT; \ + type INITIAL_XMIT_DELAY; \ + type INITIAL_DEC_DELAY; \ + type INITIAL_SCALE_VALUE; \ + type SCALE_INCREMENT_INTERVAL; \ + type SCALE_DECREMENT_INTERVAL; \ + type FIRST_LINE_BPG_OFFSET; \ + type SECOND_LINE_BPG_OFFSET; \ + type NFL_BPG_OFFSET; \ + type SLICE_BPG_OFFSET; \ + type NSL_BPG_OFFSET; \ + type SECOND_LINE_OFFSET_ADJ; \ + type INITIAL_OFFSET; \ + type FINAL_OFFSET; \ + type FLATNESS_MIN_QP; \ + type FLATNESS_MAX_QP; \ + type RC_MODEL_SIZE; \ + type RC_EDGE_FACTOR; \ + type RC_QUANT_INCR_LIMIT0; \ + type RC_QUANT_INCR_LIMIT1; \ + type RC_TGT_OFFSET_LO; \ + type RC_TGT_OFFSET_HI; \ + type RC_BUF_THRESH0; \ + type RC_BUF_THRESH1; \ + type RC_BUF_THRESH2; \ + type RC_BUF_THRESH3; \ + type RC_BUF_THRESH4; \ + type RC_BUF_THRESH5; \ + type RC_BUF_THRESH6; \ + type RC_BUF_THRESH7; \ + type RC_BUF_THRESH8; \ + type RC_BUF_THRESH9; \ + type RC_BUF_THRESH10; \ + type RC_BUF_THRESH11; \ + type RC_BUF_THRESH12; \ + type RC_BUF_THRESH13; \ + type RANGE_MIN_QP0; \ + type RANGE_MAX_QP0; \ + type RANGE_BPG_OFFSET0; \ + type RANGE_MIN_QP1; \ + type RANGE_MAX_QP1; \ + type RANGE_BPG_OFFSET1; \ + type RANGE_MIN_QP2; \ + type RANGE_MAX_QP2; \ + type RANGE_BPG_OFFSET2; \ + type RANGE_MIN_QP3; \ + type RANGE_MAX_QP3; \ + type RANGE_BPG_OFFSET3; \ + type RANGE_MIN_QP4; \ + type RANGE_MAX_QP4; \ + type RANGE_BPG_OFFSET4; \ + type RANGE_MIN_QP5; \ + type RANGE_MAX_QP5; \ + type RANGE_BPG_OFFSET5; \ + type RANGE_MIN_QP6; \ + type RANGE_MAX_QP6; \ + type RANGE_BPG_OFFSET6; \ + type RANGE_MIN_QP7; \ + type RANGE_MAX_QP7; \ + type RANGE_BPG_OFFSET7; \ + type RANGE_MIN_QP8; \ + type RANGE_MAX_QP8; \ + type RANGE_BPG_OFFSET8; \ + type RANGE_MIN_QP9; \ + type RANGE_MAX_QP9; \ + type RANGE_BPG_OFFSET9; \ + type RANGE_MIN_QP10; \ + type RANGE_MAX_QP10; \ + type RANGE_BPG_OFFSET10; \ + type RANGE_MIN_QP11; \ + type RANGE_MAX_QP11; \ + type RANGE_BPG_OFFSET11; \ + type RANGE_MIN_QP12; \ + type RANGE_MAX_QP12; \ + type RANGE_BPG_OFFSET12; \ + type RANGE_MIN_QP13; \ + type RANGE_MAX_QP13; \ + type RANGE_BPG_OFFSET13; \ + type RANGE_MIN_QP14; \ + type RANGE_MAX_QP14; \ + type RANGE_BPG_OFFSET14; \ + type DSCC_DEFAULT_MEM_LOW_POWER_STATE; \ + type DSCC_MEM_PWR_FORCE; \ + type DSCC_MEM_PWR_DIS; \ + type DSCC_MEM_PWR_STATE; \ + type DSCC_NATIVE_422_MEM_PWR_FORCE; \ + type DSCC_NATIVE_422_MEM_PWR_DIS; \ + type DSCC_NATIVE_422_MEM_PWR_STATE; \ + type DSCC_R_Y_SQUARED_ERROR_LOWER; \ + type DSCC_R_Y_SQUARED_ERROR_UPPER; \ + type DSCC_G_CB_SQUARED_ERROR_LOWER; \ + type DSCC_G_CB_SQUARED_ERROR_UPPER; \ + type DSCC_B_CR_SQUARED_ERROR_LOWER; \ + type DSCC_B_CR_SQUARED_ERROR_UPPER; \ + type DSCC_R_Y_MAX_ABS_ERROR; \ + type DSCC_G_CB_MAX_ABS_ERROR; \ + type DSCC_B_CR_MAX_ABS_ERROR; \ + type DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL; \ + type DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL; \ + type DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL; \ + type DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL; \ + type DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL; \ + type DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL; \ + type DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL; \ + type 
DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL; \ + type DSCC_UPDATE_PENDING_STATUS; \ + type DSCC_UPDATE_TAKEN_STATUS; \ + type DSCC_UPDATE_TAKEN_ACK; \ + type DSCC_RATE_BUFFER0_FULLNESS_LEVEL; \ + type DSCC_RATE_BUFFER1_FULLNESS_LEVEL; \ + type DSCC_RATE_BUFFER2_FULLNESS_LEVEL; \ + type DSCC_RATE_BUFFER3_FULLNESS_LEVEL; \ + type DSCC_RATE_CONTROL_BUFFER0_FULLNESS_LEVEL; \ + type DSCC_RATE_CONTROL_BUFFER1_FULLNESS_LEVEL; \ + type DSCC_RATE_CONTROL_BUFFER2_FULLNESS_LEVEL; \ + type DSCC_RATE_CONTROL_BUFFER3_FULLNESS_LEVEL; \ + type DSCC_RATE_BUFFER0_INITIAL_XMIT_DELAY_REACHED; \ + type DSCC_RATE_BUFFER1_INITIAL_XMIT_DELAY_REACHED; \ + type DSCC_RATE_BUFFER2_INITIAL_XMIT_DELAY_REACHED; \ + type DSCC_RATE_BUFFER3_INITIAL_XMIT_DELAY_REACHED; \ + type INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN; \ + type INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN; \ + type INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS; \ + type INPUT_PIXEL_FORMAT; \ + type DSCCIF_CONFIG0__BITS_PER_COMPONENT; \ + type DOUBLE_BUFFER_REG_UPDATE_PENDING; \ + type DSCCIF_UPDATE_PENDING_STATUS; \ + type DSCCIF_UPDATE_TAKEN_STATUS; \ + type DSCCIF_UPDATE_TAKEN_ACK; \ + type DSCRM_DSC_FORWARD_EN; \ + type DSCRM_DSC_OPP_PIPE_SOURCE + +struct dcn20_dsc_registers { + uint32_t DSC_TOP_CONTROL; + uint32_t DSC_DEBUG_CONTROL; + uint32_t DSCC_CONFIG0; + uint32_t DSCC_CONFIG1; + uint32_t DSCC_STATUS; + uint32_t DSCC_INTERRUPT_CONTROL_STATUS; + uint32_t DSCC_PPS_CONFIG0; + uint32_t DSCC_PPS_CONFIG1; + uint32_t DSCC_PPS_CONFIG2; + uint32_t DSCC_PPS_CONFIG3; + uint32_t DSCC_PPS_CONFIG4; + uint32_t DSCC_PPS_CONFIG5; + uint32_t DSCC_PPS_CONFIG6; + uint32_t DSCC_PPS_CONFIG7; + uint32_t DSCC_PPS_CONFIG8; + uint32_t DSCC_PPS_CONFIG9; + uint32_t DSCC_PPS_CONFIG10; + uint32_t DSCC_PPS_CONFIG11; + uint32_t DSCC_PPS_CONFIG12; + uint32_t DSCC_PPS_CONFIG13; + uint32_t DSCC_PPS_CONFIG14; + uint32_t DSCC_PPS_CONFIG15; + uint32_t DSCC_PPS_CONFIG16; + uint32_t DSCC_PPS_CONFIG17; + uint32_t DSCC_PPS_CONFIG18; + uint32_t DSCC_PPS_CONFIG19; + uint32_t DSCC_PPS_CONFIG20; + uint32_t DSCC_PPS_CONFIG21; + uint32_t DSCC_PPS_CONFIG22; + uint32_t DSCC_MEM_POWER_CONTROL; + uint32_t DSCC_R_Y_SQUARED_ERROR_LOWER; + uint32_t DSCC_R_Y_SQUARED_ERROR_UPPER; + uint32_t DSCC_G_CB_SQUARED_ERROR_LOWER; + uint32_t DSCC_G_CB_SQUARED_ERROR_UPPER; + uint32_t DSCC_B_CR_SQUARED_ERROR_LOWER; + uint32_t DSCC_B_CR_SQUARED_ERROR_UPPER; + uint32_t DSCC_MAX_ABS_ERROR0; + uint32_t DSCC_MAX_ABS_ERROR1; + uint32_t DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL; + uint32_t DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL; + uint32_t DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL; + uint32_t DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL; + uint32_t DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL; + uint32_t DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL; + uint32_t DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL; + uint32_t DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL; + uint32_t DSCCIF_CONFIG0; + uint32_t DSCCIF_CONFIG1; + uint32_t DSCRM_DSC_FORWARD_CONFIG; +}; + + +struct dcn20_dsc_shift { + DSC_FIELD_LIST_DCN20(uint8_t); +}; + +struct dcn20_dsc_mask { + DSC_FIELD_LIST_DCN20(uint32_t); +}; + +/* DSCCIF_CONFIG.INPUT_PIXEL_FORMAT values */ +enum dsc_pixel_format { + DSC_PIXFMT_RGB, + DSC_PIXFMT_YCBCR444, + DSC_PIXFMT_SIMPLE_YCBCR422, + DSC_PIXFMT_NATIVE_YCBCR422, + DSC_PIXFMT_NATIVE_YCBCR420, + DSC_PIXFMT_UNKNOWN +}; + +struct dsc_reg_values { + /* PPS registers */ + struct drm_dsc_config pps; + + /* Additional registers */ + uint32_t dsc_clock_enable; + uint32_t dsc_clock_gating_disable; + uint32_t underflow_recovery_en; + uint32_t underflow_occurred_int_en; 
+ uint32_t underflow_occurred_status; + enum dsc_pixel_format pixel_format; + uint32_t ich_reset_at_eol; + uint32_t alternate_ich_encoding_en; + uint32_t num_slices_h; + uint32_t num_slices_v; + uint32_t rc_buffer_model_size; + uint32_t disable_ich; + uint32_t bpp_x32; + uint32_t dsc_dbg_en; + uint32_t rc_buffer_model_overflow_int_en[4]; +}; + +struct dcn20_dsc { + struct display_stream_compressor base; + const struct dcn20_dsc_registers *dsc_regs; + const struct dcn20_dsc_shift *dsc_shift; + const struct dcn20_dsc_mask *dsc_mask; + + struct dsc_reg_values reg_vals; + + int max_image_width; +}; + +void dsc_config_log(struct display_stream_compressor *dsc, + const struct dsc_config *config); + +void dsc_log_pps(struct display_stream_compressor *dsc, + struct drm_dsc_config *pps); + +void dsc_override_rc_params(struct rc_params *rc, + const struct dc_dsc_rc_params_override *override); + +bool dsc_prepare_config(const struct dsc_config *dsc_cfg, + struct dsc_reg_values *dsc_reg_vals, + struct dsc_optc_config *dsc_optc_cfg); + +enum dsc_pixel_format dsc_dc_pixel_encoding_to_dsc_pixel_format(enum dc_pixel_encoding dc_pix_enc, + bool is_ycbcr422_simple); + +enum dsc_bits_per_comp dsc_dc_color_depth_to_dsc_bits_per_comp(enum dc_color_depth dc_color_depth); + +void dsc_init_reg_values(struct dsc_reg_values *reg_vals); + +void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, const struct dsc_parameters *dsc_params); + +void dsc2_construct(struct dcn20_dsc *dsc, + struct dc_context *ctx, + int inst, + const struct dcn20_dsc_registers *dsc_regs, + const struct dcn20_dsc_shift *dsc_shift, + const struct dcn20_dsc_mask *dsc_mask); + +void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, + int pixel_clock_100Hz); + +bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, + const struct dsc_config *dsc_cfg, + uint8_t *dsc_packed_pps); + +#endif + diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c new file mode 100644 index 0000000000..71d2dff998 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
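The dcn20_dsc_shift and dcn20_dsc_mask definitions above rely on instantiating one field-list macro twice: once with uint8_t to hold each field's bit offset, once with uint32_t to hold its bit mask. A minimal sketch of the idiom, with hypothetical names:

#include <stdint.h>

/* One macro names every field once; each instantiation picks the storage
 * type, so the shift and mask tables can never drift out of sync. */
#define EXAMPLE_FIELD_LIST(type) \
	type DSC_CLOCK_EN; \
	type DSC_DISPCLK_R_GATE_DIS

struct example_shift {
	EXAMPLE_FIELD_LIST(uint8_t);	/* bit offsets, 0..31 */
};

struct example_mask {
	EXAMPLE_FIELD_LIST(uint32_t);	/* full-width field masks */
};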
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn35_dsc.h"
+#include "reg_helper.h"
+
+/* Macro definitions for REG_SET macros */
+#define CTX \
+	dsc20->base.ctx
+
+#define REG(reg)\
+	dsc20->dsc_regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+	((const struct dcn35_dsc_shift *)(dsc20->dsc_shift))->field_name, \
+	((const struct dcn35_dsc_mask *)(dsc20->dsc_mask))->field_name
+
+#define DC_LOGGER \
+	dsc->ctx->logger
+
+void dsc35_construct(struct dcn20_dsc *dsc,
+	struct dc_context *ctx,
+	int inst,
+	const struct dcn20_dsc_registers *dsc_regs,
+	const struct dcn35_dsc_shift *dsc_shift,
+	const struct dcn35_dsc_mask *dsc_mask)
+{
+	dsc2_construct(dsc, ctx, inst, dsc_regs,
+		(const struct dcn20_dsc_shift *)(dsc_shift),
+		(const struct dcn20_dsc_mask *)(dsc_mask));
+}
+
+void dsc35_set_fgcg(struct dcn20_dsc *dsc20, bool enable)
+{
+	REG_UPDATE(DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, !enable);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h
new file mode 100644
index 0000000000..133ad38842
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN35_DSC_H__
+#define __DCN35_DSC_H__
+
+#include "dcn20/dcn20_dsc.h"
+
+#define DSC_REG_LIST_SH_MASK_DCN35(mask_sh) \
+	DSC_REG_LIST_SH_MASK_DCN20(mask_sh), \
+	DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, mask_sh)
+
+#define DSC_FIELD_LIST_DCN35(type) \
+	struct { \
+		DSC_FIELD_LIST_DCN20(type); \
+		type DSC_FGCG_REP_DIS; \
+	}
+
+struct dcn35_dsc_shift {
+	DSC_FIELD_LIST_DCN35(uint8_t);
+};
+
+struct dcn35_dsc_mask {
+	DSC_FIELD_LIST_DCN35(uint32_t);
+};
+
+void dsc35_construct(struct dcn20_dsc *dsc,
+	struct dc_context *ctx,
+	int inst,
+	const struct dcn20_dsc_registers *dsc_regs,
+	const struct dcn35_dsc_shift *dsc_shift,
+	const struct dcn35_dsc_mask *dsc_mask);
+
+void dsc35_set_fgcg(struct dcn20_dsc *dsc20, bool enable);
+
+#endif /* __DCN35_DSC_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
new file mode 100644
index 0000000000..4b27f29d0d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
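The CTX, REG and FN macros in dcn35_dsc.c above plug into the shared reg_helper machinery, so a call such as REG_UPDATE(DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, !enable) resolves the register through dsc_regs and the field's shift/mask pair through the casts in FN. The rough shape of the resulting read-modify-write, as a sketch (the real expansion lives in reg_helper.h; field_update is hypothetical):

#include <stdint.h>

/* Clear the field with its mask, then shift the new value into place. */
static uint32_t field_update(uint32_t reg_val, uint8_t shift, uint32_t mask,
			     uint32_t field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/* e.g. a 1-bit field at bit 4: field_update(v, 4, 0x10, 1) sets it. */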
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DAL_DSC_H__
+#define __DAL_DSC_H__
+
+#include "dc_dsc.h"
+#include "dc_hw_types.h"
+#include "dc_types.h"
+/* do not include any other headers
+ * or else it might break Edid Utility functionality.
+ */
+
+
+/* Input parameters for configuring DSC from the outside of DSC */
+struct dsc_config {
+	uint32_t pic_width;
+	uint32_t pic_height;
+	enum dc_pixel_encoding pixel_encoding;
+	enum dc_color_depth color_depth; /* Bits per component */
+	bool is_odm;
+	struct dc_dsc_config dc_dsc_cfg;
+};
+
+
+/* Output parameters for configuring DSC-related part of OPTC */
+struct dsc_optc_config {
+	uint32_t slice_width; /* Slice width in pixels */
+	uint32_t bytes_per_pixel; /* Bytes per pixel in u3.28 format */
+	bool is_pixel_format_444; /* 'true' if pixel format is 'RGB 444' or 'Simple YCbCr 4:2:2' (4:2:2 upsampled to 4:4:4) */
+};
+
+
+struct dcn_dsc_state {
+	uint32_t dsc_clock_en;
+	uint32_t dsc_slice_width;
+	uint32_t dsc_bits_per_pixel;
+	uint32_t dsc_slice_height;
+	uint32_t dsc_pic_width;
+	uint32_t dsc_pic_height;
+	uint32_t dsc_slice_bpg_offset;
+	uint32_t dsc_chunk_size;
+	uint32_t dsc_fw_en;
+	uint32_t dsc_opp_source;
+};
+
+
+/* DSC encoder capabilities
+ * They differ from the DPCD DSC caps because they are based on AMD DSC encoder caps.
+ */
+union dsc_enc_slice_caps {
+	struct {
+		uint8_t NUM_SLICES_1 : 1;
+		uint8_t NUM_SLICES_2 : 1;
+		uint8_t NUM_SLICES_3 : 1; /* This one is not per DSC spec, but our encoder supports it */
+		uint8_t NUM_SLICES_4 : 1;
+		uint8_t NUM_SLICES_8 : 1;
+		uint8_t NUM_SLICES_12 : 1;
+		uint8_t NUM_SLICES_16 : 1;
+	} bits;
+	uint8_t raw;
+};
+
+struct dsc_enc_caps {
+	uint8_t dsc_version;
+	union dsc_enc_slice_caps slice_caps;
+	int32_t lb_bit_depth;
+	bool is_block_pred_supported;
+	union dsc_color_formats color_formats;
+	union dsc_color_depth color_depth;
+	int32_t max_total_throughput_mps; /* Maximum total throughput with all the slices combined */
+	int32_t max_slice_width;
+	uint32_t bpp_increment_div; /* bpp increment divisor, e.g.
if 16, it's 1/16th of a bit */ + uint32_t edp_sink_max_bits_per_pixel; + bool is_dp; +}; + +struct dsc_funcs { + void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); + void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); + bool (*dsc_validate_stream)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg); + void (*dsc_set_config)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, + struct dsc_optc_config *dsc_optc_cfg); + bool (*dsc_get_packed_pps)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, + uint8_t *dsc_packed_pps); + void (*dsc_enable)(struct display_stream_compressor *dsc, int opp_pipe); + void (*dsc_disable)(struct display_stream_compressor *dsc); + void (*dsc_disconnect)(struct display_stream_compressor *dsc); +}; + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile index bccd46bd18..254136f8e3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile @@ -78,7 +78,7 @@ ifdef CONFIG_DRM_AMD_DC_FP # DCN ############################################################################### -HWSS_DCN10 = dcn10_hwseq.o +HWSS_DCN10 = dcn10_hwseq.o dcn10_init.o AMD_DAL_HWSS_DCN10 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn10/,$(HWSS_DCN10)) @@ -86,7 +86,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN10) ############################################################################### -HWSS_DCN20 = dcn20_hwseq.o +HWSS_DCN20 = dcn20_hwseq.o dcn20_init.o AMD_DAL_HWSS_DCN20 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn20/,$(HWSS_DCN20)) @@ -94,7 +94,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN20) ############################################################################### -HWSS_DCN201 = dcn201_hwseq.o +HWSS_DCN201 = dcn201_hwseq.o dcn201_init.o AMD_DAL_HWSS_DCN201 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn201/,$(HWSS_DCN201)) @@ -102,7 +102,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN201) ############################################################################### -HWSS_DCN21 = dcn21_hwseq.o +HWSS_DCN21 = dcn21_hwseq.o dcn21_init.o AMD_DAL_HWSS_DCN21 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn21/,$(HWSS_DCN21)) @@ -114,7 +114,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN21) ############################################################################### -HWSS_DCN30 = dcn30_hwseq.o +HWSS_DCN30 = dcn30_hwseq.o dcn30_init.o AMD_DAL_HWSS_DCN30 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn30/,$(HWSS_DCN30)) @@ -122,7 +122,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN30) ############################################################################### -HWSS_DCN301 = dcn301_hwseq.o +HWSS_DCN301 = dcn301_hwseq.o dcn301_init.o AMD_DAL_HWSS_DCN301 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn301/,$(HWSS_DCN301)) @@ -130,15 +130,17 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN301) ############################################################################### -HWSS_DCN302 = dcn302_hwseq.o +HWSS_DCN302 = dcn302_hwseq.o dcn302_init.o AMD_DAL_HWSS_DCN302 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn302/,$(HWSS_DCN302)) AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN302) + + ############################################################################### -HWSS_DCN303 = dcn303_hwseq.o +HWSS_DCN303 = dcn303_hwseq.o dcn303_init.o AMD_DAL_HWSS_DCN303 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn303/,$(HWSS_DCN303)) @@ -146,7 +148,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN303) 
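Callers are expected to reach the DSC hardware only through the dsc_funcs table declared in dsc.h above. A small sketch of that dispatch, assuming the funcs pointer hangs off display_stream_compressor as the declarations suggest (example_pack_pps is hypothetical):

/* Guard the hook before dispatching; optional entries may be NULL. */
static bool example_pack_pps(struct display_stream_compressor *dsc,
			     const struct dsc_config *cfg,
			     uint8_t *packed_pps)
{
	if (!dsc || !dsc->funcs || !dsc->funcs->dsc_get_packed_pps)
		return false;

	return dsc->funcs->dsc_get_packed_pps(dsc, cfg, packed_pps);
}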
############################################################################### -HWSS_DCN31 = dcn31_hwseq.o +HWSS_DCN31 = dcn31_hwseq.o dcn31_init.o AMD_DAL_HWSS_DCN31 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn31/,$(HWSS_DCN31)) @@ -154,7 +156,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN31) ############################################################################### -HWSS_DCN314 = dcn314_hwseq.o +HWSS_DCN314 = dcn314_hwseq.o dcn314_init.o AMD_DAL_HWSS_DCN314 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn314/,$(HWSS_DCN314)) @@ -162,7 +164,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN314) ############################################################################### -HWSS_DCN32 = dcn32_hwseq.o +HWSS_DCN32 = dcn32_hwseq.o dcn32_init.o AMD_DAL_HWSS_DCN32 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn32/,$(HWSS_DCN32)) @@ -170,7 +172,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN32) ############################################################################### -HWSS_DCN35 = dcn35_hwseq.o +HWSS_DCN35 = dcn35_hwseq.o dcn35_init.o AMD_DAL_HWSS_DCN35 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn35/,$(HWSS_DCN35)) @@ -180,4 +182,4 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35) ############################################################################### -endif \ No newline at end of file +endif diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h index 44b4df6469..52f045cfd5 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h @@ -682,6 +682,7 @@ struct dce_hwseq_registers { uint32_t DCHUBBUB_ARB_HOSTVM_CNTL; uint32_t HPO_TOP_HW_CONTROL; uint32_t DMU_CLK_CNTL; + uint32_t DCCG_GATE_DISABLE_CNTL4; uint32_t DCCG_GATE_DISABLE_CNTL5; }; /* set field name */ @@ -1199,7 +1200,19 @@ struct dce_hwseq_registers { type PHYBSYMCLK_ROOT_GATE_DISABLE;\ type PHYCSYMCLK_ROOT_GATE_DISABLE;\ type PHYDSYMCLK_ROOT_GATE_DISABLE;\ - type PHYESYMCLK_ROOT_GATE_DISABLE; + type PHYESYMCLK_ROOT_GATE_DISABLE;\ + type DTBCLK_P0_GATE_DISABLE;\ + type DTBCLK_P1_GATE_DISABLE;\ + type DTBCLK_P2_GATE_DISABLE;\ + type DTBCLK_P3_GATE_DISABLE;\ + type DPSTREAMCLK0_GATE_DISABLE;\ + type DPSTREAMCLK1_GATE_DISABLE;\ + type DPSTREAMCLK2_GATE_DISABLE;\ + type DPSTREAMCLK3_GATE_DISABLE;\ + type DPIASYMCLK0_GATE_DISABLE;\ + type DPIASYMCLK1_GATE_DISABLE;\ + type DPIASYMCLK2_GATE_DISABLE;\ + type DPIASYMCLK3_GATE_DISABLE; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 3642c069bd..12af6bb9fe 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -55,6 +55,7 @@ #include "audio.h" #include "reg_helper.h" #include "panel_cntl.h" +#include "dc_state_priv.h" #include "dpcd_defs.h" /* include DCE11 register header files */ #include "dce/dce_11_0_d.h" @@ -1476,7 +1477,7 @@ static enum dc_status dce110_enable_stream_timing( return DC_OK; } -static enum dc_status apply_single_controller_ctx_to_hw( +enum dc_status dce110_apply_single_controller_ctx_to_hw( struct pipe_ctx *pipe_ctx, struct dc_state *context, struct dc *dc) @@ -1597,7 +1598,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( * is constructed with the same sink). Make sure not to override * and link programming on the main. 
*/ - if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false; } @@ -1685,7 +1686,7 @@ static void disable_vga_and_power_gate_all_controllers( true); dc->current_state->res_ctx.pipe_ctx[i].pipe_idx = i; - dc->hwss.disable_plane(dc, + dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); } } @@ -2135,7 +2136,7 @@ static void dce110_reset_hw_ctx_wrap( old_clk)) old_clk->funcs->cs_power_down(old_clk); - dc->hwss.disable_plane(dc, pipe_ctx_old); + dc->hwss.disable_plane(dc, dc->current_state, pipe_ctx_old); pipe_ctx_old->stream = NULL; } @@ -2302,7 +2303,7 @@ enum dc_status dce110_apply_ctx_to_hw( if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) continue; - status = apply_single_controller_ctx_to_hw( + status = dce110_apply_single_controller_ctx_to_hw( pipe_ctx, context, dc); @@ -2499,6 +2500,7 @@ static bool wait_for_reset_trigger_to_occur( /* Enable timing synchronization for a group of Timing Generators. */ static void dce110_enable_timing_synchronization( struct dc *dc, + struct dc_state *state, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]) @@ -2592,6 +2594,7 @@ static void init_hw(struct dc *dc) struct dmcu *dmcu; struct dce_hwseq *hws = dc->hwseq; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; bp = dc->ctx->dc_bios; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -2641,13 +2644,15 @@ static void init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } abm = dc->res_pool->abm; if (abm != NULL) - abm->funcs->abm_init(abm, backlight); + abm->funcs->abm_init(abm, backlight, user_level); dmcu = dc->res_pool->dmcu; if (dmcu != NULL && abm != NULL) @@ -2844,7 +2849,7 @@ static void dce110_post_unlock_program_front_end( { } -static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) +static void dce110_power_down_fe(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; int fe_idx = pipe_ctx->plane_res.mi ? 
@@ -3117,7 +3122,8 @@ void dce110_disable_link_output(struct dc_link *link, struct dmcu *dmcu = dc->res_pool->dmcu; if (signal == SIGNAL_TYPE_EDP && - link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control && + !link->skip_implict_edp_power_control) link->dc->hwss.edp_backlight_control(link, false); else if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->lock_phy(dmcu); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index 08028a1779..ed3cc3648e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context); +enum dc_status dce110_apply_single_controller_ctx_to_hw( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); void dce110_enable_stream(struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 2b3ef5cdbd..c45f84aa32 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -56,6 +56,7 @@ #include "dc_trace.h" #include "dce/dmub_outbox.h" #include "link.h" +#include "dc_state_priv.h" #define DC_LOGGER \ dc_logger @@ -115,7 +116,7 @@ void dcn10_lock_all_pipes(struct dc *dc, !pipe_ctx->stream || (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) || !tg->funcs->is_tg_enabled(tg) || - pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) + dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) continue; if (lock) @@ -1181,7 +1182,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc) } /* trigger HW to start disconnect plane from stream on the next vsync */ -void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_plane_atomic_disconnect(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -1201,7 +1204,7 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove); // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle, // so don't wait for MPCC_IDLE in the programming sequence - if (opp != NULL && !pipe_ctx->plane_state->is_phantom) + if (opp != NULL && dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; dc->optimized_required = true; @@ -1291,7 +1294,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->plane_state = NULL; } -void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; DC_LOGGER_INIT(dc->ctx->logger); @@ -1417,12 +1420,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; - hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); if (tg->funcs->is_tg_enabled(tg)) tg->funcs->unlock(tg); - dc->hwss.disable_plane(dc, pipe_ctx); + dc->hwss.disable_plane(dc, context, pipe_ctx); 
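The recurring conversion in the hwseq hunks above and below: phantom/SubVP classification now comes from the dc_state that owns the pipe instead of flags cached on the stream or plane, which is why disable_plane, plane_atomic_disconnect and enable_timing_synchronization all grew a dc_state parameter. A hypothetical helper condensing the new idiom:

/* old form: pipe->stream->mall_stream_config.type == SUBVP_PHANTOM
 * (or pipe->plane_state->is_phantom); new form queries the owning state. */
static bool pipe_is_phantom(struct dc_state *state, struct pipe_ctx *pipe)
{
	return pipe->stream &&
	       dc_state_get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM;
}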
pipe_ctx->stream_res.tg = NULL; pipe_ctx->plane_res.hubp = NULL; @@ -1487,6 +1490,7 @@ void dcn10_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; bool is_optimized_init_done = false; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) @@ -1584,12 +1588,14 @@ void dcn10_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } if (abm != NULL) - abm->funcs->abm_init(abm, backlight); + abm->funcs->abm_init(abm, backlight, user_level); if (dmcu != NULL && !dmcu->auto_load_dmcu) dmcu->funcs->dmcu_init(dmcu); @@ -2266,6 +2272,7 @@ void dcn10_enable_vblanks_synchronization( void dcn10_enable_timing_synchronization( struct dc *dc, + struct dc_state *state, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]) @@ -2280,7 +2287,7 @@ void dcn10_enable_timing_synchronization( DC_SYNC_INFO("Setting up OTG reset trigger\n"); for (i = 1; i < group_size; i++) { - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; opp = grouped_pipes[i]->stream_res.opp; @@ -2300,14 +2307,14 @@ void dcn10_enable_timing_synchronization( if (grouped_pipes[i]->stream == NULL) continue; - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; grouped_pipes[i]->stream->vblank_synchronized = false; } for (i = 1; i < group_size; i++) { - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger( @@ -2321,11 +2328,11 @@ void dcn10_enable_timing_synchronization( * synchronized. Look at last pipe programmed to reset. 
*/ - if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM) + if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM) wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg); for (i = 1; i < group_size; i++) { - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger( @@ -2333,7 +2340,7 @@ void dcn10_enable_timing_synchronization( } for (i = 1; i < group_size; i++) { - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM) continue; opp = grouped_pipes[i]->stream_res.opp; @@ -3025,7 +3032,7 @@ void dcn10_post_unlock_program_front_end( for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) - dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) { @@ -3072,7 +3079,7 @@ void dcn10_prepare_bandwidth( context, false); - dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, + dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, true); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h index ef6d56da41..bc5dd68a24 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h @@ -75,7 +75,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn10_reset_hw_ctx_wrap( struct dc *dc, struct dc_state *context); -void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void dcn10_lock_all_pipes( struct dc *dc, struct dc_state *context, @@ -108,13 +108,16 @@ void dcn10_power_down_on_boot(struct dc *dc); enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context); -void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn10_plane_atomic_disconnect(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx); void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data); void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx); void dce110_power_down(struct dc *dc); void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context); void dcn10_enable_timing_synchronization( struct dc *dc, + struct dc_state *state, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c new file mode 100644 index 0000000000..a5bdac79a7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c @@ -0,0 +1,128 @@ +/* + * Copyright 2016-2020 Advanced Micro Devices, Inc. 
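The init_hw hunks above (dce110 and dcn10 alike) split backlight initialization into two values: hw_init() reports the level currently programmed in hardware, the user-requested level is read back from the panel controller's stored registers, and abm_init() is now seeded with both. Condensed into a hypothetical helper:

static void example_seed_abm(struct dc_link *link, struct abm *abm)
{
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;

	if (link->panel_cntl) {
		backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
		user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
	}

	if (abm)
		abm->funcs->abm_init(abm, backlight, user_level);
}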
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hw_sequencer_private.h" +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" + +static const struct hw_sequencer_funcs dcn10_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn10_init_hw, + .power_down_on_boot = dcn10_power_down_on_boot, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .post_unlock_program_front_end = dcn10_post_unlock_program_front_end, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .update_plane_addr = dcn10_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn10_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dce110_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn10_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn10_disable_plane, + .pipe_control_lock = dcn10_pipe_control_lock, + .cursor_lock = dcn10_cursor_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .prepare_bandwidth = dcn10_prepare_bandwidth, + .optimize_bandwidth = dcn10_optimize_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt 
= dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .power_down = dce110_power_down, + .set_backlight_level = dce110_set_backlight_level, + .set_abm_immediate_disable = dce110_set_abm_immediate_disable, + .set_pipe = dce110_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + .enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dce110_disable_link_output, + .get_dcc_en_bits = dcn10_get_dcc_en_bits, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, +}; + +static const struct hwseq_private_funcs dcn10_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn10_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .program_pipe = dcn10_program_pipe, + .update_mpcc = dcn10_update_mpcc, + .set_input_transfer_func = dcn10_set_input_transfer_func, + .set_output_transfer_func = dcn10_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn10_blank_pixel_data, + .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, + .enable_stream_timing = dcn10_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = NULL, + .enable_stream_gating = NULL, + .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = NULL, + .disable_vga = dcn10_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn10_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn10_enable_power_gating_plane, + .dpp_pg_control = dcn10_dpp_pg_control, + .hubp_pg_control = dcn10_hubp_pg_control, + .dsc_pg_control = NULL, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, +}; + +void dcn10_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn10_funcs; + dc->hwseq->funcs = dcn10_private_funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h new file mode 100644 index 0000000000..8c6fd7b844 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN10_INIT_H__ +#define __DC_DCN10_INIT_H__ + +struct dc; + +void dcn10_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN10_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index e3f547e061..868a086c72 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -55,6 +55,7 @@ #include "inc/link_enc_cfg.h" #include "link_hwss.h" #include "link.h" +#include "dc_state_priv.h" #define DC_LOGGER \ dc_logger @@ -623,9 +624,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) } -void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) { - bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom; + bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM; struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL; DC_LOGGER_INIT(dc->ctx->logger); @@ -847,7 +848,7 @@ enum dc_status dcn20_enable_stream_timing( /* TODO enable stream if timing changed */ /* TODO unblank stream if DP */ - if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) { if (pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable) pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg); } @@ -1368,8 +1369,14 @@ void dcn20_pipe_control_lock( } } -static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe) +static void dcn20_detect_pipe_changes(struct dc_state *old_state, + struct dc_state *new_state, + struct pipe_ctx *old_pipe, + struct pipe_ctx *new_pipe) { + bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM; + bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM; + new_pipe->update_flags.raw = 0; /* If non-phantom pipe is being transitioned to a phantom pipe, @@ -1379,8 +1386,8 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx * be different). The post_unlock sequence will set the correct * update flags to enable the phantom pipe. 
*/ - if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom && - new_pipe->plane_state && new_pipe->plane_state->is_phantom) { + if (old_pipe->plane_state && !old_is_phantom && + new_pipe->plane_state && new_is_phantom) { new_pipe->update_flags.bits.disable = 1; return; } @@ -1405,6 +1412,10 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx new_pipe->update_flags.bits.scaler = 1; new_pipe->update_flags.bits.viewport = 1; new_pipe->update_flags.bits.det_size = 1; + if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE && + new_pipe->stream_res.test_pattern_params.width != 0 && + new_pipe->stream_res.test_pattern_params.height != 0) + new_pipe->update_flags.bits.test_pattern_changed = 1; if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { new_pipe->update_flags.bits.odm = 1; new_pipe->update_flags.bits.global_sync = 1; @@ -1417,14 +1428,14 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx * The remove-add sequence of the phantom pipe always results in the pipe * being blanked in enable_stream_timing (DPG). */ - if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) + if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM) new_pipe->update_flags.bits.enable = 1; /* Phantom pipes are effectively disabled, if the pipe was previously phantom * we have to enable */ - if (old_pipe->plane_state && old_pipe->plane_state->is_phantom && - new_pipe->plane_state && !new_pipe->plane_state->is_phantom) + if (old_pipe->plane_state && old_is_phantom && + new_pipe->plane_state && !new_is_phantom) new_pipe->update_flags.bits.enable = 1; if (old_pipe->plane_state && !new_pipe->plane_state) { @@ -1557,6 +1568,7 @@ static void dcn20_update_dchubp_dpp( struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct dccg *dccg = dc->res_pool->dccg; bool viewport_changed = false; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx); if (pipe_ctx->update_flags.bits.dppclk) dpp->funcs->dpp_dppclk_control(dpp, false, true); @@ -1702,7 +1714,7 @@ static void dcn20_update_dchubp_dpp( pipe_ctx->update_flags.bits.plane_changed || plane_state->update_flags.bits.addr_update) { if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && - pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { + pipe_mall_type == SUBVP_MAIN) { union block_sequence_params params; params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv; @@ -1716,7 +1728,7 @@ static void dcn20_update_dchubp_dpp( if (pipe_ctx->update_flags.bits.enable) hubp->funcs->set_blank(hubp, false); /* If the stream paired with this plane is phantom, the plane is also phantom */ - if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM + if (pipe_ctx->stream && pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable) hubp->funcs->phantom_hubp_post_enable(hubp); } @@ -1774,7 +1786,7 @@ static void dcn20_program_pipe( pipe_ctx->pipe_dlg_param.vupdate_offset, pipe_ctx->pipe_dlg_param.vupdate_width); - if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); pipe_ctx->stream_res.tg->funcs->set_vtg_params( @@ -1914,7 +1926,7 @@ void dcn20_program_front_end_for_ctx( /* Set pipe update flags and lock pipes */ for (i = 0; i < dc->res_pool->pipe_count; i++) - 
dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i], + dcn20_detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i], &context->res_ctx.pipe_ctx[i]); /* When disabling phantom pipes, turn on phantom OTG first (so we can get double @@ -1924,15 +1936,16 @@ void dcn20_program_front_end_for_ctx( struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream; if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream && - dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; if (tg->funcs->enable_crtc) { if (dc->hwss.blank_phantom) { int main_pipe_width, main_pipe_height; + struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream); - main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width; - main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height; + main_pipe_width = phantom_stream->dst.width; + main_pipe_height = phantom_stream->dst.height; dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); } tg->funcs->enable_crtc(tg); @@ -1961,9 +1974,9 @@ void dcn20_program_front_end_for_ctx( * DET allocation. */ if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable || - (context->res_ctx.pipe_ctx[i].plane_state && context->res_ctx.pipe_ctx[i].plane_state->is_phantom))) + (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); - hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); } @@ -1996,7 +2009,7 @@ void dcn20_program_front_end_for_ctx( * but the MPO still exists until the double buffered update of the main pipe so we * will get a frame of underflow if the phantom pipe is programmed here. 
*/ - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) dcn20_program_pipe(dc, pipe, context); } @@ -2035,7 +2048,7 @@ void dcn20_post_unlock_program_front_end( for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) - dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); /* * If we are enabling a pipe, we need to wait for pending clear as this is a critical @@ -2047,7 +2060,7 @@ void dcn20_post_unlock_program_front_end( struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // Don't check flip pending on phantom pipes if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable && - pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { + dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) { struct hubp *hubp = pipe->plane_res.hubp; int j = 0; for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us @@ -2070,7 +2083,7 @@ void dcn20_post_unlock_program_front_end( * programming sequence). */ while (pipe) { - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { /* When turning on the phantom pipe we want to run through the * entire enable sequence, so apply all the "enable" flags. */ @@ -2140,17 +2153,17 @@ void dcn20_prepare_bandwidth( struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // At optimize don't restore the original watermark value - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) { context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U; break; } } /* program dchubbub watermarks: - * For assigning wm_optimized_required, use |= operator since we don't want + * For assigning optimized_required, use |= operator since we don't want * to clear the value if the optimize has not happened yet */ - dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub, + dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, false); @@ -2163,10 +2176,10 @@ void dcn20_prepare_bandwidth( if (hubbub->funcs->program_compbuf_size) { if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) { compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes; - dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes); + dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes); } else { compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb; - dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb); + dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb); } hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false); @@ -2184,7 +2197,7 @@ void dcn20_optimize_bandwidth( struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; // At optimize don't need to restore the original watermark value - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) { 
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U; break; } @@ -2218,7 +2231,8 @@ void dcn20_optimize_bandwidth( dc->clk_mgr, context, true); - if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) { + if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW && + !dc->debug.disable_extblankadj) { for (i = 0; i < dc->res_pool->pipe_count; ++i) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; @@ -2331,7 +2345,7 @@ bool dcn20_wait_for_blank_complete( int counter; for (counter = 0; counter < 1000; counter++) { - if (opp->funcs->dpg_is_blanked(opp)) + if (!opp->funcs->dpg_is_pending(opp)) break; udelay(100); @@ -2342,7 +2356,7 @@ bool dcn20_wait_for_blank_complete( return false; } - return true; + return opp->funcs->dpg_is_blanked(opp); } bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx) @@ -2548,7 +2562,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) tg->funcs->setup_vertical_interrupt2(tg, start_line); } -static void dcn20_reset_back_end_for_pipe( +void dcn20_reset_back_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) @@ -2944,7 +2958,7 @@ void dcn20_fpga_init_hw(struct dc *dc) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; /*to do*/ - hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); } /* initialize DWB pointer to MCIF_WB */ @@ -2961,7 +2975,7 @@ void dcn20_fpga_init_hw(struct dc *dc) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - dc->hwss.disable_plane(dc, pipe_ctx); + dc->hwss.disable_plane(dc, context, pipe_ctx); pipe_ctx->stream_res.tg = NULL; pipe_ctx->plane_res.hubp = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h index ab02e4e9c8..d950b3e54e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h @@ -52,7 +52,7 @@ void dcn20_program_output_csc(struct dc *dc, void dcn20_enable_stream(struct pipe_ctx *pipe_ctx); void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); -void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void dcn20_disable_pixel_data( struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -84,6 +84,10 @@ enum dc_status dcn20_enable_stream_timing( void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_reset_back_end_for_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); void dcn20_init_blank( struct dc *dc, struct timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c new file mode 100644 index 0000000000..884e3e3233 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c @@ -0,0 +1,145 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
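dcn20_wait_for_blank_complete above changes what is polled: the loop now waits for the double-buffered DPG update to stop being pending, and only then reports whether the generator actually ended up blanked, instead of polling the blanked bit directly. The shape of the revised wait, as a hypothetical condensation (about 100 ms worst case at 100 us steps):

static bool example_wait_blank(struct output_pixel_processor *opp)
{
	int counter;

	/* Wait out the pending double-buffered DPG update first. */
	for (counter = 0; counter < 1000; counter++) {
		if (!opp->funcs->dpg_is_pending(opp))
			break;
		udelay(100);
	}

	if (counter == 1000)
		return false;	/* update never latched */

	return opp->funcs->dpg_is_blanked(opp);
}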
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" + +#include "dcn20_init.h" + +static const struct hw_sequencer_funcs dcn20_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn10_init_hw, + .power_down_on_boot = dcn10_power_down_on_boot, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_vblanks_synchronization = dcn10_enable_vblanks_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn20_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn20_disable_plane, + .pipe_control_lock = dcn20_pipe_control_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn20_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = 
dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn20_enable_writeback, + .disable_writeback = dcn20_disable_writeback, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn20_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn20_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .set_backlight_level = dce110_set_backlight_level, + .set_abm_immediate_disable = dce110_set_abm_immediate_disable, + .set_pipe = dce110_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + .enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dce110_disable_link_output, + .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, + .get_dcc_en_bits = dcn10_get_dcc_en_bits, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, +}; + +static const struct hwseq_private_funcs dcn20_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn20_set_input_transfer_func, + .set_output_transfer_func = dcn20_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = dcn20_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn20_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn20_enable_power_gating_plane, + .dpp_pg_control = dcn20_dpp_pg_control, + .hubp_pg_control = dcn20_hubp_pg_control, + .update_odm = dcn20_update_odm, + .dsc_pg_control = dcn20_dsc_pg_control, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn20_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, +}; + +void dcn20_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn20_funcs; + dc->hwseq->funcs = dcn20_private_funcs; + +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h new file mode 100644 index 0000000000..12277797cd --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
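For context on the new dcnXX_init.c layout shown above: each generation installs two tables in one construct call, the public hw_sequencer_funcs on dc->hwss and the internal hwseq_private_funcs on dc->hwseq->funcs. The files in this patch each define their tables in full; a hypothetical variant could instead copy a base table and override selectively (illustrative only, since the tables above are file-static):

/* hypothetical override for one hook */
void my_variant_disable_plane(struct dc *dc, struct dc_state *state,
			      struct pipe_ctx *pipe_ctx);

static struct hw_sequencer_funcs my_variant_funcs;

void my_variant_hw_sequencer_construct(struct dc *dc)
{
	my_variant_funcs = dcn20_funcs;	/* start from a base table */
	my_variant_funcs.disable_plane = my_variant_disable_plane;

	dc->hwss = my_variant_funcs;
	dc->hwseq->funcs = dcn20_private_funcs;
}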
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN20_INIT_H__ +#define __DC_DCN20_INIT_H__ + +struct dc; + +void dcn20_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN20_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c index d3fe6092f5..d5769f3887 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c @@ -320,7 +320,7 @@ void dcn201_init_hw(struct dc *dc) res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = res_pool->opps[i]; /*To do: number of MPCC != number of opp*/ - hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); } /* initialize DWB pointer to MCIF_WB */ @@ -337,7 +337,7 @@ void dcn201_init_hw(struct dc *dc) for (i = 0; i < res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - dc->hwss.disable_plane(dc, pipe_ctx); + dc->hwss.disable_plane(dc, context, pipe_ctx); pipe_ctx->stream_res.tg = NULL; pipe_ctx->plane_res.hubp = NULL; @@ -369,7 +369,9 @@ void dcn201_init_hw(struct dc *dc) } /* trigger HW to start disconnect plane from stream on the next vsync */ -void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn201_plane_atomic_disconnect(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; struct hubp *hubp = pipe_ctx->plane_res.hubp; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h index 26cd62be64..6a50a9894b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h @@ -33,7 +33,7 @@ void dcn201_init_hw(struct dc *dc); void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); -void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn201_plane_atomic_disconnect(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx); void dcn201_pipe_control_lock( diff --git 
a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c new file mode 100644 index 0000000000..a13bf6c938 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c @@ -0,0 +1,136 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn201/dcn201_hwseq.h" +#include "dcn201_init.h" + +static const struct hw_sequencer_funcs dcn201_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn201_init_hw, + .power_down_on_boot = NULL, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn10_post_unlock_program_front_end, + .update_plane_addr = dcn201_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dce110_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn201_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn10_disable_plane, + .pipe_control_lock = dcn201_pipe_control_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn20_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = 
dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .dmdata_status_done = dcn20_dmdata_status_done, + .set_dmdata_attributes = dcn201_set_dmdata_attributes, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn201_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .set_backlight_level = dce110_set_backlight_level, + .set_abm_immediate_disable = dce110_set_abm_immediate_disable, + .set_pipe = dce110_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + .enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dce110_disable_link_output, + .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, +}; + +static const struct hwseq_private_funcs dcn201_private_funcs = { + .init_pipes = NULL, + .update_plane_addr = dcn201_update_plane_addr, + .plane_atomic_disconnect = dcn201_plane_atomic_disconnect, + .program_pipe = dcn10_program_pipe, + .update_mpcc = dcn201_update_mpcc, + .set_input_transfer_func = dcn20_set_input_transfer_func, + .set_output_transfer_func = dcn20_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = NULL, + .enable_stream_gating = NULL, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn201_init_blank, + .disable_vga = dcn10_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn10_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn10_enable_power_gating_plane, + .dpp_pg_control = dcn10_dpp_pg_control, + .hubp_pg_control = dcn10_hubp_pg_control, + .dsc_pg_control = NULL, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn20_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, +}; + +void dcn201_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn201_funcs; + dc->hwseq->funcs = dcn201_private_funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h new file mode 100644 index 0000000000..1168887b03 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
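The dcn201_hwseq.c hunks above thread a struct dc_state through plane_atomic_disconnect() and disable_plane() rather than having the hooks rely on dc->current_state. A reduced sketch of what that signature change looks like to a caller; all types here are opaque stand-ins:

struct dc;
struct dc_state;
struct pipe_ctx;

struct hwseq_private_funcs_sketch {
        /* old: void (*plane_atomic_disconnect)(struct dc *, struct pipe_ctx *); */
        void (*plane_atomic_disconnect)(struct dc *dc,
                                        struct dc_state *state,
                                        struct pipe_ctx *pipe_ctx);
};

/* Every call site, as in dcn201_init_hw() above, now passes the state
 * being applied explicitly. */
void disconnect_pipe_sketch(struct dc *dc, struct dc_state *context,
                            struct pipe_ctx *pipe,
                            const struct hwseq_private_funcs_sketch *funcs)
{
        funcs->plane_atomic_disconnect(dc, context, pipe);
}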
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN201_INIT_H__ +#define __DC_DCN201_INIT_H__ + +struct dc; + +void dcn201_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN201_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c index 5c7f380a84..7252f5f781 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c @@ -211,7 +211,7 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx) struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu; uint32_t otg_inst; - if (!abm && !tg && !panel_cntl) + if (!abm || !tg || !panel_cntl) return; otg_inst = tg->inst; @@ -245,7 +245,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl; uint32_t otg_inst; - if (!abm && !tg && !panel_cntl) + if (!abm || !tg || !panel_cntl) return false; otg_inst = tg->inst; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c new file mode 100644 index 0000000000..18249c6b6d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c @@ -0,0 +1,151 @@ +/* + * Copyright 2016-2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
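The two dcn21_hwseq.c hunks above correct an inverted NULL check: with &&, the early return fired only when abm, tg and panel_cntl were all NULL, so otg_inst = tg->inst could still dereference a NULL tg when just one pointer was missing. With ||, any absent component bails out before the dereference. A reduced before/after sketch:

struct timing_generator_sketch { int inst; };

/* Buggy form: returns early only when ALL three are NULL, so a NULL tg
 * alone still reaches tg->inst. */
static int set_pipe_buggy(void *abm, struct timing_generator_sketch *tg,
                          void *panel_cntl)
{
        if (!abm && !tg && !panel_cntl)
                return -1;
        return tg->inst; /* crashes when only tg is NULL */
}

/* Fixed form, matching the hunks: bail out if ANY required piece is missing. */
static int set_pipe_fixed(void *abm, struct timing_generator_sketch *tg,
                          void *panel_cntl)
{
        if (!abm || !tg || !panel_cntl)
                return -1;
        return tg->inst;
}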
+ * + * Authors: AMD + * + */ + +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" + +#include "dcn21_init.h" + +static const struct hw_sequencer_funcs dcn21_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn10_init_hw, + .power_down_on_boot = dcn10_power_down_on_boot, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn20_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn20_disable_plane, + .pipe_control_lock = dcn20_pipe_control_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn20_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn20_enable_writeback, + .disable_writeback = dcn20_disable_writeback, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn20_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn21_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .power_down = dce110_power_down, + .set_backlight_level = dcn21_set_backlight_level, + .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, + .set_pipe = dcn21_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + 
.enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dce110_disable_link_output, + .is_abm_supported = dcn21_is_abm_supported, + .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, + .get_dcc_en_bits = dcn10_get_dcc_en_bits, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, +}; + +static const struct hwseq_private_funcs dcn21_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn20_set_input_transfer_func, + .set_output_transfer_func = dcn20_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = dcn20_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn20_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn20_enable_power_gating_plane, + .dpp_pg_control = dcn20_dpp_pg_control, + .hubp_pg_control = dcn20_hubp_pg_control, + .update_odm = dcn20_update_odm, + .dsc_pg_control = dcn20_dsc_pg_control, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn20_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, + .PLAT_58856_wa = dcn21_PLAT_58856_wa, +}; + +void dcn21_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn21_funcs; + dc->hwseq->funcs = dcn21_private_funcs; + +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h new file mode 100644 index 0000000000..3ed2429264 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN21_INIT_H__ +#define __DC_DCN21_INIT_H__ + +struct dc; + +void dcn21_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN21_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index c89149d153..55cf4c9e6a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -51,7 +51,7 @@ #include "dcn20/dcn20_hwseq.h" #include "dcn30/dcn30_resource.h" #include "link.h" - +#include "dc_state_priv.h" @@ -367,6 +367,10 @@ void dcn30_enable_writeback( DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\ __func__, wb_info->dwb_pipe_inst,\ wb_info->mpcc_inst); + + /* Warmup interface */ + dcn30_mmhubbub_warmup(dc, 1, wb_info); + /* Update writeback pipe */ dcn30_set_writeback(dc, wb_info, context); @@ -472,6 +476,7 @@ void dcn30_init_hw(struct dc *dc) int i; int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); @@ -608,13 +613,15 @@ void dcn30_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL) - abms[i]->funcs->abm_init(abms[i], backlight); + abms[i]->funcs->abm_init(abms[i], backlight, user_level); } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ @@ -972,7 +979,7 @@ void dcn30_hardware_release(struct dc *dc) if (!pipe->stream) continue; - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_MAIN) { subvp_in_use = true; break; } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c new file mode 100644 index 0000000000..9894caedff --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c @@ -0,0 +1,154 @@ +/* + * Copyright 2016-2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
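The dcn30_init_hw() hunk above widens ABM bring-up: alongside the hardware backlight value returned by panel_cntl->funcs->hw_init(), the saved user level from stored_backlight_registers is now captured and passed to abm_init(), which gained a matching parameter. A reduced sketch of the flow, with hypothetical stand-in types and values:

#include <stdint.h>

#define MAX_BACKLIGHT_LEVEL 0xFFFF

struct panel_cntl_sketch {
        uint32_t stored_user_level; /* mirrors stored_backlight_registers.USER_LEVEL */
};

/* stand-in for panel_cntl->funcs->hw_init() */
static uint32_t panel_hw_init(struct panel_cntl_sketch *pc)
{
        (void)pc;
        return 0x8000; /* pretend value read back from the panel registers */
}

/* stand-in for abm->funcs->abm_init(abm, backlight, user_level) */
static void abm_init(uint32_t backlight, uint32_t user_level)
{
        (void)backlight;
        (void)user_level;
}

void init_backlight_sketch(struct panel_cntl_sketch *pc)
{
        uint32_t backlight  = MAX_BACKLIGHT_LEVEL;
        uint32_t user_level = MAX_BACKLIGHT_LEVEL;

        if (pc) { /* as in the hunk: only a real panel overrides the defaults */
                backlight  = panel_hw_init(pc);
                user_level = pc->stored_user_level;
        }
        abm_init(backlight, user_level);
}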
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" +#include "dcn30/dcn30_hwseq.h" + +#include "dcn30_init.h" + +static const struct hw_sequencer_funcs dcn30_funcs = { + .program_gamut_remap = dcn30_program_gamut_remap, + .init_hw = dcn30_init_hw, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dcn30_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn20_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, + .pipe_control_lock = dcn20_pipe_control_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn30_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn30_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dcn30_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .edp_wait_for_T12 = dce110_edp_wait_for_T12, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn30_enable_writeback, + .disable_writeback = dcn30_disable_writeback, + .update_writeback = dcn30_update_writeback, + .mmhubbub_warmup = dcn30_mmhubbub_warmup, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn30_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn20_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = 
dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations, + .does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall, + .set_backlight_level = dcn21_set_backlight_level, + .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, + .hardware_release = dcn30_hardware_release, + .set_pipe = dcn21_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + .enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dce110_disable_link_output, + .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, + .get_dcc_en_bits = dcn10_get_dcc_en_bits, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .is_abm_supported = dcn21_is_abm_supported +}; + +static const struct hwseq_private_funcs dcn30_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn30_set_input_transfer_func, + .set_output_transfer_func = dcn30_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = dcn20_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn20_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn20_enable_power_gating_plane, + .dpp_pg_control = dcn20_dpp_pg_control, + .hubp_pg_control = dcn20_hubp_pg_control, + .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, + .update_odm = dcn20_update_odm, + .dsc_pg_control = dcn20_dsc_pg_control, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn30_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, +}; + +void dcn30_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn30_funcs; + dc->hwseq->funcs = dcn30_private_funcs; + +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h new file mode 100644 index 0000000000..c280ff90bf --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
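Several slots in these tables are deliberately NULL (.apply_ctx_for_surface here; .init_pipes and .power_down_on_boot in the dcn201 table earlier), marking hooks a given generation does not implement. The sketch below assumes the usual convention for such optional hooks, namely that callers test the pointer before invoking it:

struct dc;
struct dc_state;

struct hwss_optional_sketch {
        /* optional: left NULL by generations that do not implement it */
        void (*apply_ctx_for_surface)(struct dc *dc, struct dc_state *state);
};

/* Assumed caller-side guard for every optional slot. */
void program_surfaces_sketch(struct dc *dc, struct dc_state *state,
                             const struct hwss_optional_sketch *hwss)
{
        if (hwss->apply_ctx_for_surface)
                hwss->apply_ctx_for_surface(dc, state);
}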
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN30_INIT_H__ +#define __DC_DCN30_INIT_H__ + +struct dc; + +void dcn30_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN30_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c new file mode 100644 index 0000000000..6477009ce0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c @@ -0,0 +1,154 @@ +/* + * Copyright 2016-2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dcn301/dcn301_hwseq.h" + +#include "dcn301_init.h" + +static const struct hw_sequencer_funcs dcn301_funcs = { + .program_gamut_remap = dcn30_program_gamut_remap, + .init_hw = dcn10_init_hw, + .power_down_on_boot = dcn10_power_down_on_boot, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dcn30_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn20_unblank_stream, +#ifdef FREESYNC_POWER_OPTIMIZE + .are_streams_coarse_grain_aligned = dcn20_are_streams_coarse_grain_aligned, +#endif + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn20_disable_plane, + .pipe_control_lock = dcn20_pipe_control_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn20_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dcn30_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn30_enable_writeback, + .disable_writeback = dcn30_disable_writeback, + .update_writeback = dcn30_update_writeback, + .mmhubbub_warmup = dcn30_mmhubbub_warmup, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn30_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn20_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .set_backlight_level = dcn21_set_backlight_level, + .set_abm_immediate_disable = 
dcn21_set_abm_immediate_disable, + .set_pipe = dcn21_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + .enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dce110_disable_link_output, + .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, + .get_dcc_en_bits = dcn10_get_dcc_en_bits, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, +}; + +static const struct hwseq_private_funcs dcn301_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn30_set_input_transfer_func, + .set_output_transfer_func = dcn30_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = dcn20_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn20_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn20_enable_power_gating_plane, + .dpp_pg_control = dcn20_dpp_pg_control, + .hubp_pg_control = dcn20_hubp_pg_control, + .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, + .update_odm = dcn20_update_odm, + .dsc_pg_control = dcn20_dsc_pg_control, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn30_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, +}; + +void dcn301_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn301_funcs; + dc->hwseq->funcs = dcn301_private_funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h new file mode 100644 index 0000000000..0bca48ccbf --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN301_INIT_H__ +#define __DC_DCN301_INIT_H__ + +struct dc; + +void dcn301_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN301_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c new file mode 100644 index 0000000000..637f9514d3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c @@ -0,0 +1,41 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dcn302/dcn302_hwseq.h" + +#include "dcn30/dcn30_init.h" + +#include "dc.h" + +#include "dcn302_init.h" + +void dcn302_hw_sequencer_construct(struct dc *dc) +{ + dcn30_hw_sequencer_construct(dc); + + dc->hwseq->funcs.dpp_pg_control = dcn302_dpp_pg_control; + dc->hwseq->funcs.hubp_pg_control = dcn302_hubp_pg_control; + dc->hwseq->funcs.dsc_pg_control = dcn302_dsc_pg_control; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h new file mode 100644 index 0000000000..899587b93a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
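dcn302_hw_sequencer_construct() above (and dcn303's below) avoids duplicating the full tables: it delegates to dcn30_hw_sequencer_construct() and then reassigns only the power-gating hooks that differ, which works because the tables are plain structs copied by value. A reduced sketch of this inherit-then-override construction; names are stand-ins:

struct dc302_sketch {
        struct {
                void (*dpp_pg_control)(void);
                void (*hubp_pg_control)(void);
        } hwseq_funcs; /* stand-in for dc->hwseq->funcs */
};

static void dcn30_dpp_pg(void)   {}
static void dcn30_hubp_pg(void)  {}
static void dcn302_dpp_pg(void)  {}
static void dcn302_hubp_pg(void) {}

void dcn30_construct_sketch(struct dc302_sketch *dc)
{
        dc->hwseq_funcs.dpp_pg_control  = dcn30_dpp_pg;
        dc->hwseq_funcs.hubp_pg_control = dcn30_hubp_pg;
}

void dcn302_construct_sketch(struct dc302_sketch *dc)
{
        dcn30_construct_sketch(dc);                       /* inherit dcn30 */
        dc->hwseq_funcs.dpp_pg_control  = dcn302_dpp_pg;  /* override deltas */
        dc->hwseq_funcs.hubp_pg_control = dcn302_hubp_pg;
}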
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN302_INIT_H__ +#define __DC_DCN302_INIT_H__ + +struct dc; + +void dcn302_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN302_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c new file mode 100644 index 0000000000..edb4d68b81 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + */ + +#include "dcn303/dcn303_hwseq.h" +#include "dcn30/dcn30_init.h" +#include "dc.h" + +#include "dcn303_init.h" + +void dcn303_hw_sequencer_construct(struct dc *dc) +{ + dcn30_hw_sequencer_construct(dc); + + dc->hwseq->funcs.dpp_pg_control = dcn303_dpp_pg_control; + dc->hwseq->funcs.hubp_pg_control = dcn303_hubp_pg_control; + dc->hwseq->funcs.dsc_pg_control = dcn303_dsc_pg_control; + dc->hwseq->funcs.enable_power_gating_plane = dcn303_enable_power_gating_plane; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h new file mode 100644 index 0000000000..4949981126 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + */ + +#ifndef __DC_DCN303_INIT_H__ +#define __DC_DCN303_INIT_H__ + +struct dc; + +void dcn303_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN303_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 3a70a3cbc2..7423880fab 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -96,7 +96,8 @@ static void enable_memory_low_power(struct dc *dc) if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) { // Power down VPGs for (i = 0; i < dc->res_pool->stream_enc_count; i++) - dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); + if (dc->res_pool->stream_enc[i]->vpg) + dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); #if defined(CONFIG_DRM_AMD_DC_FP) for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); @@ -112,6 +113,7 @@ void dcn31_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; int i; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) @@ -223,13 +225,15 @@ void dcn31_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL) - abms[i]->funcs->abm_init(abms[i], backlight); + abms[i]->funcs->abm_init(abms[i], backlight, user_level); } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c new file mode 100644 index 0000000000..669f524bd0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c @@ -0,0 +1,157 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
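The enable_memory_low_power() hunk above adds a NULL test before powering down each stream encoder's VPG, so encoder instances without that sub-block are skipped instead of dereferenced. A reduced sketch of the guard:

struct vpg_sketch {
        void (*powerdown)(struct vpg_sketch *vpg);
};

struct stream_enc_sketch {
        struct vpg_sketch *vpg; /* may be absent on some encoder instances */
};

/* As in the hunk: skip encoders whose VPG sub-block is missing instead of
 * dereferencing a NULL pointer. */
void power_down_vpgs_sketch(struct stream_enc_sketch **enc, int count)
{
        for (int i = 0; i < count; i++)
                if (enc[i]->vpg)
                        enc[i]->vpg->powerdown(enc[i]->vpg);
}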
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dcn301/dcn301_hwseq.h" +#include "dcn31/dcn31_hwseq.h" + +#include "dcn31_init.h" + +static const struct hw_sequencer_funcs dcn31_funcs = { + .program_gamut_remap = dcn30_program_gamut_remap, + .init_hw = dcn31_init_hw, + .power_down_on_boot = dcn10_power_down_on_boot, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dcn31_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn20_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, + .pipe_control_lock = dcn20_pipe_control_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn20_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn30_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dcn30_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_T12 = 
dce110_edp_wait_for_T12, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn30_enable_writeback, + .disable_writeback = dcn30_disable_writeback, + .update_writeback = dcn30_update_writeback, + .mmhubbub_warmup = dcn30_mmhubbub_warmup, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn30_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn31_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .power_down = dce110_power_down, + .set_backlight_level = dcn21_set_backlight_level, + .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, + .set_pipe = dcn21_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + .enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dce110_disable_link_output, + .z10_restore = dcn31_z10_restore, + .z10_save_init = dcn31_z10_save_init, + .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, +}; + +static const struct hwseq_private_funcs dcn31_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn30_set_input_transfer_func, + .set_output_transfer_func = dcn30_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = dcn20_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn20_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn31_enable_power_gating_plane, + .hubp_pg_control = dcn31_hubp_pg_control, + .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, + .update_odm = dcn20_update_odm, + .dsc_pg_control = dcn31_dsc_pg_control, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn30_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, + .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, +}; + +void 
dcn31_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn31_funcs; + dc->hwseq->funcs = dcn31_private_funcs; + +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h new file mode 100644 index 0000000000..a3db08c8bd --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN31_INIT_H__ +#define __DC_DCN31_INIT_H__ + +struct dc; + +void dcn31_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN31_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c new file mode 100644 index 0000000000..ccb7e317e8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dcn301/dcn301_hwseq.h" +#include "dcn31/dcn31_hwseq.h" +#include "dcn314/dcn314_hwseq.h" + +#include "dcn314_init.h" + +static const struct hw_sequencer_funcs dcn314_funcs = { + .program_gamut_remap = dcn30_program_gamut_remap, + .init_hw = dcn31_init_hw, + .power_down_on_boot = dcn10_power_down_on_boot, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dcn31_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn20_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, + .pipe_control_lock = dcn20_pipe_control_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn20_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn30_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dcn30_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_T12 = dce110_edp_wait_for_T12, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn30_enable_writeback, + .disable_writeback = dcn30_disable_writeback, + .update_writeback = dcn30_update_writeback, + .mmhubbub_warmup = dcn30_mmhubbub_warmup, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn30_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn31_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .power_down = dce110_power_down, + 
.set_backlight_level = dcn21_set_backlight_level, + .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, + .set_pipe = dcn21_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + .enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dcn314_disable_link_output, + .z10_restore = dcn31_z10_restore, + .z10_save_init = dcn31_z10_save_init, + .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, +}; + +static const struct hwseq_private_funcs dcn314_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn30_set_input_transfer_func, + .set_output_transfer_func = dcn30_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = dcn20_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn20_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn314_enable_power_gating_plane, + .dpp_root_clock_control = dcn314_dpp_root_clock_control, + .hubp_pg_control = dcn31_hubp_pg_control, + .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, + .update_odm = dcn314_update_odm, + .dsc_pg_control = dcn314_dsc_pg_control, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn30_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, + .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, + .calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values, + .set_pixels_per_cycle = dcn314_set_pixels_per_cycle, + .resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio, +}; + +void dcn314_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn314_funcs; + dc->hwseq->funcs = dcn314_private_funcs; + +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h new file mode 100644 index 0000000000..8f92e66577 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN314_INIT_H__ +#define __DC_DCN314_INIT_H__ + +struct dc; + +void dcn314_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN314_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 580afb0088..7668229438 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -51,6 +51,7 @@ #include "dcn32/dcn32_resource.h" #include "link.h" #include "../dcn20/dcn20_hwseq.h" +#include "dc_state_priv.h" #define DC_LOGGER_INIT(logger) @@ -348,8 +349,7 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.paired_stream && - pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) { // There is at least 1 SubVP pipe, so enable SubVP enable_subvp = true; break; @@ -375,18 +375,20 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, bool subvp_immediate_flip = false; bool subvp_in_use = false; struct pipe_ctx *pipe; + enum mall_stream_type pipe_mall_type = SUBVP_NONE; for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; + pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); - if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN) { subvp_in_use = true; break; } } if (top_pipe_to_program && top_pipe_to_program->stream && top_pipe_to_program->plane_state) { - if (top_pipe_to_program->stream->mall_stream_config.type == SUBVP_MAIN && + if (dc_state_get_pipe_subvp_type(context, top_pipe_to_program) == SUBVP_MAIN && top_pipe_to_program->plane_state->flip_immediate) subvp_immediate_flip = true; } @@ -398,7 +400,7 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc, if (!lock) { for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN && + if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN && should_lock_all_pipes) 
pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); } @@ -416,14 +418,7 @@ void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params) { struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc; bool lock = params->subvp_pipe_control_lock_fast_params.lock; - struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx; - bool subvp_immediate_flip = false; - - if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) { - if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN && - pipe_ctx->plane_state->flip_immediate) - subvp_immediate_flip = true; - } + bool subvp_immediate_flip = params->subvp_pipe_control_lock_fast_params.subvp_immediate_flip; // Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared. if (subvp_immediate_flip) { @@ -609,7 +604,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; - if (!pipe->stream || !(pipe->stream->mall_stream_config.type == SUBVP_MAIN || + if (!pipe->stream || !(dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN || pipe->stream->fpo_in_use)) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, false); @@ -624,7 +619,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context) struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = pipe->plane_res.hubp; - if (pipe->stream && (pipe->stream->mall_stream_config.type == SUBVP_MAIN || + if (pipe->stream && (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN || pipe->stream->fpo_in_use)) { if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) hubp->funcs->hubp_update_force_pstate_disallow(hubp, true); @@ -671,8 +666,8 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context) if (cursor_size > 16384) cache_cursor = true; - if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - hubp->funcs->hubp_update_mall_sel(hubp, 1, false); + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + hubp->funcs->hubp_update_mall_sel(hubp, 1, false); } else { // MALL not supported with Stereo3D hubp->funcs->hubp_update_mall_sel(hubp, @@ -714,9 +709,8 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context) * see if CURSOR_REQ_MODE will be back to 1 for SubVP * when it should be 0 for MPO */ - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) hubp->funcs->hubp_prepare_subvp_buffering(hubp, true); - } } } } @@ -759,6 +753,7 @@ void dcn32_init_hw(struct dc *dc) int i; int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); @@ -913,13 +908,15 @@ void dcn32_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL && abms[i]->funcs != NULL) - abms[i]->funcs->abm_init(abms[i], backlight); + abms[i]->funcs->abm_init(abms[i], backlight, user_level); } /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ @@ 
-1194,7 +1191,7 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_ continue; if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) - && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { + && dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_PHANTOM) { pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); reset_sync_context_for_pipe(dc, context, i); otg_disabled[i] = true; @@ -1345,8 +1342,8 @@ void dcn32_update_phantom_vp_position(struct dc *dc, for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN && - pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN && + dc_state_get_paired_subvp_stream(context, pipe->stream) == phantom_pipe->stream) { if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) { phantom_plane->src_rect.x = pipe->plane_state->src_rect.x; @@ -1371,21 +1368,19 @@ void dcn32_update_phantom_vp_position(struct dc *dc, void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe) { phantom_pipe->update_flags.raw = 0; - if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) { - phantom_pipe->update_flags.bits.enable = 1; - phantom_pipe->update_flags.bits.mpcc = 1; - phantom_pipe->update_flags.bits.dppclk = 1; - phantom_pipe->update_flags.bits.hubp_interdependent = 1; - phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; - phantom_pipe->update_flags.bits.gamut_remap = 1; - phantom_pipe->update_flags.bits.scaler = 1; - phantom_pipe->update_flags.bits.viewport = 1; - phantom_pipe->update_flags.bits.det_size = 1; - if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) { - phantom_pipe->update_flags.bits.odm = 1; - phantom_pipe->update_flags.bits.global_sync = 1; - } + if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) { + phantom_pipe->update_flags.bits.enable = 1; + phantom_pipe->update_flags.bits.mpcc = 1; + phantom_pipe->update_flags.bits.dppclk = 1; + phantom_pipe->update_flags.bits.hubp_interdependent = 1; + phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + phantom_pipe->update_flags.bits.gamut_remap = 1; + phantom_pipe->update_flags.bits.scaler = 1; + phantom_pipe->update_flags.bits.viewport = 1; + phantom_pipe->update_flags.bits.det_size = 1; + if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) { + phantom_pipe->update_flags.bits.odm = 1; + phantom_pipe->update_flags.bits.global_sync = 1; } } } @@ -1445,9 +1440,44 @@ void dcn32_update_dsc_pg(struct dc *dc, } } +void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context) +{ + struct dce_hwseq *hws = dc->hwseq; + int i; + + for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx_old->stream) + continue; + + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM) + continue; + + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) + continue; + + if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) || + (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) { + struct clock_source *old_clk = pipe_ctx_old->clock_source; + + if 
(hws->funcs.reset_back_end_for_pipe) + hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); + if (hws->funcs.enable_stream_gating) + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); + if (old_clk) + old_clk->funcs->cs_power_down(old_clk); + } + } +} + void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) { unsigned int i; + enum dc_status status = DC_OK; + struct dce_hwseq *hws = dc->hwseq; for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1457,8 +1487,8 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) * pipe, wait for the double buffer update to complete first before we do * ANY phantom pipe programming. */ - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM && - old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM && + old_pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) != SUBVP_PHANTOM) { old_pipe->stream_res.tg->funcs->wait_for_state( old_pipe->stream_res.tg, CRTC_STATE_VBLANK); @@ -1468,16 +1498,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) } } for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; - - if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { - // If old context or new context has phantom pipes, apply - // the phantom timings now. We can't change the phantom - // pipe configuration safely without driver acquiring - // the DMCUB lock first. - dc->hwss.apply_ctx_to_hw(dc, context); - break; + struct pipe_ctx *pipe_ctx_old = + &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream == NULL) + continue; + + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) + continue; + + if (pipe_ctx->stream == pipe_ctx_old->stream && + pipe_ctx->stream->link->link_state_valid) { + continue; } + + if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) + continue; + + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) + continue; + + if (hws->funcs.apply_single_controller_ctx_to_hw) + status = hws->funcs.apply_single_controller_ctx_to_hw( + pipe_ctx, + context, + dc); + + ASSERT(status == DC_OK); + +#ifdef CONFIG_DRM_AMD_DC_FP + if (hws->funcs.resync_fifo_dccg_dio) + hws->funcs.resync_fifo_dccg_dio(hws, dc, context); +#endif } } @@ -1691,3 +1744,26 @@ void dcn32_prepare_bandwidth(struct dc *dc, context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; } } + +void dcn32_interdependent_update_lock(struct dc *dc, + struct dc_state *context, bool lock) +{ + unsigned int i; + struct pipe_ctx *pipe; + struct timing_generator *tg; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + tg = pipe->stream_res.tg; + + if (!resource_is_pipe_type(pipe, OTG_MASTER) || + !tg->funcs->is_tg_enabled(tg) || + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) + continue; + + if (lock) + dc->hwss.pipe_control_lock(dc, pipe, true); + else + dc->hwss.pipe_control_lock(dc, pipe, false); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index cecf7f0f56..f55c11fc56 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ 
b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc, void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context); +void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context); + void dcn32_init_blank( struct dc *dc, struct timing_generator *tg); @@ -127,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, void dcn32_prepare_bandwidth(struct dc *dc, struct dc_state *context); +void dcn32_interdependent_update_lock(struct dc *dc, + struct dc_state *context, bool lock); #endif /* __DC_HWSS_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c new file mode 100644 index 0000000000..03253faeae --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c @@ -0,0 +1,172 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dcn31/dcn31_hwseq.h" +#include "dcn32/dcn32_hwseq.h" +#include "dcn32_init.h" + +static const struct hw_sequencer_funcs dcn32_funcs = { + .program_gamut_remap = dcn30_program_gamut_remap, + .init_hw = dcn32_init_hw, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dcn31_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn32_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn20_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, + .pipe_control_lock = dcn20_pipe_control_lock, + .interdependent_update_lock = dcn32_interdependent_update_lock, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn32_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn30_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dcn30_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .edp_wait_for_T12 = dce110_edp_wait_for_T12, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn30_enable_writeback, + .disable_writeback = dcn30_disable_writeback, + .update_writeback = dcn30_update_writeback, + .mmhubbub_warmup = dcn30_mmhubbub_warmup, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn30_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn20_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations, + .does_plane_fit_in_mall = NULL, + 
.set_backlight_level = dcn21_set_backlight_level, + .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, + .hardware_release = dcn30_hardware_release, + .set_pipe = dcn21_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + .enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dcn32_disable_link_output, + .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, + .get_dcc_en_bits = dcn10_get_dcc_en_bits, + .commit_subvp_config = dcn32_commit_subvp_config, + .enable_phantom_streams = dcn32_enable_phantom_streams, + .disable_phantom_streams = dcn32_disable_phantom_streams, + .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast, + .update_phantom_vp_position = dcn32_update_phantom_vp_position, + .update_dsc_pg = dcn32_update_dsc_pg, + .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, + .blank_phantom = dcn32_blank_phantom, + .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless, +}; + +static const struct hwseq_private_funcs dcn32_private_funcs = { + .init_pipes = dcn10_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn32_set_input_transfer_func, + .set_output_transfer_func = dcn32_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = dcn20_disable_stream_gating, + .enable_stream_gating = dcn20_enable_stream_gating, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn32_init_blank, + .disable_vga = dcn20_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn20_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn32_enable_power_gating_plane, + .hubp_pg_control = dcn32_hubp_pg_control, + .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, + .update_odm = dcn32_update_odm, + .dsc_pg_control = dcn32_dsc_pg_control, + .dsc_pg_status = dcn32_dsc_pg_status, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_mcm_luts = dcn32_set_mcm_luts, + .program_mall_pipe_config = dcn32_program_mall_pipe_config, + .update_force_pstate = dcn32_update_force_pstate, + .update_mall_sel = dcn32_update_mall_sel, + .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, + .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, + .resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio, + .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, + .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, +}; + +void dcn32_hw_sequencer_init_functions(struct dc *dc) +{ + dc->hwss = dcn32_funcs; + 
dc->hwseq->funcs = dcn32_private_funcs; + +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h new file mode 100644 index 0000000000..89a591eb2c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN32_INIT_H__ +#define __DC_DCN32_INIT_H__ + +struct dc; + +void dcn32_hw_sequencer_init_functions(struct dc *dc); + +#endif /* __DC_DCN32_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 325a711a14..1e67374b89 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -56,6 +56,7 @@ #include "dcn30/dcn30_cm_common.h" #include "dcn31/dcn31_hwseq.h" #include "dcn20/dcn20_hwseq.h" +#include "dc_state_priv.h" #define DC_LOGGER_INIT(logger) \ struct dal_logger *dc_logger = logger @@ -133,6 +134,7 @@ void dcn35_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; + uint32_t user_level = MAX_BACKLIGHT_LEVEL; int i; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) @@ -145,17 +147,36 @@ void dcn35_init_hw(struct dc *dc) hws->funcs.bios_golden_init(dc); } - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - /* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */ - REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1, - PHYBSYMCLK_ROOT_GATE_DISABLE, 1, - PHYCSYMCLK_ROOT_GATE_DISABLE, 1, - PHYDSYMCLK_ROOT_GATE_DISABLE, 1, - PHYESYMCLK_ROOT_GATE_DISABLE, 1); + if (!dc->debug.disable_clock_gate) { + REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); + REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + + /* Disable gating for PHYASYMCLK. 
This will be enabled in dccg if needed */ + REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1, + PHYBSYMCLK_ROOT_GATE_DISABLE, 1, + PHYCSYMCLK_ROOT_GATE_DISABLE, 1, + PHYDSYMCLK_ROOT_GATE_DISABLE, 1, + PHYESYMCLK_ROOT_GATE_DISABLE, 1); + + REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL4, + DPIASYMCLK0_GATE_DISABLE, 0, + DPIASYMCLK1_GATE_DISABLE, 0, + DPIASYMCLK2_GATE_DISABLE, 0, + DPIASYMCLK3_GATE_DISABLE, 0); + + REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0xFFFFFFFF); + REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5, + DTBCLK_P0_GATE_DISABLE, 0, + DTBCLK_P1_GATE_DISABLE, 0, + DTBCLK_P2_GATE_DISABLE, 0, + DTBCLK_P3_GATE_DISABLE, 0); + REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5, + DPSTREAMCLK0_GATE_DISABLE, 0, + DPSTREAMCLK1_GATE_DISABLE, 0, + DPSTREAMCLK2_GATE_DISABLE, 0, + DPSTREAMCLK3_GATE_DISABLE, 0); - REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0x1f7c3fcf); + } // Initialize the dccg if (res_pool->dccg->funcs->dccg_init) @@ -260,13 +281,15 @@ void dcn35_init_hw(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->panel_cntl) + if (link->panel_cntl) { backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl); + user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL; + } } if (dc->ctx->dmub_srv) { for (i = 0; i < dc->res_pool->pipe_count; i++) { if (abms[i] != NULL && abms[i]->funcs != NULL) - abms[i]->funcs->abm_init(abms[i], backlight); + abms[i]->funcs->abm_init(abms[i], backlight, user_level); } } @@ -332,9 +355,6 @@ void dcn35_init_hw(struct dc *dc) if (dc->res_pool->pg_cntl) { if (dc->res_pool->pg_cntl->funcs->init_pg_status) dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl); - - if (dc->res_pool->pg_cntl->funcs->set_force_poweron_domain22) - dc->res_pool->pg_cntl->funcs->set_force_poweron_domain22(dc->res_pool->pg_cntl, false); } } @@ -619,7 +639,7 @@ void dcn35_power_down_on_boot(struct dc *dc) bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable) { struct dc_link *edp_links[MAX_NUM_EDP]; - int edp_num; + int i, edp_num; if (dc->debug.dmcub_emulation) return true; @@ -627,6 +647,13 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable) dc_get_edp_links(dc, edp_links, &edp_num); if (edp_num == 0 || edp_num > 1) return false; + + for (i = 0; i < dc->current_state->stream_count; ++i) { + struct dc_stream_state *stream = dc->current_state->streams[i]; + + if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal)) + return false; + } } // TODO: review other cases when idle optimization is allowed @@ -756,12 +783,12 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; - hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx); if (tg->funcs->is_tg_enabled(tg)) tg->funcs->unlock(tg); - dc->hwss.disable_plane(dc, pipe_ctx); + dc->hwss.disable_plane(dc, context, pipe_ctx); pipe_ctx->stream_res.tg = NULL; pipe_ctx->plane_res.hubp = NULL; @@ -888,10 +915,10 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->plane_state = NULL; } -void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; - bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom; + bool is_phantom = dc_state_get_pipe_subvp_type(state, 
pipe_ctx) == SUBVP_PHANTOM; struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL; DC_LOGGER_INIT(dc->ctx->logger); @@ -918,6 +945,8 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, bool hpo_frl_stream_enc_acquired = false; bool hpo_dp_stream_enc_acquired = false; int i = 0, j = 0; + int edp_num = 0; + struct dc_link *edp_links[MAX_NUM_EDP] = { NULL }; memset(update_state, 0, sizeof(struct pg_block_update)); @@ -958,10 +987,24 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (pipe_ctx->stream_res.opp) update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false; + } + /*domain24 controls all the otg, mpc and opp; as long as one otg is still up, avoid enabling OTG PG*/ + for (i = 0; i < dc->res_pool->timing_generator_count; i++) { + struct timing_generator *tg = dc->res_pool->timing_generators[i]; + if (tg && tg->funcs->is_tg_enabled(tg)) { + update_state->pg_pipe_res_update[PG_OPTC][i] = false; + break; + } + } - if (pipe_ctx->stream_res.tg) - update_state->pg_pipe_res_update[PG_OPTC][pipe_ctx->stream_res.tg->inst] = false; + dc_get_edp_links(dc, edp_links, &edp_num); + if (edp_num == 0 || + ((!edp_links[0] || !edp_links[0]->edp_sink_present) && + (!edp_links[1] || !edp_links[1]->edp_sink_present))) { + /*eDP does not exist in this config; keep Domain24 powered on. For S0i3 this will be handled in dmubfw*/ + update_state->pg_pipe_res_update[PG_OPTC][0] = false; } + } void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, @@ -1047,8 +1090,29 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, } -void dcn35_block_power_control(struct dc *dc, - struct pg_block_update *update_state, bool power_on) +/** + * dcn35_hw_block_power_down() - power down sequence + * + * The following sequence describes the ON-OFF (ONO) for power down: + * + * ONO Region 3, DCPG 25: hpo - SKIPPED + * ONO Region 4, DCPG 0: dchubp0, dpp0 + * ONO Region 6, DCPG 1: dchubp1, dpp1 + * ONO Region 8, DCPG 2: dchubp2, dpp2 + * ONO Region 10, DCPG 3: dchubp3, dpp3 + * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry + * ONO Region 5, DCPG 16: dsc0 + * ONO Region 7, DCPG 17: dsc1 + * ONO Region 9, DCPG 18: dsc2 + * ONO Region 11, DCPG 19: dsc3 + * ONO Region 2, DCPG 24: mpc opp optc dwb + * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. 
will be pwr dwn after lono timer is armed + * + * @dc: Current DC state + * @update_state: update PG sequence states for HW block + */ +void dcn35_hw_block_power_down(struct dc *dc, + struct pg_block_update *update_state) { int i = 0; struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; @@ -1057,64 +1121,106 @@ void dcn35_block_power_control(struct dc *dc, return; if (dc->debug.ignore_pg) return; + if (update_state->pg_res_update[PG_HPO]) { if (pg_cntl->funcs->hpo_pg_control) - pg_cntl->funcs->hpo_pg_control(pg_cntl, power_on); + pg_cntl->funcs->hpo_pg_control(pg_cntl, false); } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { if (pg_cntl->funcs->hubp_dpp_pg_control) - pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, power_on); + pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false); } - + } + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) if (update_state->pg_pipe_res_update[PG_DSC][i]) { if (pg_cntl->funcs->dsc_pg_control) - pg_cntl->funcs->dsc_pg_control(pg_cntl, i, power_on); + pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false); } - if (update_state->pg_pipe_res_update[PG_MPCC][i]) { - if (pg_cntl->funcs->mpcc_pg_control) - pg_cntl->funcs->mpcc_pg_control(pg_cntl, i, power_on); - } - - if (update_state->pg_pipe_res_update[PG_OPP][i]) { - if (pg_cntl->funcs->opp_pg_control) - pg_cntl->funcs->opp_pg_control(pg_cntl, i, power_on); - } - if (update_state->pg_pipe_res_update[PG_OPTC][i]) { - if (pg_cntl->funcs->optc_pg_control) - pg_cntl->funcs->optc_pg_control(pg_cntl, i, power_on); - } - } + /*this will need all the clients to unregister optc interrupts; let dmubfw handle this*/ + if (pg_cntl->funcs->plane_otg_pg_control) + pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false); - if (update_state->pg_res_update[PG_DWB]) { - if (pg_cntl->funcs->dwb_pg_control) - pg_cntl->funcs->dwb_pg_control(pg_cntl, power_on); - } - if (pg_cntl->funcs->plane_otg_pg_control) - pg_cntl->funcs->plane_otg_pg_control(pg_cntl, power_on); } -void dcn35_root_clock_control(struct dc *dc, - struct pg_block_update *update_state, bool power_on) +/** + * dcn35_hw_block_power_up() - power up sequence + * + * The following sequence describes the ON-OFF (ONO) for power up: + * + * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED + * ONO Region 2, DCPG 24: mpc opp optc dwb + * ONO Region 5, DCPG 16: dsc0 + * ONO Region 7, DCPG 17: dsc1 + * ONO Region 9, DCPG 18: dsc2 + * ONO Region 11, DCPG 19: dsc3 + * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit + * ONO Region 4, DCPG 0: dchubp0, dpp0 + * ONO Region 6, DCPG 1: dchubp1, dpp1 + * ONO Region 8, DCPG 2: dchubp2, dpp2 + * ONO Region 10, DCPG 3: dchubp3, dpp3 + * ONO Region 3, DCPG 25: hpo - SKIPPED + * + * @dc: Current DC state + * @update_state: update PG sequence states for HW block + */ +void dcn35_hw_block_power_up(struct dc *dc, + struct pg_block_update *update_state) { int i = 0; struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; if (!pg_cntl) return; + if (dc->debug.ignore_pg) + return; + //domain22, 23, 25 currently always on. 
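+ /* The power-up path below mirrors dcn35_hw_block_power_down() in reverse: the shared OTG/MPC/OPP domain comes up first, then DSC, then each HUBP/DPP pair, with HPO last. */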
+ /*this will need all the clients to unregister optc interrupts; let dmubfw handle this*/ + if (pg_cntl->funcs->plane_otg_pg_control) + pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true); + + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) + if (update_state->pg_pipe_res_update[PG_DSC][i]) { + if (pg_cntl->funcs->dsc_pg_control) + pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true); + } for (i = 0; i < dc->res_pool->pipe_count; i++) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { - if (dc->hwseq->funcs.dpp_root_clock_control) - dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); + if (pg_cntl->funcs->hubp_dpp_pg_control) + pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true); } + } + if (update_state->pg_res_update[PG_HPO]) { + if (pg_cntl->funcs->hpo_pg_control) + pg_cntl->funcs->hpo_pg_control(pg_cntl, true); + } +} +void dcn35_root_clock_control(struct dc *dc, + struct pg_block_update *update_state, bool power_on) +{ + int i = 0; + struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; + if (!pg_cntl) + return; + /*enable root clock first when powering up*/ + if (power_on) + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + if (dc->hwseq->funcs.dpp_root_clock_control) + dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); + } + } + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { if (update_state->pg_pipe_res_update[PG_DSC][i]) { if (power_on) { if (dc->res_pool->dccg->funcs->enable_dsc) @@ -1125,6 +1231,15 @@ void dcn35_root_clock_control(struct dc *dc, } } } + /*disable root clock first when powering down*/ + if (!power_on) + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + if (dc->hwseq->funcs.dpp_root_clock_control) + dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); + } + } } void dcn35_prepare_bandwidth( @@ -1138,9 +1253,9 @@ void dcn35_prepare_bandwidth( if (dc->hwss.root_clock_control) dc->hwss.root_clock_control(dc, &pg_update_state, true); - - if (dc->hwss.block_power_control) - dc->hwss.block_power_control(dc, &pg_update_state, true); + /*power up the required HW blocks*/ + if (dc->hwss.hw_block_power_up) + dc->hwss.hw_block_power_up(dc, &pg_update_state); } dcn20_prepare_bandwidth(dc, context); @@ -1156,9 +1271,9 @@ void dcn35_optimize_bandwidth( if (dc->hwss.calc_blocks_to_gate) { dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state); - - if (dc->hwss.block_power_control) - dc->hwss.block_power_control(dc, &pg_update_state, false); + /*try to power down unused blocks*/ + if (dc->hwss.hw_block_power_down) + dc->hwss.hw_block_power_down(dc, &pg_update_state); if (dc->hwss.root_clock_control) dc->hwss.root_clock_control(dc, &pg_update_state, false); @@ -1180,3 +1295,44 @@ uint32_t dcn35_get_idle_state(const struct dc *dc) return 0; } + +void dcn35_set_drr(struct pipe_ctx **pipe_ctx, + int num_pipes, struct dc_crtc_timing_adjust adjust) +{ + int i = 0; + struct drr_params params = {0}; + // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow + unsigned int event_triggers = 0x800; + // Note DRR trigger events are generated regardless of whether num frames met. 
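+ // num_frames is forwarded to set_static_screen_control() together with event_triggers; the IPS-enabled branch below recomputes it for displays running at 120 Hz and above.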
+ unsigned int num_frames = 2; + + params.vertical_total_max = adjust.v_total_max; + params.vertical_total_min = adjust.v_total_min; + params.vertical_total_mid = adjust.v_total_mid; + params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num; + + for (i = 0; i < num_pipes; i++) { + if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) { + struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing; + struct dc *dc = pipe_ctx[i]->stream->ctx->dc; + + if (dc->debug.static_screen_wait_frames) { + unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total); + + if (frame_rate >= 120 && dc->caps.ips_support && + dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) { + /*ips enable case*/ + num_frames = 2 * (frame_rate % 60); + } + } + if (pipe_ctx[i]->stream_res.tg->funcs->set_drr) + pipe_ctx[i]->stream_res.tg->funcs->set_drr( + pipe_ctx[i]->stream_res.tg, &params); + if (adjust.v_total_max != 0 && adjust.v_total_min != 0) + if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control) + pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( + pipe_ctx[i]->stream_res.tg, + event_triggers, num_frames); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 0dff10d179..fd66316e33 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -57,14 +57,16 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context); void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx); void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); -void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, struct pg_block_update *update_state); void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, struct pg_block_update *update_state); -void dcn35_block_power_control(struct dc *dc, - struct pg_block_update *update_state, bool power_on); +void dcn35_hw_block_power_up(struct dc *dc, + struct pg_block_update *update_state); +void dcn35_hw_block_power_down(struct dc *dc, + struct pg_block_update *update_state); void dcn35_root_clock_control(struct dc *dc, struct pg_block_update *update_state, bool power_on); @@ -84,4 +86,8 @@ void dcn35_dsc_pg_control( void dcn35_set_idle_state(const struct dc *dc, bool allow_idle); uint32_t dcn35_get_idle_state(const struct dc *dc); + +void dcn35_set_drr(struct pipe_ctx **pipe_ctx, + int num_pipes, struct dc_crtc_timing_adjust adjust); + #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c new file mode 100644 index 0000000000..a630aa77dc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dcn301/dcn301_hwseq.h" +#include "dcn31/dcn31_hwseq.h" +#include "dcn32/dcn32_hwseq.h" +#include "dcn35/dcn35_hwseq.h" + +#include "dcn35_init.h" + +static const struct hw_sequencer_funcs dcn35_funcs = { + .program_gamut_remap = dcn30_program_gamut_remap, + .init_hw = dcn35_init_hw, + .power_down_on_boot = dcn35_power_down_on_boot, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dcn31_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn32_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn35_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, + .pipe_control_lock = dcn20_pipe_control_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn35_prepare_bandwidth, + .optimize_bandwidth = dcn35_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn35_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn30_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dcn30_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = 
dce110_edp_power_control, + .edp_wait_for_T12 = dce110_edp_wait_for_T12, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn30_enable_writeback, + .disable_writeback = dcn30_disable_writeback, + .update_writeback = dcn30_update_writeback, + .mmhubbub_warmup = dcn30_mmhubbub_warmup, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn30_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn31_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .power_down = dce110_power_down, + .set_backlight_level = dcn21_set_backlight_level, + .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, + .set_pipe = dcn21_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + .enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dcn32_disable_link_output, + .z10_restore = dcn35_z10_restore, + .z10_save_init = dcn31_z10_save_init, + .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations, + .update_dsc_pg = dcn32_update_dsc_pg, + .calc_blocks_to_gate = dcn35_calc_blocks_to_gate, + .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, + .hw_block_power_up = dcn35_hw_block_power_up, + .hw_block_power_down = dcn35_hw_block_power_down, + .root_clock_control = dcn35_root_clock_control, + .set_idle_state = dcn35_set_idle_state, + .get_idle_state = dcn35_get_idle_state +}; + +static const struct hwseq_private_funcs dcn35_private_funcs = { + .init_pipes = dcn35_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn32_set_input_transfer_func, + .set_output_transfer_func = dcn32_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn20_init_blank, + .disable_vga = NULL, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn35_plane_atomic_disable, + //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/ + //.hubp_pg_control = dcn35_hubp_pg_control, + .enable_power_gating_plane = dcn35_enable_power_gating_plane, + .dpp_root_clock_control = dcn35_dpp_root_clock_control, + .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, + .update_odm = 
dcn35_update_odm, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_mcm_luts = dcn32_set_mcm_luts, + .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, + .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, + .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, + .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .dsc_pg_control = dcn35_dsc_pg_control, + .dsc_pg_status = dcn32_dsc_pg_status, + .enable_plane = dcn35_enable_plane, +}; + +void dcn35_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn35_funcs; + dc->hwseq->funcs = dcn35_private_funcs; + +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h new file mode 100644 index 0000000000..b67015032c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN35_INIT_H__ +#define __DC_DCN35_INIT_H__ + +struct dc; + +void dcn35_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN35_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt new file mode 100644 index 0000000000..951ca2da44 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dcn351_init.c + dcn351_init.h +) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile new file mode 100644 index 0000000000..b24ad27fe6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile @@ -0,0 +1,17 @@ +# +# (c) Copyright 2022 Advanced Micro Devices, Inc. All the rights reserved +# +# All rights reserved. This notice is intended as a precaution against +# inadvertent publication and does not imply publication or any waiver +# of confidentiality. The year included in the foregoing notice is the +# year of creation of the work. +# +# Authors: AMD +# +# Makefile for DCN351. 
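Both function tables come together in dcn35_hw_sequencer_construct(): dc->hwss carries the public sequencer hooks and dc->hwseq->funcs the private ones, so every later hardware-sequencer call dispatches through these per-ASIC pointers. A minimal sketch of the expected call site, assuming a hypothetical pool-construct helper (the real caller lives in the dcn35 resource code, which is outside this hunk):

#include "dcn35/dcn35_init.h"

/* Hypothetical illustration only: wire the DCN 3.5 sequencer tables into a
 * freshly created dc instance before any hwss hook is invoked. */
static bool example_dcn35_pool_construct(struct dc *dc)
{
	dcn35_hw_sequencer_construct(dc);

	/* From here on, a call such as dc->hwss.init_hw(dc) resolves to the
	 * dcn35_* (or inherited dcn10/dcn20/...) implementation named above. */
	return true;
}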
+ +DCN351 = dcn351_init.o + +AMD_DAL_DCN351 = $(addprefix $(AMDDALPATH)/dc/dcn351/,$(DCN351)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DCN351) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c new file mode 100644 index 0000000000..143d3fc022 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -0,0 +1,171 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dcn301/dcn301_hwseq.h" +#include "dcn31/dcn31_hwseq.h" +#include "dcn32/dcn32_hwseq.h" +#include "dcn35/dcn35_hwseq.h" + +#include "dcn351_init.h" + +static const struct hw_sequencer_funcs dcn351_funcs = { + .program_gamut_remap = dcn30_program_gamut_remap, + .init_hw = dcn35_init_hw, + .power_down_on_boot = dcn35_power_down_on_boot, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn20_post_unlock_program_front_end, + .update_plane_addr = dcn20_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dcn31_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dcn20_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn32_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn35_disable_plane, + .disable_pixel_data = dcn20_disable_pixel_data, + .pipe_control_lock = dcn20_pipe_control_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn35_prepare_bandwidth, + .optimize_bandwidth = dcn35_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + 
.set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn30_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dcn30_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_T12 = dce110_edp_wait_for_T12, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn10_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .enable_writeback = dcn30_enable_writeback, + .disable_writeback = dcn30_disable_writeback, + .update_writeback = dcn30_update_writeback, + .mmhubbub_warmup = dcn30_mmhubbub_warmup, + .dmdata_status_done = dcn20_dmdata_status_done, + .program_dmdata_engine = dcn30_program_dmdata_engine, + .set_dmdata_attributes = dcn20_set_dmdata_attributes, + .init_sys_ctx = dcn31_init_sys_ctx, + .init_vm_ctx = dcn20_init_vm_ctx, + .set_flip_control_gsl = dcn20_set_flip_control_gsl, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .power_down = dce110_power_down, + .set_backlight_level = dcn21_set_backlight_level, + .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, + .set_pipe = dcn21_set_pipe, + .enable_lvds_link_output = dce110_enable_lvds_link_output, + .enable_tmds_link_output = dce110_enable_tmds_link_output, + .enable_dp_link_output = dce110_enable_dp_link_output, + .disable_link_output = dcn32_disable_link_output, + .z10_restore = dcn35_z10_restore, + .z10_save_init = dcn31_z10_save_init, + .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, + .optimize_pwr_state = dcn21_optimize_pwr_state, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, + .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations, + .update_dsc_pg = dcn32_update_dsc_pg, + .calc_blocks_to_gate = dcn35_calc_blocks_to_gate, + .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, + .hw_block_power_up = dcn35_hw_block_power_up, + .hw_block_power_down = dcn35_hw_block_power_down, + .root_clock_control = dcn35_root_clock_control, + .set_idle_state = dcn35_set_idle_state, + .get_idle_state = dcn35_get_idle_state +}; + +static const struct hwseq_private_funcs dcn351_private_funcs = { + .init_pipes = dcn35_init_pipes, + .update_plane_addr = dcn20_update_plane_addr, + .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .update_mpcc = dcn20_update_mpcc, + .set_input_transfer_func = dcn32_set_input_transfer_func, + .set_output_transfer_func = dcn32_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = 
dcn20_init_blank, + .disable_vga = NULL, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn35_plane_atomic_disable, + //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/ + //.hubp_pg_control = dcn35_hubp_pg_control, + .enable_power_gating_plane = dcn35_enable_power_gating_plane, + .dpp_root_clock_control = dcn35_dpp_root_clock_control, + .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, + .update_odm = dcn35_update_odm, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_mcm_luts = dcn32_set_mcm_luts, + .setup_hpo_hw_control = dcn35_setup_hpo_hw_control, + .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values, + .set_pixels_per_cycle = dcn32_set_pixels_per_cycle, + .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, + .dsc_pg_control = dcn35_dsc_pg_control, + .dsc_pg_status = dcn32_dsc_pg_status, + .enable_plane = dcn35_enable_plane, +}; + +void dcn351_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn351_funcs; + dc->hwseq->funcs = dcn351_private_funcs; + +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h new file mode 100644 index 0000000000..970b01008b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
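Entry for entry, dcn351_funcs and dcn351_private_funcs mirror the DCN 3.5 tables: DCN 3.5.1 starts out reusing the dcn35_* hooks, together with the inherited dcn10/dcn20/dcn30 ones, wholesale. Keeping a separate copy still pays off, because a 3.5.1-only quirk can later be introduced by repointing a single member without disturbing DCN 3.5. A hedged sketch, with the override purely hypothetical:

/* Hypothetical illustration: how a future DCN 3.5.1-specific hook could be
 * patched in after construction. dcn351_example_z10_restore does not exist
 * in this patch; it stands in for any per-ASIC override. */
void example_dcn351_construct_with_override(struct dc *dc)
{
	dcn351_hw_sequencer_construct(dc);
	/* dc->hwss.z10_restore = dcn351_example_z10_restore; */
}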
+ * + * Authors: AMD + * + */ + +#ifndef __DC_DCN351_INIT_H__ +#define __DC_DCN351_INIT_H__ + +struct dc; + +void dcn351_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN351_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 452680fe9a..64ca7c6650 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -50,7 +50,7 @@ struct pg_block_update; struct subvp_pipe_control_lock_fast_params { struct dc *dc; bool lock; - struct pipe_ctx *pipe_ctx; + bool subvp_immediate_flip; }; struct pipe_control_lock_params { @@ -200,7 +200,7 @@ struct hw_sequencer_funcs { struct dc_state *context); enum dc_status (*apply_ctx_to_hw)(struct dc *dc, struct dc_state *context); - void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank); void (*apply_ctx_for_surface)(struct dc *dc, const struct dc_stream_state *stream, @@ -248,6 +248,7 @@ struct hw_sequencer_funcs { void (*enable_per_frame_crtc_position_reset)(struct dc *dc, int group_size, struct pipe_ctx *grouped_pipes[]); void (*enable_timing_synchronization)(struct dc *dc, + struct dc_state *state, int group_index, int group_size, struct pipe_ctx *grouped_pipes[]); void (*enable_vblanks_synchronization)(struct dc *dc, @@ -378,6 +379,7 @@ struct hw_sequencer_funcs { struct dc_cursor_attributes *cursor_attr); void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context); + void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context); void (*subvp_pipe_control_lock)(struct dc *dc, struct dc_state *context, bool lock, @@ -414,8 +416,10 @@ struct hw_sequencer_funcs { struct pg_block_update *update_state); void (*calc_blocks_to_ungate)(struct dc *dc, struct dc_state *context, struct pg_block_update *update_state); - void (*block_power_control)(struct dc *dc, - struct pg_block_update *update_state, bool power_on); + void (*hw_block_power_up)(struct dc *dc, + struct pg_block_update *update_state); + void (*hw_block_power_down)(struct dc *dc, + struct pg_block_update *update_state); void (*root_clock_control)(struct dc *dc, struct pg_block_update *update_state, bool power_on); void (*set_idle_state)(const struct dc *dc, bool allow_idle); @@ -452,17 +456,18 @@ void get_mpctree_visual_confirm_color( struct tg_color *color); void get_subvp_visual_confirm_color( - struct dc *dc, - struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color); void get_mclk_switch_visual_confirm_color( - struct dc *dc, - struct dc_state *context, struct pipe_ctx *pipe_ctx, struct tg_color *color); +void set_p_state_switch_method( + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx); + void hwss_execute_sequence(struct dc *dc, struct block_sequence block_sequence[], int num_steps); @@ -472,7 +477,8 @@ void hwss_build_fast_sequence(struct dc *dc, unsigned int dmub_cmd_count, struct block_sequence block_sequence[], int *num_steps, - struct pipe_ctx *pipe_ctx); + struct pipe_ctx *pipe_ctx, + struct dc_stream_status *stream_status); void hwss_send_dmcub_cmd(union block_sequence_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 
82c5921668..b3c62a82cb 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -79,6 +79,7 @@ struct hwseq_private_funcs { void (*update_plane_addr)(const struct dc *dc, struct pipe_ctx *pipe_ctx); void (*plane_atomic_disconnect)(struct dc *dc, + struct dc_state *state, struct pipe_ctx *pipe_ctx); void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx); bool (*set_input_transfer_func)(struct dc *dc, @@ -164,8 +165,15 @@ struct hwseq_private_funcs { void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx); void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc, struct dc_state *context); + enum dc_status (*apply_single_controller_ctx_to_hw)( + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct dc *dc); bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx); #endif + void (*reset_back_end_for_pipe)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index bc9cda3294..3a6bf77a68 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -200,11 +200,7 @@ struct resource_funcs { unsigned int pipe_cnt, unsigned int index); - bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context, bool fast_update); - void (*retain_phantom_pipes)(struct dc *dc, struct dc_state *context); void (*get_panel_config_defaults)(struct dc_panel_config *panel_config); - void (*save_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config); - void (*restore_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config); void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx); }; @@ -385,6 +381,16 @@ union pipe_update_flags { uint32_t raw; }; +enum p_state_switch_method { + P_STATE_UNKNOWN = 0, + P_STATE_V_BLANK = 1, + P_STATE_FPO, + P_STATE_V_ACTIVE, + P_STATE_SUB_VP, + P_STATE_DRR_SUB_VP, + P_STATE_V_BLANK_SUB_VP +}; + struct pipe_ctx { struct dc_plane_state *plane_state; struct dc_stream_state *stream; @@ -433,6 +439,7 @@ struct pipe_ctx { struct dwbc *dwbc; struct mcif_wb *mcif_wb; union pipe_update_flags update_flags; + enum p_state_switch_method p_state_type; struct tg_color visual_confirm_color; bool has_vactive_margin; /* subvp_index: only valid if the pipe is a SUBVP_MAIN*/ @@ -528,6 +535,14 @@ struct dc_state { * @stream_status: Planes status on a given stream */ struct dc_stream_status stream_status[MAX_PIPES]; + /** + * @phantom_streams: Stream state properties for phantoms + */ + struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES]; + /** + * @phantom_planes: Planes state properties for phantoms + */ + struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES]; /** * @stream_count: Total of streams in use @@ -535,6 +550,14 @@ struct dc_state { uint8_t stream_count; uint8_t stream_mask; + /** + * @phantom_stream_count: Total phantom streams in use + */ + uint8_t phantom_stream_count; + /** + * @phantom_plane_count: Total phantom planes in use + */ + uint8_t phantom_plane_count; /** * @res_ctx: Persistent state of resources */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index 9f521cf0fc..3f0161d646 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -36,7 +36,7 @@ struct abm { }; struct abm_funcs {
- void (*abm_init)(struct abm *abm, uint32_t back_light); + void (*abm_init)(struct abm *abm, uint32_t back_light, uint32_t user_level); bool (*set_abm_level)(struct abm *abm, unsigned int abm_level); bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst); bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 55ded5fb8a..17e014d3bd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -62,6 +62,25 @@ struct dcn3_clk_internal { uint32_t CLK4_CLK0_CURRENT_CNT; //fclk }; +struct dcn35_clk_internal { + int dummy; + uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk + uint32_t CLK1_CLK1_CURRENT_CNT; //dppclk + uint32_t CLK1_CLK2_CURRENT_CNT; //dprefclk + uint32_t CLK1_CLK3_CURRENT_CNT; //dcfclk + uint32_t CLK1_CLK4_CURRENT_CNT; //dtbclk + //uint32_t CLK1_CLK5_CURRENT_CNT; //dpiaclk + //uint32_t CLK1_CLK6_CURRENT_CNT; //srdbgclk + uint32_t CLK1_CLK3_DS_CNTL; //dcf_deep_sleep_divider + uint32_t CLK1_CLK3_ALLOW_DS; //dcf_deep_sleep_allow + + uint32_t CLK1_CLK0_BYPASS_CNTL; //dispclk bypass + uint32_t CLK1_CLK1_BYPASS_CNTL; //dppclk bypass + uint32_t CLK1_CLK2_BYPASS_CNTL; //dprefclk bypass + uint32_t CLK1_CLK3_BYPASS_CNTL; //dcfclk bypass + uint32_t CLK1_CLK4_BYPASS_CNTL; //dtbclk bypass +}; + struct dcn301_clk_internal { int dummy; uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 6b44557fcb..b9a06bf84c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -59,8 +59,8 @@ enum dentist_dispclk_change_mode { struct dp_dto_params { int otg_inst; enum signal_type signal; - long long pixclk_hz; - long long refclk_hz; + uint64_t pixclk_hz; + uint64_t refclk_hz; }; enum pixel_rate_div { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h deleted file mode 100644 index 4b27f29d0d..0000000000 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
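Among the header tweaks above, dp_dto_params switches pixclk_hz and refclk_hz from long long to uint64_t: clock rates in Hz are never negative, and unsigned 64-bit arithmetic keeps the phase/modulo ratio math free of signed-overflow undefined behavior. A hedged sketch of the kind of computation the unsigned type protects, using a hypothetical helper name (the real DTO programming lives in the DCCG implementations, not shown here):

#include <stdint.h>

/* Hypothetical helper: scale a pixclk/refclk ratio with round-to-nearest.
 * With uint64_t operands both the intermediate product and the division are
 * plain unsigned math; overflow of a signed long long would be undefined
 * behavior instead. Assumes refclk_hz != 0. */
static inline uint64_t example_scaled_clk_ratio(uint64_t pixclk_hz,
						uint64_t refclk_hz,
						uint64_t scale)
{
	return (pixclk_hz * scale + refclk_hz / 2) / refclk_hz;
}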
- * - * Authors: AMD - * - */ -#ifndef __DAL_DSC_H__ -#define __DAL_DSC_H__ - -#include "dc_dsc.h" -#include "dc_hw_types.h" -#include "dc_types.h" -/* do not include any other headers - * or else it might break Edid Utility functionality. - */ - - -/* Input parameters for configuring DSC from the outside of DSC */ -struct dsc_config { - uint32_t pic_width; - uint32_t pic_height; - enum dc_pixel_encoding pixel_encoding; - enum dc_color_depth color_depth; /* Bits per component */ - bool is_odm; - struct dc_dsc_config dc_dsc_cfg; -}; - - -/* Output parameters for configuring DSC-related part of OPTC */ -struct dsc_optc_config { - uint32_t slice_width; /* Slice width in pixels */ - uint32_t bytes_per_pixel; /* Bytes per pixel in u3.28 format */ - bool is_pixel_format_444; /* 'true' if pixel format is 'RGB 444' or 'Simple YCbCr 4:2:2' (4:2:2 upsampled to 4:4:4)' */ -}; - - -struct dcn_dsc_state { - uint32_t dsc_clock_en; - uint32_t dsc_slice_width; - uint32_t dsc_bits_per_pixel; - uint32_t dsc_slice_height; - uint32_t dsc_pic_width; - uint32_t dsc_pic_height; - uint32_t dsc_slice_bpg_offset; - uint32_t dsc_chunk_size; - uint32_t dsc_fw_en; - uint32_t dsc_opp_source; -}; - - -/* DSC encoder capabilities - * They differ from the DPCD DSC caps because they are based on AMD DSC encoder caps. - */ -union dsc_enc_slice_caps { - struct { - uint8_t NUM_SLICES_1 : 1; - uint8_t NUM_SLICES_2 : 1; - uint8_t NUM_SLICES_3 : 1; /* This one is not per DSC spec, but our encoder supports it */ - uint8_t NUM_SLICES_4 : 1; - uint8_t NUM_SLICES_8 : 1; - uint8_t NUM_SLICES_12 : 1; - uint8_t NUM_SLICES_16 : 1; - } bits; - uint8_t raw; -}; - -struct dsc_enc_caps { - uint8_t dsc_version; - union dsc_enc_slice_caps slice_caps; - int32_t lb_bit_depth; - bool is_block_pred_supported; - union dsc_color_formats color_formats; - union dsc_color_depth color_depth; - int32_t max_total_throughput_mps; /* Maximum total throughput with all the slices combined */ - int32_t max_slice_width; - uint32_t bpp_increment_div; /* bpp increment divisor, e.g. 
if 16, it's 1/16th of a bit */ - uint32_t edp_sink_max_bits_per_pixel; - bool is_dp; -}; - -struct dsc_funcs { - void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); - void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); - bool (*dsc_validate_stream)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg); - void (*dsc_set_config)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, - struct dsc_optc_config *dsc_optc_cfg); - bool (*dsc_get_packed_pps)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, - uint8_t *dsc_packed_pps); - void (*dsc_enable)(struct display_stream_compressor *dsc, int opp_pipe); - void (*dsc_disable)(struct display_stream_compressor *dsc); - void (*dsc_disconnect)(struct display_stream_compressor *dsc); -}; - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index 86b711dcc7..729ca0064e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -188,6 +188,10 @@ struct dwbc_funcs { bool (*is_enabled)( struct dwbc *dwbc); + void (*set_fc_enable)( + struct dwbc *dwbc, + enum dwb_frame_capture_enable enable); + void (*set_stereo)( struct dwbc *dwbc, struct dwb_stereo_params *stereo_params); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index b95ae9596c..dcae23faee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -43,6 +43,7 @@ * to be used inside loops and for determining array sizes. */ #define MAX_PIPES 6 +#define MAX_PHANTOM_PIPES (MAX_PIPES / 2) #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 #define MAX_HPO_DP2_ENCODERS 4 diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 7617fabbd1..0717920812 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -321,6 +321,9 @@ struct opp_funcs { bool (*dpg_is_blanked)( struct output_pixel_processor *opp); + bool (*dpg_is_pending)(struct output_pixel_processor *opp); + + void (*opp_dpg_set_blank_color)( struct output_pixel_processor *opp, const struct tg_color *color); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h index 660897e12a..e97d964a17 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h @@ -40,6 +40,7 @@ struct panel_cntl_backlight_registers { unsigned int BL_PWM_PERIOD_CNTL; unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV; unsigned int PANEL_PWRSEQ_REF_DIV2; + unsigned int USER_LEVEL; }; struct panel_cntl_funcs { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h index b9812afb88..00ea3864dd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h @@ -47,8 +47,6 @@ struct pg_cntl_funcs { void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on); void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on); void (*init_pg_status)(struct pg_cntl *pg_cntl); - - void (*set_force_poweron_domain22)(struct pg_cntl *pg_cntl, bool power_on); }; #endif //__DC_PG_CNTL_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 9a00a99317..cad3e5f148 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -333,6 +333,7 @@ struct timing_generator_funcs { void (*init_odm)(struct timing_generator *tg); void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); + void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index d768536814..bf29fc58ea 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -281,11 +281,16 @@ struct link_service { const unsigned int *power_opts); bool (*edp_setup_replay)(struct dc_link *link, const struct dc_stream_state *stream); + bool (*edp_send_replay_cmd)(struct dc_link *link, + enum replay_FW_Message_type msg, + union dmub_replay_cmd_set *cmd_data); bool (*edp_set_coasting_vtotal)( - struct dc_link *link, uint16_t coasting_vtotal); + struct dc_link *link, uint32_t coasting_vtotal); bool (*edp_replay_residency)(const struct dc_link *link, unsigned int *residency, const bool is_start, const bool is_alpm); + bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link, + const unsigned int *power_opts, uint32_t coasting_vtotal); bool (*edp_wait_for_t12)(struct dc_link *link); bool (*edp_is_ilr_optimization_required)(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 3d72443938..77a60aa9f2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -496,6 +496,18 @@ int recource_find_free_pipe_not_used_in_cur_res_ctx( struct resource_context *new_res_ctx, const struct resource_pool *pool); +/* + * Look for a free pipe in new resource context that is used in current resource + * context as an OTG master pipe. + * + * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool); + /* * Look for a free pipe in new resource context that is used as a secondary DPP * pipe in any MPCC combine in current resource context. 
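The finder added above follows the same contract as its siblings in resource.h: each returns a pipe index on success or FREE_PIPE_INDEX_NOT_FOUND, so callers can chain them from the most reuse-friendly candidate downwards. A hedged sketch of that caller pattern, assuming both finders share the (cur_res_ctx, new_res_ctx, pool) signature shown here (the real selection logic lives in dc_resource.c):

#include "resource.h"	/* finder declarations and FREE_PIPE_INDEX_NOT_FOUND */

/* Hypothetical illustration of chaining the free-pipe finders: first try a
 * pipe that the current context used as an OTG master, then fall back to a
 * pipe the current context did not use at all. */
static int example_pick_free_pipe(const struct resource_context *cur_res_ctx,
				  struct resource_context *new_res_ctx,
				  const struct resource_pool *pool)
{
	int pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
			cur_res_ctx, new_res_ctx, pool);

	if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
		pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
				cur_res_ctx, new_res_ctx, pool);

	return pipe_idx;	/* may still be FREE_PIPE_INDEX_NOT_FOUND */
}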
@@ -557,9 +569,6 @@ void update_audio_usage( unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); -void get_audio_check(struct audio_info *aud_modes, - struct audio_check *aud_chk); - bool get_temp_dp_link_res(struct dc_link *link, struct link_resource *link_res, struct dc_link_settings *link_settings); @@ -606,5 +615,4 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, struct pipe_ctx *pipe_ctx); bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream); - #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 007ee32c20..3cbfbf8d10 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -1279,86 +1279,6 @@ static void remove_stream_from_alloc_table( } } -static enum dc_status deallocate_mst_payload_with_temp_drm_wa( - struct pipe_ctx *pipe_ctx) -{ - struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->link; - struct dc_dp_mst_stream_allocation_table proposed_table = {0}; - struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); - int i; - bool mst_mode = (link->type == dc_connection_mst_branch); - /* adjust for drm changes*/ - const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); - const struct dc_link_settings empty_link_settings = {0}; - DC_LOGGER_INIT(link->ctx->logger); - - if (link_hwss->ext.set_throttled_vcp_size) - link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); - if (link_hwss->ext.set_hblank_min_symbol_width) - link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, - &empty_link_settings, - avg_time_slots_per_mtp); - - if (dm_helpers_dp_mst_write_payload_allocation_table( - stream->ctx, - stream, - &proposed_table, - false)) - update_mst_stream_alloc_table( - link, - pipe_ctx->stream_res.stream_enc, - pipe_ctx->stream_res.hpo_dp_stream_enc, - &proposed_table); - else - DC_LOG_WARNING("Failed to update" - "MST allocation table for" - "pipe idx:%d\n", - pipe_ctx->pipe_idx); - - DC_LOG_MST("%s" - "stream_count: %d: ", - __func__, - link->mst_stream_alloc_table.stream_count); - - for (i = 0; i < MAX_CONTROLLER_NUM; i++) { - DC_LOG_MST("stream_enc[%d]: %p " - "stream[%d].hpo_dp_stream_enc: %p " - "stream[%d].vcp_id: %d " - "stream[%d].slot_count: %d\n", - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, - i, - link->mst_stream_alloc_table.stream_allocations[i].vcp_id, - i, - link->mst_stream_alloc_table.stream_allocations[i].slot_count); - } - - if (link_hwss->ext.update_stream_allocation_table == NULL || - link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { - DC_LOG_DEBUG("Unknown encoding format\n"); - return DC_ERROR_UNEXPECTED; - } - - link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, - &link->mst_stream_alloc_table); - - if (mst_mode) { - dm_helpers_dp_mst_poll_for_allocation_change_trigger( - stream->ctx, - stream); - } - - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - false); - - return DC_OK; -} - static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; @@ -1371,9 +1291,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) const struct 
dc_link_settings empty_link_settings = {0}; DC_LOGGER_INIT(link->ctx->logger); - if (link->dc->debug.temp_mst_deallocation_sequence) - return deallocate_mst_payload_with_temp_drm_wa(pipe_ctx); - /* deallocate_mst_payload is called before disable link. When mode or * disable/enable monitor, new stream is created which is not in link * stream[] yet. For this, payload is not allocated yet, so de-alloc @@ -1446,16 +1363,14 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, &link->mst_stream_alloc_table); - if (mst_mode) { + if (mst_mode) dm_helpers_dp_mst_poll_for_allocation_change_trigger( stream->ctx, stream); - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - false); - } + dm_helpers_dp_mst_update_mst_mgr_for_deallocation( + stream->ctx, + stream); return DC_OK; } @@ -1536,12 +1451,10 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) stream->ctx, stream); - if (ret != ACT_LINK_LOST) { + if (ret != ACT_LINK_LOST) dm_helpers_dp_mst_send_payload_allocation( stream->ctx, - stream, - true); - } + stream); /* slot X.Y for only current stream */ pbn_per_slot = get_pbn_per_slot(stream); @@ -1801,8 +1714,7 @@ enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ dm_helpers_dp_mst_send_payload_allocation( stream->ctx, - stream, - true); + stream); /* notify immediate branch device table update */ if (dm_helpers_dp_mst_write_payload_allocation_table( @@ -1931,8 +1843,7 @@ enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ dm_helpers_dp_mst_send_payload_allocation( stream->ctx, - stream, - true); + stream); } /* increase throttled vcp size */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 41f8230f94..cf22b8f28b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -213,8 +213,10 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s link_srv->edp_get_replay_state = edp_get_replay_state; link_srv->edp_set_replay_allow_active = edp_set_replay_allow_active; link_srv->edp_setup_replay = edp_setup_replay; + link_srv->edp_send_replay_cmd = edp_send_replay_cmd; link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal; link_srv->edp_replay_residency = edp_replay_residency; + link_srv->edp_set_replay_power_opt_and_coasting_vtotal = edp_set_replay_power_opt_and_coasting_vtotal; link_srv->edp_wait_for_t12 = edp_wait_for_t12; link_srv->edp_is_ilr_optimization_required = diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h index 4a954317d0..595fb05946 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h @@ -25,6 +25,7 @@ #ifndef __LINK_VALIDATION_H__ #define __LINK_VALIDATION_H__ #include "link.h" + enum dc_status link_validate_mode_timing( const struct dc_stream_state *stream, struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index 2f11eaabbe..289f5d1333 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ 
b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -412,12 +412,18 @@ static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link) { enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN; - if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) + if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) { cable_max_link_rate = LINK_RATE_UHBR20; - else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) + } else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) { cable_max_link_rate = LINK_RATE_UHBR13_5; - else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) - cable_max_link_rate = LINK_RATE_UHBR10; + } else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) { + // allow DP40 cables to do UHBR13.5 for passive or unknown cable type + if (link->dpcd_caps.cable_id.bits.CABLE_TYPE < 2) { + cable_max_link_rate = LINK_RATE_UHBR13_5; + } else { + cable_max_link_rate = LINK_RATE_UHBR10; + } + } return cable_max_link_rate; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 982eda3c46..6af42ba988 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -82,25 +82,33 @@ bool dpia_query_hpd_status(struct dc_link *link) { union dmub_rb_cmd cmd = {0}; struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv; - bool is_hpd_high = false; /* prepare QUERY_HPD command */ cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; - /* Return HPD status reported by DMUB if query successfully executed. 
*/ - if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && - cmd.query_hpd.data.status == AUX_RET_SUCCESS) - is_hpd_high = cmd.query_hpd.data.result; - - DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", - __func__, - link->link_index, - link->link_id.enum_id - ENUM_ID_1, - cmd.query_hpd.data.status, - cmd.query_hpd.data.result); - - return is_hpd_high; + /* Query dpia hpd status from dmub */ + if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && + cmd.query_hpd.data.status == AUX_RET_SUCCESS) { + DC_LOG_DEBUG("%s: for link(%d) dpia(%d) success, current_hpd_status(%d) new_hpd_status(%d)\n", + __func__, + link->link_index, + link->link_id.enum_id - ENUM_ID_1, + link->hpd_status, + cmd.query_hpd.data.result); + link->hpd_status = cmd.query_hpd.data.result; + } else { + DC_LOG_ERROR("%s: for link(%d) dpia(%d) failed with status(%d), current_hpd_status(%d) new_hpd_status(0)\n", + __func__, + link->link_index, + link->link_id.enum_id - ENUM_ID_1, + cmd.query_hpd.data.status, + link->hpd_status); + link->hpd_status = false; + } + + return link->hpd_status; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 9eadc2c7f2..ba69874be5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -265,7 +265,7 @@ void dp_handle_link_loss(struct dc_link *link) for (i = count - 1; i >= 0; i--) { // Always use max settings here for DP 1.4a LL Compliance CTS - if (link->is_automated) { + if (link->skip_fallback_on_link_loss) { pipes[i]->link_config.dp_link_settings.lane_count = link->verified_link_cap.lane_count; pipes[i]->link_config.dp_link_settings.link_rate = @@ -404,7 +404,9 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link, if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { // Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC - link->is_automated = true; + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->skip_fallback_on_link_loss = true; + device_service_clear.bits.AUTOMATED_TEST = 1; core_link_write_dpcd( link, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c index 4f4e899e5c..5d36bab002 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -619,7 +619,7 @@ static enum link_training_result dpia_training_eq_non_transparent( uint32_t retries_eq = 0; enum dc_status status; enum dc_dp_training_pattern tr_pattern; - uint32_t wait_time_microsec; + uint32_t wait_time_microsec = 0; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = {0}; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; @@ -811,7 +811,7 @@ static enum link_training_result dpia_training_eq_transparent( /* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4 * has to share encoders unlike DP and USBC */ - if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->is_automated && retries_eq)) { + if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->skip_fallback_on_link_loss && retries_eq)) { result = LINK_TRAINING_SUCCESS; break; } @@ -1037,7 
+1037,7 @@ enum link_training_result dpia_perform_link_training( */ if (result == LINK_TRAINING_SUCCESS) { fsleep(5000); - if (!link->is_automated) + if (!link->skip_fallback_on_link_loss) result = dp_check_link_loss_status(link, <_settings); } else if (result == LINK_TRAINING_ABORT) dpia_training_abort(link, <_settings, repeater_id); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c index 68096d12f5..7087cdc9e9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -205,6 +205,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; + const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87}; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; union down_spread_ctrl downspread = {0}; @@ -293,6 +294,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_dpmf[0], + sizeof(vendor_lttpr_write_data_dpmf)); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); @@ -552,6 +557,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; + const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87}; enum link_training_result status = LINK_TRAINING_SUCCESS; uint8_t lane = 0; union down_spread_ctrl downspread = {0}; @@ -639,6 +645,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence( DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_dpmf[0], + sizeof(vendor_lttpr_write_data_dpmf)); + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { link_configure_fixed_vs_pe_retimer(link->ddc, &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 7c4a93d3cd..d01b77fb98 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -529,6 +529,9 @@ bool edp_set_backlight_level(const struct dc_link *link, if (dc_is_embedded_signal(link->connector_signal)) { struct pipe_ctx *pipe_ctx = get_pipe_from_link(link); + if (link->panel_cntl) + link->panel_cntl->stored_backlight_registers.USER_LEVEL = backlight_pwm_u16_16; + if (pipe_ctx) { /* Disable brightness ramping when the display is blanked * as it can hang the DMCU @@ -1001,7 +1004,37 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream return 
true; } -bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal) +/* + * This is a general interface for Replay to set a 32-bit variable to DMUB. + * replay_FW_Message_type: indicates which instruction or variable is passed to DMUB. + * cmd_data: value of the config. + */ +bool edp_send_replay_cmd(struct dc_link *link, + enum replay_FW_Message_type msg, + union dmub_replay_cmd_set *cmd_data) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (!replay) + return false; + + DC_LOGGER_INIT(link->ctx->logger); + + if (dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + cmd_data->panel_inst = panel_inst; + else { + DC_LOG_DC("%s(): get edp panel inst fail ", __func__); + return false; + } + + replay->funcs->replay_send_cmd(replay, msg, cmd_data); + + return true; +} + +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; @@ -1039,6 +1072,33 @@ bool edp_replay_residency(const struct dc_link *link, return true; } +bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, + const unsigned int *power_opts, uint32_t coasting_vtotal) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + /* Only when both the power option and coasting vtotal change can this function return true */ + if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts && + coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) { + if (link->replay_settings.replay_feature_enabled && + replay->funcs->replay_set_power_opt_and_coasting_vtotal) { + replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay, + *power_opts, panel_inst, coasting_vtotal); + link->replay_settings.replay_power_opt_active = *power_opts; + link->replay_settings.coasting_vtotal = coasting_vtotal; + } else + return false; + } else + return false; + + return true; +} + static struct abm *get_abm_from_stream_res(const struct dc_link *link) { int i; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index a034288ad7..a158c6234d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -56,10 +56,15 @@ bool edp_set_replay_allow_active(struct dc_link *dc_link, const bool *enable, bool wait, bool force_static, const unsigned int *power_opts); bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream); -bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal); +bool edp_send_replay_cmd(struct dc_link *link, + enum replay_FW_Message_type msg, + union dmub_replay_cmd_set *cmd_data); +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal); bool edp_replay_residency(const struct dc_link *link, unsigned int *residency, const bool is_start, const bool is_alpm); bool edp_get_replay_state(const struct dc_link *link, uint64_t *state); +bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, + const unsigned int *power_opts, uint32_t coasting_vtotal); bool edp_wait_for_t12(struct dc_link *link); bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); diff --git
a/drivers/gpu/drm/amd/display/dc/optc/Makefile b/drivers/gpu/drm/amd/display/dc/optc/Makefile new file mode 100644 index 0000000000..bb213335fb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/Makefile @@ -0,0 +1,108 @@ + +# Copyright 2022 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'optc' sub-component of DAL. +# + + +ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN +############################################################################### + +OPTC_DCN10 = dcn10_optc.o + +AMD_DAL_OPTC_DCN10 = $(addprefix $(AMDDALPATH)/dc/optc/dcn10/,$(OPTC_DCN10)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN10) + +############################################################################### + +OPTC_DCN20 = dcn20_optc.o + +AMD_DAL_OPTC_DCN20 = $(addprefix $(AMDDALPATH)/dc/optc/dcn20/,$(OPTC_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN20) + +############################################################################### + +OPTC_DCN201 = dcn201_optc.o + +AMD_DAL_OPTC_DCN201 = $(addprefix $(AMDDALPATH)/dc/optc/dcn201/,$(OPTC_DCN201)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN201) + +############################################################################### + +############################################################################### + +############################################################################### + +OPTC_DCN30 = dcn30_optc.o + +AMD_DAL_OPTC_DCN30 = $(addprefix $(AMDDALPATH)/dc/optc/dcn30/,$(OPTC_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN30) + +############################################################################### + +OPTC_DCN301 = dcn301_optc.o + +AMD_DAL_OPTC_DCN301 = $(addprefix $(AMDDALPATH)/dc/optc/dcn301/,$(OPTC_DCN301)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN301) + +############################################################################### + +OPTC_DCN31 = dcn31_optc.o + +AMD_DAL_OPTC_DCN31 = $(addprefix $(AMDDALPATH)/dc/optc/dcn31/,$(OPTC_DCN31)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN31) + +############################################################################### + +OPTC_DCN314 = dcn314_optc.o + +AMD_DAL_OPTC_DCN314 = $(addprefix $(AMDDALPATH)/dc/optc/dcn314/,$(OPTC_DCN314)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN314) + +############################################################################### + +OPTC_DCN32 = dcn32_optc.o + +AMD_DAL_OPTC_DCN32 = $(addprefix 
$(AMDDALPATH)/dc/optc/dcn32/,$(OPTC_DCN32)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN32) + +############################################################################### + +OPTC_DCN35 = dcn35_optc.o + +AMD_DAL_OPTC_DCN35 = $(addprefix $(AMDDALPATH)/dc/optc/dcn35/,$(OPTC_DCN35)) + +AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN35) + +############################################################################### + +############################################################################### +endif diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c new file mode 100644 index 0000000000..0e8f4f36c8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c @@ -0,0 +1,1621 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "reg_helper.h" +#include "dcn10_optc.h" +#include "dc.h" +#include "dc_trace.h" + +#define REG(reg)\ + optc1->tg_regs->reg + +#define CTX \ + optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + optc1->tg_shift->field_name, optc1->tg_mask->field_name + +#define STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN 0x100 + +/** + * apply_front_porch_workaround() - This is a workaround for a bug that has + * existed since R5xx and has not been fixed: keep the front porch at a + * minimum of 2 for interlaced mode, or 1 for progressive. + * + * @timing: Timing parameters used to configure DCN blocks.
+ */
+static void apply_front_porch_workaround(struct dc_crtc_timing *timing)
+{
+ if (timing->flags.INTERLACE == 1) {
+ if (timing->v_front_porch < 2)
+ timing->v_front_porch = 2;
+ } else {
+ if (timing->v_front_porch < 1)
+ timing->v_front_porch = 1;
+ }
+}
+
+void optc1_program_global_sync(
+ struct timing_generator *optc,
+ int vready_offset,
+ int vstartup_start,
+ int vupdate_offset,
+ int vupdate_width)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ optc1->vready_offset = vready_offset;
+ optc1->vstartup_start = vstartup_start;
+ optc1->vupdate_offset = vupdate_offset;
+ optc1->vupdate_width = vupdate_width;
+
+ if (optc1->vstartup_start == 0) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ REG_SET(OTG_VSTARTUP_PARAM, 0,
+ VSTARTUP_START, optc1->vstartup_start);
+
+ REG_SET_2(OTG_VUPDATE_PARAM, 0,
+ VUPDATE_OFFSET, optc1->vupdate_offset,
+ VUPDATE_WIDTH, optc1->vupdate_width);
+
+ REG_SET(OTG_VREADY_PARAM, 0,
+ VREADY_OFFSET, optc1->vready_offset);
+}
+
+static void optc1_disable_stereo(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET(OTG_STEREO_CONTROL, 0,
+ OTG_STEREO_EN, 0);
+
+ REG_SET_2(OTG_3D_STRUCTURE_CONTROL, 0,
+ OTG_3D_STRUCTURE_EN, 0,
+ OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
+}
+
+void optc1_setup_vertical_interrupt0(
+ struct timing_generator *optc,
+ uint32_t start_line,
+ uint32_t end_line)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
+ OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
+ OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
+}
+
+void optc1_setup_vertical_interrupt1(
+ struct timing_generator *optc,
+ uint32_t start_line)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0,
+ OTG_VERTICAL_INTERRUPT1_LINE_START, start_line);
+}
+
+void optc1_setup_vertical_interrupt2(
+ struct timing_generator *optc,
+ uint32_t start_line)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
+ OTG_VERTICAL_INTERRUPT2_LINE_START, start_line);
+}
+
+/**
+ * optc1_program_timing() - used by mode timing set. Programs the CRTC timing
+ * registers (OTG_H_*, OTG_V_*) and pixel repetition, including sync, and
+ * calls the BIOS command table to program the timings when requested.
+ *
+ * @optc: timing_generator instance.
+ * @dc_crtc_timing: Timing parameters used to configure DCN blocks.
+ * @vready_offset: Vready's starting position.
+ * @vstartup_start: Vstartup period.
+ * @vupdate_offset: Vupdate starting position.
+ * @vupdate_width: Vupdate duration.
+ * @signal: DC signal types.
+ * @use_vbios: whether to program timings from the BIOS command table.
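+ *
+ * As a rough sketch of the register programming below: the horizontal side
+ * writes OTG_H_TOTAL = h_total - 1 and derives the blank window from the
+ * front porch (blank_start = h_total - h_front_porch, blank_end =
+ * blank_start - active), and the vertical side mirrors the same arithmetic
+ * in the OTG_V_* registers.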
+ * + */ +void optc1_program_timing( + struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing, + int vready_offset, + int vstartup_start, + int vupdate_offset, + int vupdate_width, + const enum signal_type signal, + bool use_vbios) +{ + struct dc_crtc_timing patched_crtc_timing; + uint32_t asic_blank_end; + uint32_t asic_blank_start; + uint32_t v_total; + uint32_t v_sync_end; + uint32_t h_sync_polarity, v_sync_polarity; + uint32_t start_point = 0; + uint32_t field_num = 0; + enum h_timing_div_mode h_div = H_TIMING_NO_DIV; + + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + optc1->signal = signal; + optc1->vready_offset = vready_offset; + optc1->vstartup_start = vstartup_start; + optc1->vupdate_offset = vupdate_offset; + optc1->vupdate_width = vupdate_width; + patched_crtc_timing = *dc_crtc_timing; + apply_front_porch_workaround(&patched_crtc_timing); + optc1->orginal_patched_timing = patched_crtc_timing; + + /* Load horizontal timing */ + + /* CRTC_H_TOTAL = vesa.h_total - 1 */ + REG_SET(OTG_H_TOTAL, 0, + OTG_H_TOTAL, patched_crtc_timing.h_total - 1); + + /* h_sync_start = 0, h_sync_end = vesa.h_sync_width */ + REG_UPDATE_2(OTG_H_SYNC_A, + OTG_H_SYNC_A_START, 0, + OTG_H_SYNC_A_END, patched_crtc_timing.h_sync_width); + + /* blank_start = line end - front porch */ + asic_blank_start = patched_crtc_timing.h_total - + patched_crtc_timing.h_front_porch; + + /* blank_end = blank_start - active */ + asic_blank_end = asic_blank_start - + patched_crtc_timing.h_border_right - + patched_crtc_timing.h_addressable - + patched_crtc_timing.h_border_left; + + REG_UPDATE_2(OTG_H_BLANK_START_END, + OTG_H_BLANK_START, asic_blank_start, + OTG_H_BLANK_END, asic_blank_end); + + /* h_sync polarity */ + h_sync_polarity = patched_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? + 0 : 1; + + REG_UPDATE(OTG_H_SYNC_A_CNTL, + OTG_H_SYNC_A_POL, h_sync_polarity); + + v_total = patched_crtc_timing.v_total - 1; + + REG_SET(OTG_V_TOTAL, 0, + OTG_V_TOTAL, v_total); + + /* In case of V_TOTAL_CONTROL is on, make sure OTG_V_TOTAL_MAX and + * OTG_V_TOTAL_MIN are equal to V_TOTAL. + */ + optc->funcs->set_vtotal_min_max(optc, v_total, v_total); + + /* v_sync_start = 0, v_sync_end = v_sync_width */ + v_sync_end = patched_crtc_timing.v_sync_width; + + REG_UPDATE_2(OTG_V_SYNC_A, + OTG_V_SYNC_A_START, 0, + OTG_V_SYNC_A_END, v_sync_end); + + /* blank_start = frame end - front porch */ + asic_blank_start = patched_crtc_timing.v_total - + patched_crtc_timing.v_front_porch; + + /* blank_end = blank_start - active */ + asic_blank_end = asic_blank_start - + patched_crtc_timing.v_border_bottom - + patched_crtc_timing.v_addressable - + patched_crtc_timing.v_border_top; + + REG_UPDATE_2(OTG_V_BLANK_START_END, + OTG_V_BLANK_START, asic_blank_start, + OTG_V_BLANK_END, asic_blank_end); + + /* v_sync polarity */ + v_sync_polarity = patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 
+ 0 : 1; + + REG_UPDATE(OTG_V_SYNC_A_CNTL, + OTG_V_SYNC_A_POL, v_sync_polarity); + + if (optc1->signal == SIGNAL_TYPE_DISPLAY_PORT || + optc1->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || + optc1->signal == SIGNAL_TYPE_EDP) { + start_point = 1; + if (patched_crtc_timing.flags.INTERLACE == 1) + field_num = 1; + } + + /* Interlace */ + if (REG(OTG_INTERLACE_CONTROL)) { + if (patched_crtc_timing.flags.INTERLACE == 1) + REG_UPDATE(OTG_INTERLACE_CONTROL, + OTG_INTERLACE_ENABLE, 1); + else + REG_UPDATE(OTG_INTERLACE_CONTROL, + OTG_INTERLACE_ENABLE, 0); + } + + /* VTG enable set to 0 first VInit */ + REG_UPDATE(CONTROL, + VTG0_ENABLE, 0); + + /* original code is using VTG offset to address OTG reg, seems wrong */ + REG_UPDATE_2(OTG_CONTROL, + OTG_START_POINT_CNTL, start_point, + OTG_FIELD_NUMBER_CNTL, field_num); + + optc->funcs->program_global_sync(optc, + vready_offset, + vstartup_start, + vupdate_offset, + vupdate_width); + + optc->funcs->set_vtg_params(optc, dc_crtc_timing, true); + + /* TODO + * patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1 + * program_horz_count_by_2 + * for DVI 30bpp mode, 0 otherwise + * program_horz_count_by_2(optc, &patched_crtc_timing); + */ + + /* Enable stereo - only when we need to pack 3D frame. Other types + * of stereo handled in explicit call + */ + + if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || optc1->opp_count == 2) + h_div = H_TIMING_DIV_BY2; + + if (REG(OPTC_DATA_FORMAT_CONTROL) && optc1->tg_mask->OPTC_DATA_FORMAT != 0) { + uint32_t data_fmt = 0; + + if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) + data_fmt = 1; + else if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) + data_fmt = 2; + + REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt); + } + + if (optc1->tg_mask->OTG_H_TIMING_DIV_MODE != 0) { + if (optc1->opp_count == 4) + h_div = H_TIMING_DIV_BY4; + + REG_UPDATE(OTG_H_TIMING_CNTL, + OTG_H_TIMING_DIV_MODE, h_div); + } else { + REG_UPDATE(OTG_H_TIMING_CNTL, + OTG_H_TIMING_DIV_BY2, h_div); + } +} + +/** + * optc1_set_vtg_params - Set Vertical Timing Generator (VTG) parameters + * + * @optc: timing_generator struct used to extract the optc parameters + * @dc_crtc_timing: Timing parameters configured + * @program_fp2: Boolean value indicating if FP2 will be programmed or not + * + * OTG is responsible for generating the global sync signals, including + * vertical timing information for each HUBP in the dcfclk domain. Each VTG is + * associated with one OTG that provides HUBP with vertical timing information + * (i.e., there is 1:1 correspondence between OTG and VTG). This function is + * responsible for setting the OTG parameters to the VTG during the pipe + * programming. 
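+ *
+ * A hypothetical worked example: with v_total = 1125, v_front_porch = 4,
+ * v_addressable = 1080, no borders and vstartup_start = 100, v_init =
+ * 1125 - 4 = 1121, asic_blank_end = 1121 - 1080 = 41, so
+ * vertical_line_start = 41 - 100 + 1 = -58 and FP2 is programmed to 58.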
+ */
+void optc1_set_vtg_params(struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ uint32_t asic_blank_end;
+ uint32_t v_init;
+ uint32_t v_fp2 = 0;
+ int32_t vertical_line_start;
+
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ patched_crtc_timing = *dc_crtc_timing;
+ apply_front_porch_workaround(&patched_crtc_timing);
+
+ /* VCOUNT_INIT is the start of blank */
+ v_init = patched_crtc_timing.v_total - patched_crtc_timing.v_front_porch;
+
+ /* end of blank = v_init - active */
+ asic_blank_end = v_init -
+ patched_crtc_timing.v_border_bottom -
+ patched_crtc_timing.v_addressable -
+ patched_crtc_timing.v_border_top;
+
+ /* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
+ vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
+ if (vertical_line_start < 0)
+ v_fp2 = -vertical_line_start;
+
+ /* Interlace */
+ if (REG(OTG_INTERLACE_CONTROL)) {
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+ v_init = v_init / 2;
+ if ((optc1->vstartup_start/2)*2 > asic_blank_end)
+ v_fp2 = v_fp2 / 2;
+ }
+ }
+
+ if (program_fp2)
+ REG_UPDATE_2(CONTROL,
+ VTG0_FP2, v_fp2,
+ VTG0_VCOUNT_INIT, v_init);
+ else
+ REG_UPDATE(CONTROL, VTG0_VCOUNT_INIT, v_init);
+}
+
+void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ uint32_t blank_data_double_buffer_enable = enable ? 1 : 0;
+
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_BLANK_DATA_DOUBLE_BUFFER_EN, blank_data_double_buffer_enable);
+}
+
+/**
+ * optc1_set_timing_double_buffer() - DRR double buffering control
+ *
+ * Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN,
+ * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers.
+ *
+ * @optc: timing_generator instance.
+ * @enable: Enable DRR double buffering control if true, disable otherwise.
+ *
+ * Options: any time, start of frame, dp start of frame (range timing)
+ */
+void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t mode = enable ? 2 : 0;
+
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mode);
+}
+
+/**
+ * optc1_unblank_crtc() - Call ASIC Control Object to UnBlank CRTC.
+ *
+ * @optc: timing_generator instance.
+ */
+static void optc1_unblank_crtc(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE_2(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, 0,
+ OTG_BLANK_DE_MODE, 0);
+
+ /* W/A for automated testing
+ * Automated testing will fail the underflow test, as there are
+ * sporadic underflows which occur during the optc blank sequence.
+ * As a w/a, clear underflow on unblank. This prevents the failure,
+ * but will not mask actual underflows that affect real use cases.
+ */
+ optc1_clear_optc_underflow(optc);
+}
+
+/**
+ * optc1_blank_crtc() - Call ASIC Control Object to Blank CRTC.
+ *
+ * @optc: timing_generator instance.
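+ *
+ * Note the ordering below: OTG_BLANK_DATA_EN is asserted first, then
+ * blank-data double buffering is turned off (see
+ * optc1_set_blank_data_double_buffer() above), so the blank is not left
+ * pending behind a double-buffered update.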
+ */
+
+static void optc1_blank_crtc(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE_2(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, 1,
+ OTG_BLANK_DE_MODE, 0);
+
+ optc1_set_blank_data_double_buffer(optc, false);
+}
+
+void optc1_set_blank(struct timing_generator *optc,
+ bool enable_blanking)
+{
+ if (enable_blanking)
+ optc1_blank_crtc(optc);
+ else
+ optc1_unblank_crtc(optc);
+}
+
+bool optc1_is_blanked(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t blank_en;
+ uint32_t blank_state;
+
+ REG_GET_2(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, &blank_en,
+ OTG_CURRENT_BLANK_STATE, &blank_state);
+
+ return blank_en && blank_state;
+}
+
+void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ if (enable) {
+ REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_EN, 1,
+ OPTC_INPUT_CLK_GATE_DIS, 1);
+
+ REG_WAIT(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_ON, 1,
+ 1, 1000);
+
+ /* Enable clock */
+ REG_UPDATE_2(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_EN, 1,
+ OTG_CLOCK_GATE_DIS, 1);
+ REG_WAIT(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_ON, 1,
+ 1, 1000);
+ } else {
+
+ /* Last chance to clear the underflow status; once the clock
+ * is off it will stay set, since it can no longer be cleared.
+ */
+ if (optc->funcs->is_optc_underflow_occurred(optc) == true)
+ optc->funcs->clear_optc_underflow(optc);
+
+ REG_UPDATE_2(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_GATE_DIS, 0,
+ OTG_CLOCK_EN, 0);
+
+ REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_GATE_DIS, 0,
+ OPTC_INPUT_CLK_EN, 0);
+ }
+}
+
+/**
+ * optc1_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator.
+ *
+ * @optc: timing_generator instance.
+ */
+static bool optc1_enable_crtc(struct timing_generator *optc)
+{
+ /* TODO FPGA wait for answer
+ * OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE
+ * OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK
+ */
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ /* opp instance for OTG. For DCN1.0, ODM is removed.
+ * OPP and OPTC should be mapped 1:1.
+ */
+ REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SRC_SEL, optc->inst);
+
+ /* VTG enable first is for HW workaround */
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 1);
+
+ REG_SEQ_START();
+
+ /* Enable CRTC */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_DISABLE_POINT_CNTL, 3,
+ OTG_MASTER_EN, 1);
+
+ REG_SEQ_SUBMIT();
+ REG_SEQ_WAIT_DONE();
+
+ return true;
+}
+
+/* disable_crtc - call ASIC Control Object to disable Timing generator. */
+bool optc1_disable_crtc(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ /* disable otg request until end of the first line
+ * in the vertical blank region
+ */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_DISABLE_POINT_CNTL, 3,
+ OTG_MASTER_EN, 0);
+
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 0);
+
+ /* CRTC disabled, so disable clock.
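+ * The OTG_BUSY wait below gives the OTG time to finish the current
+ * frame before the clock is gated (assuming REG_WAIT's usual
+ * 1 us-per-poll, 100000-tries semantics, roughly a 100 ms timeout).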
*/ + REG_WAIT(OTG_CLOCK_CONTROL, + OTG_BUSY, 0, + 1, 100000); + + return true; +} + + +void optc1_program_blank_color( + struct timing_generator *optc, + const struct tg_color *black_color) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET_3(OTG_BLACK_COLOR, 0, + OTG_BLACK_COLOR_B_CB, black_color->color_b_cb, + OTG_BLACK_COLOR_G_Y, black_color->color_g_y, + OTG_BLACK_COLOR_R_CR, black_color->color_r_cr); +} + +bool optc1_validate_timing( + struct timing_generator *optc, + const struct dc_crtc_timing *timing) +{ + uint32_t v_blank; + uint32_t h_blank; + uint32_t min_v_blank; + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + ASSERT(timing != NULL); + + v_blank = (timing->v_total - timing->v_addressable - + timing->v_border_top - timing->v_border_bottom); + + h_blank = (timing->h_total - timing->h_addressable - + timing->h_border_right - + timing->h_border_left); + + if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE && + timing->timing_3d_format != TIMING_3D_FORMAT_HW_FRAME_PACKING && + timing->timing_3d_format != TIMING_3D_FORMAT_TOP_AND_BOTTOM && + timing->timing_3d_format != TIMING_3D_FORMAT_SIDE_BY_SIDE && + timing->timing_3d_format != TIMING_3D_FORMAT_FRAME_ALTERNATE && + timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA) + return false; + + /* Temporarily blocking interlacing mode until it's supported */ + if (timing->flags.INTERLACE == 1) + return false; + + /* Check maximum number of pixels supported by Timing Generator + * (Currently will never fail, in order to fail needs display which + * needs more than 8192 horizontal and + * more than 8192 vertical total pixels) + */ + if (timing->h_total > optc1->max_h_total || + timing->v_total > optc1->max_v_total) + return false; + + + if (h_blank < optc1->min_h_blank) + return false; + + if (timing->h_sync_width < optc1->min_h_sync_width || + timing->v_sync_width < optc1->min_v_sync_width) + return false; + + min_v_blank = timing->flags.INTERLACE?optc1->min_v_blank_interlace:optc1->min_v_blank; + + if (v_blank < min_v_blank) + return false; + + return true; + +} + +/* + * get_vblank_counter + * + * @brief + * Get counter for vertical blanks. use register CRTC_STATUS_FRAME_COUNT which + * holds the counter of frames. + * + * @param + * struct timing_generator *optc - [in] timing generator which controls the + * desired CRTC + * + * @return + * Counter of frames, which should equal to number of vblanks. 
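+ *
+ * (The register actually read below is OTG_STATUS_FRAME_COUNT, the
+ * OTG-side counterpart of the CRTC_STATUS_FRAME_COUNT register named
+ * above.)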
+ */ +uint32_t optc1_get_vblank_counter(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t frame_count; + + REG_GET(OTG_STATUS_FRAME_COUNT, + OTG_FRAME_COUNT, &frame_count); + + return frame_count; +} + +void optc1_lock(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET(OTG_GLOBAL_CONTROL0, 0, + OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 1); + + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); + + TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); +} + +void optc1_unlock(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 0); + + TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, false); +} + +void optc1_get_position(struct timing_generator *optc, + struct crtc_position *position) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_GET_2(OTG_STATUS_POSITION, + OTG_HORZ_COUNT, &position->horizontal_count, + OTG_VERT_COUNT, &position->vertical_count); + + REG_GET(OTG_NOM_VERT_POSITION, + OTG_VERT_COUNT_NOM, &position->nominal_vcount); +} + +bool optc1_is_counter_moving(struct timing_generator *optc) +{ + struct crtc_position position1, position2; + + optc->funcs->get_position(optc, &position1); + optc->funcs->get_position(optc, &position2); + + if (position1.horizontal_count == position2.horizontal_count && + position1.vertical_count == position2.vertical_count) + return false; + else + return true; +} + +bool optc1_did_triggered_reset_occur( + struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t occurred_force, occurred_vsync; + + REG_GET(OTG_FORCE_COUNT_NOW_CNTL, + OTG_FORCE_COUNT_NOW_OCCURRED, &occurred_force); + + REG_GET(OTG_VERT_SYNC_CONTROL, + OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, &occurred_vsync); + + return occurred_vsync != 0 || occurred_force != 0; +} + +void optc1_disable_reset_trigger(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_WRITE(OTG_TRIGA_CNTL, 0); + + REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0, + OTG_FORCE_COUNT_NOW_CLEAR, 1); + + REG_SET(OTG_VERT_SYNC_CONTROL, 0, + OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, 1); +} + +void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t falling_edge; + + REG_GET(OTG_V_SYNC_A_CNTL, + OTG_V_SYNC_A_POL, &falling_edge); + + if (falling_edge) + REG_SET_3(OTG_TRIGA_CNTL, 0, + /* vsync signal from selected OTG pipe based + * on OTG_TRIG_SOURCE_PIPE_SELECT setting + */ + OTG_TRIGA_SOURCE_SELECT, 20, + OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst, + /* always detect falling edge */ + OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 1); + else + REG_SET_3(OTG_TRIGA_CNTL, 0, + /* vsync signal from selected OTG pipe based + * on OTG_TRIG_SOURCE_PIPE_SELECT setting + */ + OTG_TRIGA_SOURCE_SELECT, 20, + OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst, + /* always detect rising edge */ + OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1); + + REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0, + /* force H count to H_TOTAL and V count to V_TOTAL in + * progressive mode and V_TOTAL-1 in interlaced mode + */ + OTG_FORCE_COUNT_NOW_MODE, 2); +} + +void optc1_enable_crtc_reset( + struct timing_generator *optc, + int source_tg_inst, + struct crtc_trigger_info *crtc_tp) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t falling_edge = 0; + uint32_t rising_edge = 0; + + switch 
(crtc_tp->event) { + + case CRTC_EVENT_VSYNC_RISING: + rising_edge = 1; + break; + + case CRTC_EVENT_VSYNC_FALLING: + falling_edge = 1; + break; + } + + REG_SET_4(OTG_TRIGA_CNTL, 0, + /* vsync signal from selected OTG pipe based + * on OTG_TRIG_SOURCE_PIPE_SELECT setting + */ + OTG_TRIGA_SOURCE_SELECT, 20, + OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst, + /* always detect falling edge */ + OTG_TRIGA_RISING_EDGE_DETECT_CNTL, rising_edge, + OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, falling_edge); + + switch (crtc_tp->delay) { + case TRIGGER_DELAY_NEXT_LINE: + REG_SET(OTG_VERT_SYNC_CONTROL, 0, + OTG_AUTO_FORCE_VSYNC_MODE, 1); + break; + case TRIGGER_DELAY_NEXT_PIXEL: + REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0, + /* force H count to H_TOTAL and V count to V_TOTAL in + * progressive mode and V_TOTAL-1 in interlaced mode + */ + OTG_FORCE_COUNT_NOW_MODE, 2); + break; + } +} + +void optc1_wait_for_state(struct timing_generator *optc, + enum crtc_state state) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + switch (state) { + case CRTC_STATE_VBLANK: + REG_WAIT(OTG_STATUS, + OTG_V_BLANK, 1, + 1, 100000); /* 1 vupdate at 10hz */ + break; + + case CRTC_STATE_VACTIVE: + REG_WAIT(OTG_STATUS, + OTG_V_ACTIVE_DISP, 1, + 1, 100000); /* 1 vupdate at 10hz */ + break; + + default: + break; + } +} + +void optc1_set_early_control( + struct timing_generator *optc, + uint32_t early_cntl) +{ + /* asic design change, do not need this control + * empty for share caller logic + */ +} + + +void optc1_set_static_screen_control( + struct timing_generator *optc, + uint32_t event_triggers, + uint32_t num_frames) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + // By register spec, it only takes 8 bit value + if (num_frames > 0xFF) + num_frames = 0xFF; + + /* Bit 8 is no longer applicable in RV for PSR case, + * set bit 8 to 0 if given + */ + if ((event_triggers & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN) + != 0) + event_triggers = event_triggers & + ~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN; + + REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0, + OTG_STATIC_SCREEN_EVENT_MASK, event_triggers, + OTG_STATIC_SCREEN_FRAME_COUNT, num_frames); +} + +static void optc1_setup_manual_trigger(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET(OTG_GLOBAL_CONTROL2, 0, + MANUAL_FLOW_CONTROL_SEL, optc->inst); + + REG_SET_8(OTG_TRIGA_CNTL, 0, + OTG_TRIGA_SOURCE_SELECT, 22, + OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, + OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1, + OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0, + OTG_TRIGA_POLARITY_SELECT, 0, + OTG_TRIGA_FREQUENCY_SELECT, 0, + OTG_TRIGA_DELAY, 0, + OTG_TRIGA_CLEAR, 1); +} + +static void optc1_program_manual_trigger(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET(OTG_MANUAL_FLOW_CONTROL, 0, + MANUAL_FLOW_CONTROL, 1); + + REG_SET(OTG_MANUAL_FLOW_CONTROL, 0, + MANUAL_FLOW_CONTROL, 0); +} + +/** + * optc1_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*. + * + * @optc: timing_generator instance. + * @params: parameters used for Dynamic Refresh Rate. 
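+ *
+ * As a sketch of the programming below: vertical_total_min/max are written
+ * (minus one) to OTG_V_TOTAL_MIN/MAX, the OTG_V_TOTAL_MIN_SEL/MAX_SEL bits
+ * are set so the OTG honors those bounds, and manual flow control over
+ * TRIG_A is then armed via setup_manual_trigger().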
+ */ +void optc1_set_drr( + struct timing_generator *optc, + const struct drr_params *params) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + if (params != NULL && + params->vertical_total_max > 0 && + params->vertical_total_min > 0) { + + if (params->vertical_total_mid != 0) { + + REG_SET(OTG_V_TOTAL_MID, 0, + OTG_V_TOTAL_MID, params->vertical_total_mid - 1); + + REG_UPDATE_2(OTG_V_TOTAL_CONTROL, + OTG_VTOTAL_MID_REPLACING_MAX_EN, 1, + OTG_VTOTAL_MID_FRAME_NUM, + (uint8_t)params->vertical_total_mid_frame_num); + + } + + optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1); + + REG_UPDATE_5(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, + OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK_EN, 0, + OTG_SET_V_TOTAL_MIN_MASK, 0); + } + + // Setup manual flow control for EOF via TRIG_A + optc->funcs->setup_manual_trigger(optc); +} + +void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET(OTG_V_TOTAL_MAX, 0, + OTG_V_TOTAL_MAX, vtotal_max); + + REG_SET(OTG_V_TOTAL_MIN, 0, + OTG_V_TOTAL_MIN, vtotal_min); +} + +static void optc1_set_test_pattern( + struct timing_generator *optc, + /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode' + * because this is not DP-specific (which is probably somewhere in DP + * encoder) */ + enum controller_dp_test_pattern test_pattern, + enum dc_color_depth color_depth) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + enum test_pattern_color_format bit_depth; + enum test_pattern_dyn_range dyn_range; + enum test_pattern_mode mode; + uint32_t pattern_mask; + uint32_t pattern_data; + /* color ramp generator mixes 16-bits color */ + uint32_t src_bpc = 16; + /* requested bpc */ + uint32_t dst_bpc; + uint32_t index; + /* RGB values of the color bars. + * Produce two RGB colors: RGB0 - white (all Fs) + * and RGB1 - black (all 0s) + * (three RGB components for two colors) + */ + uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000, + 0x0000, 0x0000}; + /* dest color (converted to the specified color format) */ + uint16_t dst_color[6]; + uint32_t inc_base; + + /* translate to bit depth */ + switch (color_depth) { + case COLOR_DEPTH_666: + bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6; + break; + case COLOR_DEPTH_888: + bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8; + break; + case COLOR_DEPTH_101010: + bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10; + break; + case COLOR_DEPTH_121212: + bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12; + break; + default: + bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8; + break; + } + + switch (test_pattern) { + case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES: + case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA: + { + dyn_range = (test_pattern == + CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ? + TEST_PATTERN_DYN_RANGE_CEA : + TEST_PATTERN_DYN_RANGE_VESA); + mode = TEST_PATTERN_MODE_COLORSQUARES_RGB; + + REG_UPDATE_2(OTG_TEST_PATTERN_PARAMETERS, + OTG_TEST_PATTERN_VRES, 6, + OTG_TEST_PATTERN_HRES, 6); + + REG_UPDATE_4(OTG_TEST_PATTERN_CONTROL, + OTG_TEST_PATTERN_EN, 1, + OTG_TEST_PATTERN_MODE, mode, + OTG_TEST_PATTERN_DYNAMIC_RANGE, dyn_range, + OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth); + } + break; + + case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS: + case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS: + { + mode = (test_pattern == + CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ? 
+ TEST_PATTERN_MODE_VERTICALBARS :
+ TEST_PATTERN_MODE_HORIZONTALBARS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* adjust color to the required colorFormat */
+ for (index = 0; index < 6; index++) {
+ /* dst = 2^dstBpc * src / 2^srcBpc = src >>
+ * (srcBpc - dstBpc);
+ */
+ dst_color[index] =
+ src_color[index] >> (src_bpc - dst_bpc);
+ /* CRTC_TEST_PATTERN_DATA has 16 bits,
+ * lowest 6 are hardwired to ZERO
+ * color bits should be left aligned to MSB
+ * XXXXXXXXXX000000 for 10 bit,
+ * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6
+ */
+ dst_color[index] <<= (16 - dst_bpc);
+ }
+
+ REG_WRITE(OTG_TEST_PATTERN_PARAMETERS, 0);
+
+ /* We have to write the mask before data, similar to pipeline.
+ * For example, for 8 bpc, if we want RGB0 to be magenta,
+ * and RGB1 to be cyan,
+ * we need to make 7 writes:
+ * MASK DATA
+ * 000001 00000000 00000000 set mask to R0
+ * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0
+ * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0
+ * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1
+ * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1
+ * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1
+ * 100000 11111111 00000000 B1 255, 0xFF00
+ *
+ * we will make a loop of 6 in which we prepare the mask,
+ * then write, then prepare the color for the next write.
+ * The first iteration writes the mask only, but each
+ * subsequent iteration writes the color prepared in the
+ * previous iteration within the new mask. The last component
+ * will be written separately: the mask does not change
+ * between the 6th and 7th write, and its color was already
+ * prepared by the last loop iteration.
+ */
+
+ /* write color, color values mask in CRTC_TEST_PATTERN_MASK
+ * is B1, G1, R1, B0, G0, R0
+ */
+ pattern_data = 0;
+ for (index = 0; index < 6; index++) {
+ /* prepare color mask, first write PATTERN_DATA
+ * will have all zeros
+ */
+ pattern_mask = (1 << index);
+
+ /* write color component */
+ REG_SET_2(OTG_TEST_PATTERN_COLOR, 0,
+ OTG_TEST_PATTERN_MASK, pattern_mask,
+ OTG_TEST_PATTERN_DATA, pattern_data);
+
+ /* prepare next color component,
+ * will be written in the next iteration
+ */
+ pattern_data = dst_color[index];
+ }
+ /* write last color component,
+ * it's been already prepared in the loop
+ */
+ REG_SET_2(OTG_TEST_PATTERN_COLOR, 0,
+ OTG_TEST_PATTERN_MASK, pattern_mask,
+ OTG_TEST_PATTERN_DATA, pattern_data);
+
+ /* enable test pattern */
+ REG_UPDATE_4(OTG_TEST_PATTERN_CONTROL,
+ OTG_TEST_PATTERN_EN, 1,
+ OTG_TEST_PATTERN_MODE, mode,
+ OTG_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
+ {
+ mode = (bit_depth ==
+ TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
+ TEST_PATTERN_MODE_DUALRAMP_RGB : + TEST_PATTERN_MODE_SINGLERAMP_RGB); + + switch (bit_depth) { + case TEST_PATTERN_COLOR_FORMAT_BPC_6: + dst_bpc = 6; + break; + case TEST_PATTERN_COLOR_FORMAT_BPC_8: + dst_bpc = 8; + break; + case TEST_PATTERN_COLOR_FORMAT_BPC_10: + dst_bpc = 10; + break; + default: + dst_bpc = 8; + break; + } + + /* increment for the first ramp for one color gradation + * 1 gradation for 6-bit color is 2^10 + * gradations in 16-bit color + */ + inc_base = (src_bpc - dst_bpc); + + switch (bit_depth) { + case TEST_PATTERN_COLOR_FORMAT_BPC_6: + { + REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS, + OTG_TEST_PATTERN_INC0, inc_base, + OTG_TEST_PATTERN_INC1, 0, + OTG_TEST_PATTERN_HRES, 6, + OTG_TEST_PATTERN_VRES, 6, + OTG_TEST_PATTERN_RAMP0_OFFSET, 0); + } + break; + case TEST_PATTERN_COLOR_FORMAT_BPC_8: + { + REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS, + OTG_TEST_PATTERN_INC0, inc_base, + OTG_TEST_PATTERN_INC1, 0, + OTG_TEST_PATTERN_HRES, 8, + OTG_TEST_PATTERN_VRES, 6, + OTG_TEST_PATTERN_RAMP0_OFFSET, 0); + } + break; + case TEST_PATTERN_COLOR_FORMAT_BPC_10: + { + REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS, + OTG_TEST_PATTERN_INC0, inc_base, + OTG_TEST_PATTERN_INC1, inc_base + 2, + OTG_TEST_PATTERN_HRES, 8, + OTG_TEST_PATTERN_VRES, 5, + OTG_TEST_PATTERN_RAMP0_OFFSET, 384 << 6); + } + break; + default: + break; + } + + REG_WRITE(OTG_TEST_PATTERN_COLOR, 0); + + /* enable test pattern */ + REG_WRITE(OTG_TEST_PATTERN_CONTROL, 0); + + REG_SET_4(OTG_TEST_PATTERN_CONTROL, 0, + OTG_TEST_PATTERN_EN, 1, + OTG_TEST_PATTERN_MODE, mode, + OTG_TEST_PATTERN_DYNAMIC_RANGE, 0, + OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth); + } + break; + case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE: + { + REG_WRITE(OTG_TEST_PATTERN_CONTROL, 0); + REG_WRITE(OTG_TEST_PATTERN_COLOR, 0); + REG_WRITE(OTG_TEST_PATTERN_PARAMETERS, 0); + } + break; + default: + break; + + } +} + +void optc1_get_crtc_scanoutpos( + struct timing_generator *optc, + uint32_t *v_blank_start, + uint32_t *v_blank_end, + uint32_t *h_position, + uint32_t *v_position) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + struct crtc_position position; + + REG_GET_2(OTG_V_BLANK_START_END, + OTG_V_BLANK_START, v_blank_start, + OTG_V_BLANK_END, v_blank_end); + + optc1_get_position(optc, &position); + + *h_position = position.horizontal_count; + *v_position = position.vertical_count; +} + +static void optc1_enable_stereo(struct timing_generator *optc, + const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + if (flags) { + uint32_t stereo_en; + stereo_en = flags->FRAME_PACKED == 0 ? 1 : 0; + + if (flags->PROGRAM_STEREO) + REG_UPDATE_3(OTG_STEREO_CONTROL, + OTG_STEREO_EN, stereo_en, + OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0, + OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1); + + if (flags->PROGRAM_POLARITY) + REG_UPDATE(OTG_STEREO_CONTROL, + OTG_STEREO_EYE_FLAG_POLARITY, + flags->RIGHT_EYE_POLARITY == 0 ? 
0 : 1); + + if (flags->DISABLE_STEREO_DP_SYNC) + REG_UPDATE(OTG_STEREO_CONTROL, + OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1); + + if (flags->PROGRAM_STEREO) + REG_UPDATE_2(OTG_3D_STRUCTURE_CONTROL, + OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED, + OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED); + + } +} + +void optc1_program_stereo(struct timing_generator *optc, + const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags) +{ + if (flags->PROGRAM_STEREO) + optc1_enable_stereo(optc, timing, flags); + else + optc1_disable_stereo(optc); +} + + +bool optc1_is_stereo_left_eye(struct timing_generator *optc) +{ + bool ret = false; + uint32_t left_eye = 0; + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_GET(OTG_STEREO_STATUS, + OTG_STEREO_CURRENT_EYE, &left_eye); + if (left_eye == 1) + ret = true; + else + ret = false; + + return ret; +} + +bool optc1_get_hw_timing(struct timing_generator *tg, + struct dc_crtc_timing *hw_crtc_timing) +{ + struct dcn_otg_state s = {0}; + + if (tg == NULL || hw_crtc_timing == NULL) + return false; + + optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); + + hw_crtc_timing->h_total = s.h_total + 1; + hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end); + hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start; + hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start; + + hw_crtc_timing->v_total = s.v_total + 1; + hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end); + hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start; + hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start; + + return true; +} + + +void optc1_read_otg_state(struct optc *optc1, + struct dcn_otg_state *s) +{ + REG_GET(OTG_CONTROL, + OTG_MASTER_EN, &s->otg_enabled); + + REG_GET_2(OTG_V_BLANK_START_END, + OTG_V_BLANK_START, &s->v_blank_start, + OTG_V_BLANK_END, &s->v_blank_end); + + REG_GET(OTG_V_SYNC_A_CNTL, + OTG_V_SYNC_A_POL, &s->v_sync_a_pol); + + REG_GET(OTG_V_TOTAL, + OTG_V_TOTAL, &s->v_total); + + REG_GET(OTG_V_TOTAL_MAX, + OTG_V_TOTAL_MAX, &s->v_total_max); + + REG_GET(OTG_V_TOTAL_MIN, + OTG_V_TOTAL_MIN, &s->v_total_min); + + REG_GET(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MAX_SEL, &s->v_total_max_sel); + + REG_GET(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, &s->v_total_min_sel); + + REG_GET_2(OTG_V_SYNC_A, + OTG_V_SYNC_A_START, &s->v_sync_a_start, + OTG_V_SYNC_A_END, &s->v_sync_a_end); + + REG_GET_2(OTG_H_BLANK_START_END, + OTG_H_BLANK_START, &s->h_blank_start, + OTG_H_BLANK_END, &s->h_blank_end); + + REG_GET_2(OTG_H_SYNC_A, + OTG_H_SYNC_A_START, &s->h_sync_a_start, + OTG_H_SYNC_A_END, &s->h_sync_a_end); + + REG_GET(OTG_H_SYNC_A_CNTL, + OTG_H_SYNC_A_POL, &s->h_sync_a_pol); + + REG_GET(OTG_H_TOTAL, + OTG_H_TOTAL, &s->h_total); + + REG_GET(OPTC_INPUT_GLOBAL_CONTROL, + OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status); + + REG_GET(OTG_VERTICAL_INTERRUPT1_CONTROL, + OTG_VERTICAL_INTERRUPT1_INT_ENABLE, &s->vertical_interrupt1_en); + + REG_GET(OTG_VERTICAL_INTERRUPT1_POSITION, + OTG_VERTICAL_INTERRUPT1_LINE_START, &s->vertical_interrupt1_line); + + REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL, + OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &s->vertical_interrupt2_en); + + REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION, + OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line); +} + +bool optc1_get_otg_active_size(struct timing_generator *optc, + uint32_t *otg_active_width, + uint32_t *otg_active_height) +{ + uint32_t otg_enabled; + uint32_t v_blank_start; + uint32_t 
v_blank_end;
+ uint32_t h_blank_start;
+ uint32_t h_blank_end;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_CONTROL,
+ OTG_MASTER_EN, &otg_enabled);
+
+ if (otg_enabled == 0)
+ return false;
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &v_blank_start,
+ OTG_V_BLANK_END, &v_blank_end);
+
+ REG_GET_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, &h_blank_start,
+ OTG_H_BLANK_END, &h_blank_end);
+
+ /* active width comes from the horizontal blank window,
+ * active height from the vertical one
+ */
+ *otg_active_width = h_blank_start - h_blank_end;
+ *otg_active_height = v_blank_start - v_blank_end;
+ return true;
+}
+
+void optc1_clear_optc_underflow(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE(OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, 1);
+}
+
+void optc1_tg_init(struct timing_generator *optc)
+{
+ optc1_set_blank_data_double_buffer(optc, true);
+ optc1_set_timing_double_buffer(optc, true);
+ optc1_clear_optc_underflow(optc);
+}
+
+bool optc1_is_tg_enabled(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t otg_enabled = 0;
+
+ REG_GET(OTG_CONTROL, OTG_MASTER_EN, &otg_enabled);
+
+ return (otg_enabled != 0);
+}
+
+bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t underflow_occurred = 0;
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_UNDERFLOW_OCCURRED_STATUS,
+ &underflow_occurred);
+
+ return (underflow_occurred == 1);
+}
+
+bool optc1_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ /* Cannot configure CRC on a CRTC that is disabled */
+ if (!optc1_is_tg_enabled(optc))
+ return false;
+
+ REG_WRITE(OTG_CRC_CNTL, 0);
+
+ if (!params->enable)
+ return true;
+
+ /* Program frame boundaries */
+ /* Window A x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+ OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+ OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+ /* Window A y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+ OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+ OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+ /* Window B x axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+ OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+ OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+ /* Window B y axis start and end. */
+ REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+ OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+ OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+ /* Set CRC mode and selection, and enable. Only using CRC0 */
+ REG_UPDATE_3(OTG_CRC_CNTL,
+ OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+ OTG_CRC0_SELECT, params->selection,
+ OTG_CRC_EN, 1);
+
+ return true;
+}
+
+/**
+ * optc1_get_crc - Capture CRC result per component
+ *
+ * @optc: timing_generator instance.
+ * @r_cr: 16-bit primary CRC signature for red data.
+ * @g_y: 16-bit primary CRC signature for green data.
+ * @b_cb: 16-bit primary CRC signature for blue data.
+ *
+ * This function reads the CRC signature from the OPTC registers. Notice that
+ * we have three registers to keep the CRC result per color component (RGB).
+ *
+ * Returns:
+ * If CRC is disabled, return false; otherwise, return true, and the CRC
+ * results in the parameters.
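+ *
+ * A typical (hypothetical) flow: enable capture via optc1_configure_crc()
+ * with a window over the region of interest, wait for a frame to complete,
+ * then call this function and compare the three signatures against a
+ * golden reference.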
+ */ +bool optc1_get_crc(struct timing_generator *optc, + uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) +{ + uint32_t field = 0; + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_GET(OTG_CRC_CNTL, OTG_CRC_EN, &field); + + /* Early return if CRC is not enabled for this CRTC */ + if (!field) + return false; + + /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */ + REG_GET_2(OTG_CRC0_DATA_RG, + CRC0_R_CR, r_cr, + CRC0_G_Y, g_y); + + /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */ + REG_GET(OTG_CRC0_DATA_B, + CRC0_B_CB, b_cb); + + return true; +} + +static const struct timing_generator_funcs dcn10_tg_funcs = { + .validate_timing = optc1_validate_timing, + .program_timing = optc1_program_timing, + .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, + .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, + .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, + .program_global_sync = optc1_program_global_sync, + .enable_crtc = optc1_enable_crtc, + .disable_crtc = optc1_disable_crtc, + /* used by enable_timing_synchronization. Not need for FPGA */ + .is_counter_moving = optc1_is_counter_moving, + .get_position = optc1_get_position, + .get_frame_count = optc1_get_vblank_counter, + .get_scanoutpos = optc1_get_crtc_scanoutpos, + .get_otg_active_size = optc1_get_otg_active_size, + .set_early_control = optc1_set_early_control, + /* used by enable_timing_synchronization. Not need for FPGA */ + .wait_for_state = optc1_wait_for_state, + .set_blank = optc1_set_blank, + .is_blanked = optc1_is_blanked, + .set_blank_color = optc1_program_blank_color, + .did_triggered_reset_occur = optc1_did_triggered_reset_occur, + .enable_reset_trigger = optc1_enable_reset_trigger, + .enable_crtc_reset = optc1_enable_crtc_reset, + .disable_reset_trigger = optc1_disable_reset_trigger, + .lock = optc1_lock, + .unlock = optc1_unlock, + .enable_optc_clock = optc1_enable_optc_clock, + .set_drr = optc1_set_drr, + .get_last_used_drr_vtotal = NULL, + .set_vtotal_min_max = optc1_set_vtotal_min_max, + .set_static_screen_control = optc1_set_static_screen_control, + .set_test_pattern = optc1_set_test_pattern, + .program_stereo = optc1_program_stereo, + .is_stereo_left_eye = optc1_is_stereo_left_eye, + .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer, + .tg_init = optc1_tg_init, + .is_tg_enabled = optc1_is_tg_enabled, + .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, + .clear_optc_underflow = optc1_clear_optc_underflow, + .get_crc = optc1_get_crc, + .configure_crc = optc1_configure_crc, + .set_vtg_params = optc1_set_vtg_params, + .program_manual_trigger = optc1_program_manual_trigger, + .setup_manual_trigger = optc1_setup_manual_trigger, + .get_hw_timing = optc1_get_hw_timing, +}; + +void dcn10_timing_generator_init(struct optc *optc1) +{ + optc1->base.funcs = &dcn10_tg_funcs; + + optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; + optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; + + optc1->min_h_blank = 32; + optc1->min_v_blank = 3; + optc1->min_v_blank_interlace = 5; + optc1->min_h_sync_width = 4; + optc1->min_v_sync_width = 1; +} + +/* "Containter" vs. "pixel" is a concept within HW blocks, mostly those closer to the back-end. It works like this: + * + * - In most of the formats (RGB or YCbCr 4:4:4, 4:2:2 uncompressed and DSC 4:2:2 Simple) pixel rate is the same as + * containter rate. 
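+ *   (For example, with a 594 MHz pixel clock the container clock is also
+ *   594 MHz in these formats.)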
+ * + * - In 4:2:0 (DSC or uncompressed) there are two pixels per container, hence the target container rate has to be + * halved to maintain the correct pixel rate. + * + * - Unlike 4:2:2 uncompressed, DSC 4:2:2 Native also has two pixels per container (this happens when DSC is applied + * to it) and has to be treated the same as 4:2:0, i.e. target containter rate has to be halved in this case as well. + * + */ +bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) +{ + bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; + + two_pix = two_pix || (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 + && !timing->dsc_cfg.ycbcr422_simple); + return two_pix; +} + diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h new file mode 100644 index 0000000000..6c2e84d396 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -0,0 +1,600 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_TIMING_GENERATOR_DCN10_H__ +#define __DC_TIMING_GENERATOR_DCN10_H__ + +#include "optc.h" + +#define DCN10TG_FROM_TG(tg)\ + container_of(tg, struct optc, base) + +#define TG_COMMON_REG_LIST_DCN(inst) \ + SRI(OTG_VSTARTUP_PARAM, OTG, inst),\ + SRI(OTG_VUPDATE_PARAM, OTG, inst),\ + SRI(OTG_VREADY_PARAM, OTG, inst),\ + SRI(OTG_BLANK_CONTROL, OTG, inst),\ + SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\ + SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\ + SRI(OTG_H_TOTAL, OTG, inst),\ + SRI(OTG_H_BLANK_START_END, OTG, inst),\ + SRI(OTG_H_SYNC_A, OTG, inst),\ + SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\ + SRI(OTG_H_TIMING_CNTL, OTG, inst),\ + SRI(OTG_V_TOTAL, OTG, inst),\ + SRI(OTG_V_BLANK_START_END, OTG, inst),\ + SRI(OTG_V_SYNC_A, OTG, inst),\ + SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\ + SRI(OTG_INTERLACE_CONTROL, OTG, inst),\ + SRI(OTG_CONTROL, OTG, inst),\ + SRI(OTG_STEREO_CONTROL, OTG, inst),\ + SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\ + SRI(OTG_STEREO_STATUS, OTG, inst),\ + SRI(OTG_V_TOTAL_MAX, OTG, inst),\ + SRI(OTG_V_TOTAL_MID, OTG, inst),\ + SRI(OTG_V_TOTAL_MIN, OTG, inst),\ + SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\ + SRI(OTG_TRIGA_CNTL, OTG, inst),\ + SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ + SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ + SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\ + SRI(OTG_STATUS, OTG, inst),\ + SRI(OTG_STATUS_POSITION, OTG, inst),\ + SRI(OTG_NOM_VERT_POSITION, OTG, inst),\ + SRI(OTG_BLACK_COLOR, OTG, inst),\ + SRI(OTG_CLOCK_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ + SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ + SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\ + SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\ + SRI(CONTROL, VTG, inst),\ + SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\ + SRI(OTG_MASTER_UPDATE_MODE, OTG, inst),\ + SRI(OTG_GSL_CONTROL, OTG, inst),\ + SRI(OTG_CRC_CNTL, OTG, inst),\ + SRI(OTG_CRC0_DATA_RG, OTG, inst),\ + SRI(OTG_CRC0_DATA_B, OTG, inst),\ + SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\ + SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\ + SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\ + SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\ + SR(GSL_SOURCE_SELECT),\ + SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ + SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst) + +#define TG_COMMON_REG_LIST_DCN1_0(inst) \ + TG_COMMON_REG_LIST_DCN(inst),\ + SRI(OTG_TEST_PATTERN_PARAMETERS, OTG, inst),\ + SRI(OTG_TEST_PATTERN_CONTROL, OTG, inst),\ + SRI(OTG_TEST_PATTERN_COLOR, OTG, inst),\ + SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst) + + +struct dcn_optc_registers { + uint32_t OTG_GLOBAL_CONTROL1; + uint32_t OTG_GLOBAL_CONTROL2; + uint32_t OTG_VERT_SYNC_CONTROL; + uint32_t OTG_MASTER_UPDATE_MODE; + uint32_t OTG_GSL_CONTROL; + uint32_t OTG_VSTARTUP_PARAM; + uint32_t OTG_VUPDATE_PARAM; + uint32_t OTG_VREADY_PARAM; + uint32_t OTG_BLANK_CONTROL; + uint32_t OTG_MASTER_UPDATE_LOCK; + uint32_t OTG_GLOBAL_CONTROL0; + uint32_t OTG_DOUBLE_BUFFER_CONTROL; + uint32_t OTG_H_TOTAL; + uint32_t OTG_H_BLANK_START_END; + uint32_t OTG_H_SYNC_A; + uint32_t OTG_H_SYNC_A_CNTL; + uint32_t OTG_H_TIMING_CNTL; + uint32_t OTG_V_TOTAL; + uint32_t OTG_V_BLANK_START_END; + uint32_t OTG_V_SYNC_A; + uint32_t OTG_V_SYNC_A_CNTL; + uint32_t OTG_INTERLACE_CONTROL; + uint32_t OTG_CONTROL; + uint32_t 
OTG_STEREO_CONTROL; + uint32_t OTG_3D_STRUCTURE_CONTROL; + uint32_t OTG_STEREO_STATUS; + uint32_t OTG_V_TOTAL_MAX; + uint32_t OTG_V_TOTAL_MID; + uint32_t OTG_V_TOTAL_MIN; + uint32_t OTG_V_TOTAL_CONTROL; + uint32_t OTG_TRIGA_CNTL; + uint32_t OTG_TRIGA_MANUAL_TRIG; + uint32_t OTG_MANUAL_FLOW_CONTROL; + uint32_t OTG_FORCE_COUNT_NOW_CNTL; + uint32_t OTG_STATIC_SCREEN_CONTROL; + uint32_t OTG_STATUS_FRAME_COUNT; + uint32_t OTG_STATUS; + uint32_t OTG_STATUS_POSITION; + uint32_t OTG_NOM_VERT_POSITION; + uint32_t OTG_BLACK_COLOR; + uint32_t OTG_TEST_PATTERN_PARAMETERS; + uint32_t OTG_TEST_PATTERN_CONTROL; + uint32_t OTG_TEST_PATTERN_COLOR; + uint32_t OTG_CLOCK_CONTROL; + uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL; + uint32_t OTG_VERTICAL_INTERRUPT0_POSITION; + uint32_t OTG_VERTICAL_INTERRUPT1_CONTROL; + uint32_t OTG_VERTICAL_INTERRUPT1_POSITION; + uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL; + uint32_t OTG_VERTICAL_INTERRUPT2_POSITION; + uint32_t OPTC_INPUT_CLOCK_CONTROL; + uint32_t OPTC_DATA_SOURCE_SELECT; + uint32_t OPTC_MEMORY_CONFIG; + uint32_t OPTC_INPUT_GLOBAL_CONTROL; + uint32_t CONTROL; + uint32_t OTG_GSL_WINDOW_X; + uint32_t OTG_GSL_WINDOW_Y; + uint32_t OTG_VUPDATE_KEEPOUT; + uint32_t OTG_CRC_CNTL; + uint32_t OTG_CRC_CNTL2; + uint32_t OTG_CRC0_DATA_RG; + uint32_t OTG_CRC0_DATA_B; + uint32_t OTG_CRC1_DATA_B; + uint32_t OTG_CRC2_DATA_B; + uint32_t OTG_CRC3_DATA_B; + uint32_t OTG_CRC1_DATA_RG; + uint32_t OTG_CRC2_DATA_RG; + uint32_t OTG_CRC3_DATA_RG; + uint32_t OTG_CRC0_WINDOWA_X_CONTROL; + uint32_t OTG_CRC0_WINDOWA_Y_CONTROL; + uint32_t OTG_CRC0_WINDOWB_X_CONTROL; + uint32_t OTG_CRC0_WINDOWB_Y_CONTROL; + uint32_t OTG_CRC1_WINDOWA_X_CONTROL; + uint32_t OTG_CRC1_WINDOWA_Y_CONTROL; + uint32_t OTG_CRC1_WINDOWB_X_CONTROL; + uint32_t OTG_CRC1_WINDOWB_Y_CONTROL; + uint32_t GSL_SOURCE_SELECT; + uint32_t DWB_SOURCE_SELECT; + uint32_t OTG_DSC_START_POSITION; + uint32_t OPTC_DATA_FORMAT_CONTROL; + uint32_t OPTC_BYTES_PER_PIXEL; + uint32_t OPTC_WIDTH_CONTROL; + uint32_t OTG_DRR_CONTROL; + uint32_t OTG_BLANK_DATA_COLOR; + uint32_t OTG_BLANK_DATA_COLOR_EXT; + uint32_t OTG_DRR_TRIGGER_WINDOW; + uint32_t OTG_M_CONST_DTO0; + uint32_t OTG_M_CONST_DTO1; + uint32_t OTG_DRR_V_TOTAL_CHANGE; + uint32_t OTG_GLOBAL_CONTROL4; + uint32_t OTG_CRC0_WINDOWA_X_CONTROL_READBACK; + uint32_t OTG_CRC0_WINDOWA_Y_CONTROL_READBACK; + uint32_t OTG_CRC0_WINDOWB_X_CONTROL_READBACK; + uint32_t OTG_CRC0_WINDOWB_Y_CONTROL_READBACK; + uint32_t OTG_CRC1_WINDOWA_X_CONTROL_READBACK; + uint32_t OTG_CRC1_WINDOWA_Y_CONTROL_READBACK; + uint32_t OTG_CRC1_WINDOWB_X_CONTROL_READBACK; + uint32_t OTG_CRC1_WINDOWB_Y_CONTROL_READBACK; + uint32_t OPTC_CLOCK_CONTROL; +}; + +#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\ + SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ + SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\ + SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\ + SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\ + SF(OTG0_OTG_BLANK_CONTROL, OTG_BLANK_DATA_EN, mask_sh),\ + SF(OTG0_OTG_BLANK_CONTROL, OTG_BLANK_DE_MODE, mask_sh),\ + SF(OTG0_OTG_BLANK_CONTROL, OTG_CURRENT_BLANK_STATE, mask_sh),\ + SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\ + SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + 
SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ + SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ + SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ + SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\ + SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\ + SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\ + SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\ + SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\ + SF(OTG0_OTG_INTERLACE_CONTROL, OTG_INTERLACE_ENABLE, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ + SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_MID, OTG_V_TOTAL_MID, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK_EN, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_FRAME_NUM, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\ + SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\ + SF(OTG0_OTG_STATIC_SCREEN_CONTROL, 
OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\ + SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\ + SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\ + SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\ + SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\ + SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_B_CB, mask_sh),\ + SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_G_Y, mask_sh),\ + SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_R_CR, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ + SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ + SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ + SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\ + SF(OTG0_OTG_MASTER_UPDATE_MODE, MASTER_UPDATE_INTERLACED_MODE, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, 
mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh) + + + +#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\ + TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\ + SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC0, mask_sh),\ + SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC1, mask_sh),\ + SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_VRES, mask_sh),\ + SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_HRES, mask_sh),\ + SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_RAMP0_OFFSET, mask_sh),\ + SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_EN, mask_sh),\ + SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_MODE, mask_sh),\ + SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_DYNAMIC_RANGE, mask_sh),\ + SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_COLOR_FORMAT, mask_sh),\ + SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_MASK, mask_sh),\ + SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_DATA, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SRC_SEL, mask_sh),\ + SF(OTG0_OTG_MANUAL_FLOW_CONTROL, MANUAL_FLOW_CONTROL, mask_sh),\ + +#define TG_REG_FIELD_LIST_DCN1_0(type) \ + type VSTARTUP_START;\ + type VUPDATE_OFFSET;\ + type VUPDATE_WIDTH;\ + type VREADY_OFFSET;\ + type OTG_BLANK_DATA_EN;\ + type OTG_BLANK_DE_MODE;\ + type OTG_CURRENT_BLANK_STATE;\ + type OTG_MASTER_UPDATE_LOCK;\ + type UPDATE_LOCK_STATUS;\ + type OTG_UPDATE_PENDING;\ + type OTG_MASTER_UPDATE_LOCK_SEL;\ + type OTG_BLANK_DATA_DOUBLE_BUFFER_EN;\ + type OTG_H_TOTAL;\ + type OTG_H_BLANK_START;\ + type OTG_H_BLANK_END;\ + type OTG_H_SYNC_A_START;\ + type OTG_H_SYNC_A_END;\ + type OTG_H_SYNC_A_POL;\ + type OTG_H_TIMING_DIV_BY2;\ + type OTG_V_TOTAL;\ + type OTG_V_BLANK_START;\ + type OTG_V_BLANK_END;\ + type OTG_V_SYNC_A_START;\ + type OTG_V_SYNC_A_END;\ + type OTG_V_SYNC_A_POL;\ + type OTG_INTERLACE_ENABLE;\ + type OTG_MASTER_EN;\ + type OTG_START_POINT_CNTL;\ + type OTG_DISABLE_POINT_CNTL;\ + type OTG_FIELD_NUMBER_CNTL;\ + type OTG_CURRENT_MASTER_EN_STATE;\ + type OTG_STEREO_EN;\ + type OTG_STEREO_SYNC_OUTPUT_LINE_NUM;\ + type OTG_STEREO_SYNC_OUTPUT_POLARITY;\ + type OTG_STEREO_EYE_FLAG_POLARITY;\ + type OTG_STEREO_CURRENT_EYE;\ + type OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP;\ + type OTG_3D_STRUCTURE_EN;\ + type OTG_3D_STRUCTURE_V_UPDATE_MODE;\ + type OTG_3D_STRUCTURE_STEREO_SEL_OVR;\ + type OTG_V_TOTAL_MAX;\ + type OTG_V_TOTAL_MID;\ + type OTG_V_TOTAL_MIN;\ + type OTG_V_TOTAL_MIN_SEL;\ + type OTG_V_TOTAL_MAX_SEL;\ + type OTG_VTOTAL_MID_REPLACING_MAX_EN;\ + type OTG_VTOTAL_MID_FRAME_NUM;\ + type OTG_FORCE_LOCK_ON_EVENT;\ + type OTG_SET_V_TOTAL_MIN_MASK_EN;\ + type OTG_SET_V_TOTAL_MIN_MASK;\ + type OTG_FORCE_COUNT_NOW_CLEAR;\ + type OTG_FORCE_COUNT_NOW_MODE;\ + type OTG_FORCE_COUNT_NOW_OCCURRED;\ + type OTG_TRIGA_SOURCE_SELECT;\ + type OTG_TRIGA_SOURCE_PIPE_SELECT;\ + type OTG_TRIGA_RISING_EDGE_DETECT_CNTL;\ + type OTG_TRIGA_FALLING_EDGE_DETECT_CNTL;\ + type OTG_TRIGA_POLARITY_SELECT;\ + type OTG_TRIGA_FREQUENCY_SELECT;\ + type OTG_TRIGA_DELAY;\ + type OTG_TRIGA_CLEAR;\ + type OTG_TRIGA_MANUAL_TRIG;\ + type OTG_STATIC_SCREEN_EVENT_MASK;\ + type OTG_STATIC_SCREEN_FRAME_COUNT;\ + type OTG_FRAME_COUNT;\ + type OTG_V_BLANK;\ + type OTG_V_ACTIVE_DISP;\ + type OTG_HORZ_COUNT;\ + type OTG_VERT_COUNT;\ + type OTG_VERT_COUNT_NOM;\ + type OTG_BLACK_COLOR_B_CB;\ + type OTG_BLACK_COLOR_G_Y;\ + type OTG_BLACK_COLOR_R_CR;\ + type OTG_BLANK_DATA_COLOR_BLUE_CB;\ + type 
OTG_BLANK_DATA_COLOR_GREEN_Y;\ + type OTG_BLANK_DATA_COLOR_RED_CR;\ + type OTG_BLANK_DATA_COLOR_BLUE_CB_EXT;\ + type OTG_BLANK_DATA_COLOR_GREEN_Y_EXT;\ + type OTG_BLANK_DATA_COLOR_RED_CR_EXT;\ + type OTG_VTOTAL_MID_REPLACING_MIN_EN;\ + type OTG_TEST_PATTERN_INC0;\ + type OTG_TEST_PATTERN_INC1;\ + type OTG_TEST_PATTERN_VRES;\ + type OTG_TEST_PATTERN_HRES;\ + type OTG_TEST_PATTERN_RAMP0_OFFSET;\ + type OTG_TEST_PATTERN_EN;\ + type OTG_TEST_PATTERN_MODE;\ + type OTG_TEST_PATTERN_DYNAMIC_RANGE;\ + type OTG_TEST_PATTERN_COLOR_FORMAT;\ + type OTG_TEST_PATTERN_MASK;\ + type OTG_TEST_PATTERN_DATA;\ + type OTG_BUSY;\ + type OTG_CLOCK_EN;\ + type OTG_CLOCK_ON;\ + type OTG_CLOCK_GATE_DIS;\ + type OTG_VERTICAL_INTERRUPT0_INT_ENABLE;\ + type OTG_VERTICAL_INTERRUPT0_LINE_START;\ + type OTG_VERTICAL_INTERRUPT0_LINE_END;\ + type OTG_VERTICAL_INTERRUPT1_INT_ENABLE;\ + type OTG_VERTICAL_INTERRUPT1_LINE_START;\ + type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\ + type OTG_VERTICAL_INTERRUPT2_LINE_START;\ + type OPTC_INPUT_CLK_EN;\ + type OPTC_INPUT_CLK_ON;\ + type OPTC_INPUT_CLK_GATE_DIS;\ + type OPTC_UNDERFLOW_OCCURRED_STATUS;\ + type OPTC_UNDERFLOW_CLEAR;\ + type OPTC_SRC_SEL;\ + type VTG0_ENABLE;\ + type VTG0_FP2;\ + type VTG0_VCOUNT_INIT;\ + type OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED;\ + type OTG_FORCE_VSYNC_NEXT_LINE_CLEAR;\ + type OTG_AUTO_FORCE_VSYNC_MODE;\ + type MASTER_UPDATE_INTERLACED_MODE;\ + type OTG_GSL0_EN;\ + type OTG_GSL1_EN;\ + type OTG_GSL2_EN;\ + type OTG_GSL_MASTER_EN;\ + type OTG_GSL_FORCE_DELAY;\ + type OTG_GSL_CHECK_ALL_FIELDS;\ + type OTG_GSL_WINDOW_START_X;\ + type OTG_GSL_WINDOW_END_X;\ + type OTG_GSL_WINDOW_START_Y;\ + type OTG_GSL_WINDOW_END_Y;\ + type OTG_RANGE_TIMING_DBUF_UPDATE_MODE;\ + type OTG_GSL_MASTER_MODE;\ + type OTG_MASTER_UPDATE_LOCK_GSL_EN;\ + type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\ + type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\ + type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;\ + type OTG_CRC_CONT_EN;\ + type OTG_CRC0_SELECT;\ + type OTG_CRC_EN;\ + type CRC0_R_CR;\ + type CRC0_G_Y;\ + type CRC0_B_CB;\ + type CRC1_R_CR;\ + type CRC1_G_Y;\ + type CRC1_B_CB;\ + type CRC2_R_CR;\ + type CRC2_G_Y;\ + type CRC2_B_CB;\ + type CRC3_R_CR;\ + type CRC3_G_Y;\ + type CRC3_B_CB;\ + type OTG_CRC0_WINDOWA_X_START;\ + type OTG_CRC0_WINDOWA_X_END;\ + type OTG_CRC0_WINDOWA_Y_START;\ + type OTG_CRC0_WINDOWA_Y_END;\ + type OTG_CRC0_WINDOWB_X_START;\ + type OTG_CRC0_WINDOWB_X_END;\ + type OTG_CRC0_WINDOWB_Y_START;\ + type OTG_CRC0_WINDOWB_Y_END;\ + type OTG_CRC_WINDOW_DB_EN;\ + type OTG_CRC1_WINDOWA_X_START;\ + type OTG_CRC1_WINDOWA_X_END;\ + type OTG_CRC1_WINDOWA_Y_START;\ + type OTG_CRC1_WINDOWA_Y_END;\ + type OTG_CRC1_WINDOWB_X_START;\ + type OTG_CRC1_WINDOWB_X_END;\ + type OTG_CRC1_WINDOWB_Y_START;\ + type OTG_CRC1_WINDOWB_Y_END;\ + type GSL0_READY_SOURCE_SEL;\ + type GSL1_READY_SOURCE_SEL;\ + type GSL2_READY_SOURCE_SEL;\ + type MANUAL_FLOW_CONTROL;\ + type MANUAL_FLOW_CONTROL_SEL; + +#define TG_REG_FIELD_LIST(type) \ + TG_REG_FIELD_LIST_DCN1_0(type)\ + type OTG_V_SYNC_MODE;\ + type OTG_DRR_TRIGGER_WINDOW_START_X;\ + type OTG_DRR_TRIGGER_WINDOW_END_X;\ + type OTG_DRR_V_TOTAL_CHANGE_LIMIT;\ + type OTG_OUT_MUX;\ + type OTG_M_CONST_DTO_PHASE;\ + type OTG_M_CONST_DTO_MODULO;\ + type MASTER_UPDATE_LOCK_DB_X;\ + type MASTER_UPDATE_LOCK_DB_Y;\ + type MASTER_UPDATE_LOCK_DB_EN;\ + type GLOBAL_UPDATE_LOCK_EN;\ + type DIG_UPDATE_LOCATION;\ + type OTG_DSC_START_POSITION_X;\ + type OTG_DSC_START_POSITION_LINE_NUM;\ + type OPTC_NUM_OF_INPUT_SEGMENT;\ + type OPTC_SEG0_SRC_SEL;\ + type 
OPTC_SEG1_SRC_SEL;\ + type OPTC_SEG2_SRC_SEL;\ + type OPTC_SEG3_SRC_SEL;\ + type OPTC_MEM_SEL;\ + type OPTC_DATA_FORMAT;\ + type OPTC_DSC_MODE;\ + type OPTC_DSC_BYTES_PER_PIXEL;\ + type OPTC_DSC_SLICE_WIDTH;\ + type OPTC_SEGMENT_WIDTH;\ + type OPTC_DWB0_SOURCE_SELECT;\ + type OPTC_DWB1_SOURCE_SELECT;\ + type MASTER_UPDATE_LOCK_DB_START_X;\ + type MASTER_UPDATE_LOCK_DB_END_X;\ + type MASTER_UPDATE_LOCK_DB_START_Y;\ + type MASTER_UPDATE_LOCK_DB_END_Y;\ + type DIG_UPDATE_POSITION_X;\ + type DIG_UPDATE_POSITION_Y;\ + type OTG_H_TIMING_DIV_MODE;\ + type OTG_DRR_TIMING_DBUF_UPDATE_MODE;\ + type OTG_CRC_DSC_MODE;\ + type OTG_CRC_DATA_STREAM_COMBINE_MODE;\ + type OTG_CRC_DATA_STREAM_SPLIT_MODE;\ + type OTG_CRC_DATA_FORMAT;\ + type OTG_V_TOTAL_LAST_USED_BY_DRR;\ + type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\ + type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING; + +#define TG_REG_FIELD_LIST_DCN3_2(type) \ + type OTG_H_TIMING_DIV_MODE_MANUAL; + + +#define TG_REG_FIELD_LIST_DCN3_5(type) \ + type OTG_CRC0_WINDOWA_X_START_READBACK;\ + type OTG_CRC0_WINDOWA_X_END_READBACK;\ + type OTG_CRC0_WINDOWA_Y_START_READBACK;\ + type OTG_CRC0_WINDOWA_Y_END_READBACK;\ + type OTG_CRC0_WINDOWB_X_START_READBACK;\ + type OTG_CRC0_WINDOWB_X_END_READBACK;\ + type OTG_CRC0_WINDOWB_Y_START_READBACK;\ + type OTG_CRC0_WINDOWB_Y_END_READBACK; \ + type OTG_CRC1_WINDOWA_X_START_READBACK;\ + type OTG_CRC1_WINDOWA_X_END_READBACK;\ + type OTG_CRC1_WINDOWA_Y_START_READBACK;\ + type OTG_CRC1_WINDOWA_Y_END_READBACK;\ + type OTG_CRC1_WINDOWB_X_START_READBACK;\ + type OTG_CRC1_WINDOWB_X_END_READBACK;\ + type OTG_CRC1_WINDOWB_Y_START_READBACK;\ + type OTG_CRC1_WINDOWB_Y_END_READBACK;\ + type OPTC_FGCG_REP_DIS; + +struct dcn_optc_shift { + TG_REG_FIELD_LIST(uint8_t) + TG_REG_FIELD_LIST_DCN3_2(uint8_t) + TG_REG_FIELD_LIST_DCN3_5(uint8_t) +}; + +struct dcn_optc_mask { + TG_REG_FIELD_LIST(uint32_t) + TG_REG_FIELD_LIST_DCN3_2(uint32_t) + TG_REG_FIELD_LIST_DCN3_5(uint32_t) +}; + +void dcn10_timing_generator_init(struct optc *optc); + +#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c new file mode 100644 index 0000000000..58bdbd859b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c @@ -0,0 +1,587 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "dcn20_optc.h" +#include "dc.h" + +#define REG(reg)\ + optc1->tg_regs->reg + +#define CTX \ + optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + optc1->tg_shift->field_name, optc1->tg_mask->field_name + +/** + * optc2_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator. + * + * @optc: timing_generator instance. + * + * Return: true if the CRTC was enabled. + * + */ +bool optc2_enable_crtc(struct timing_generator *optc) +{ + /* TODO FPGA wait for answer + * OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE + * OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK + */ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + /* opp instance for OTG. For DCN1.0, ODM is removed. + * OPP and OPTC should be mapped 1:1. + */ + REG_UPDATE(OPTC_DATA_SOURCE_SELECT, + OPTC_SEG0_SRC_SEL, optc->inst); + + /* VTG enable first is for HW workaround */ + REG_UPDATE(CONTROL, + VTG0_ENABLE, 1); + + REG_SEQ_START(); + + /* Enable CRTC */ + REG_UPDATE_2(OTG_CONTROL, + OTG_DISABLE_POINT_CNTL, 3, + OTG_MASTER_EN, 1); + + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); + + return true; +} + +/** + * optc2_set_gsl() - Assign OTG to GSL groups, + * set one of the OTGs to be the master and the rest to be slaves + * + * @optc: timing_generator instance. + * @params: pointer to gsl_params + */ +void optc2_set_gsl(struct timing_generator *optc, + const struct gsl_params *params) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + +/* + * There are (MAX_OPTC+1)/2 gsl groups available for use. + * In each group (assign an OTG to a group by setting OTG_GSLX_EN = 1), + * set one of the OTGs to be the master (OTG_GSL_MASTER_EN = 1) and the rest are slaves. + */ + REG_UPDATE_5(OTG_GSL_CONTROL, + OTG_GSL0_EN, params->gsl0_en, + OTG_GSL1_EN, params->gsl1_en, + OTG_GSL2_EN, params->gsl2_en, + OTG_GSL_MASTER_EN, params->gsl_master_en, + OTG_GSL_MASTER_MODE, params->gsl_master_mode); +} + + +void optc2_set_gsl_source_select( + struct timing_generator *optc, + int group_idx, + uint32_t gsl_ready_signal) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + switch (group_idx) { + case 1: + REG_UPDATE(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, gsl_ready_signal); + break; + case 2: + REG_UPDATE(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, gsl_ready_signal); + break; + case 3: + REG_UPDATE(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, gsl_ready_signal); + break; + default: + break; + } +} + +/* Set DSC-related configuration. + * dsc_mode: 0 disables DSC, other values enable DSC in specified format + * dsc_bytes_per_pixel: Bytes per pixel in u3.28 format + * dsc_slice_width: Slice width in pixels + */ +void optc2_set_dsc_config(struct timing_generator *optc, + enum optc_dsc_mode dsc_mode, + uint32_t dsc_bytes_per_pixel, + uint32_t dsc_slice_width) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, + OPTC_DSC_MODE, dsc_mode); + + REG_SET(OPTC_BYTES_PER_PIXEL, 0, + OPTC_DSC_BYTES_PER_PIXEL, dsc_bytes_per_pixel); + + REG_UPDATE(OPTC_WIDTH_CONTROL, + OPTC_DSC_SLICE_WIDTH, dsc_slice_width); +} + +/* Get DSC-related configuration.
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format + */ +void optc2_get_dsc_status(struct timing_generator *optc, + uint32_t *dsc_mode) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_GET(OPTC_DATA_FORMAT_CONTROL, + OPTC_DSC_MODE, dsc_mode); +} + + +/* TEMP: Need to figure out inheritance model here. */ +bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) +{ + return optc1_is_two_pixels_per_containter(timing); +} + +void optc2_set_odm_bypass(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t h_div_2 = 0; + + REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, + OPTC_NUM_OF_INPUT_SEGMENT, 0, + OPTC_SEG0_SRC_SEL, optc->inst, + OPTC_SEG1_SRC_SEL, 0xf); + REG_WRITE(OTG_H_TIMING_CNTL, 0); + + h_div_2 = optc2_is_two_pixels_per_containter(dc_crtc_timing); + REG_UPDATE(OTG_H_TIMING_CNTL, + OTG_H_TIMING_DIV_BY2, h_div_2); + REG_SET(OPTC_MEMORY_CONFIG, 0, + OPTC_MEM_SEL, 0); + optc1->opp_count = 1; +} + +void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, + struct dc_crtc_timing *timing) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right) + / opp_cnt; + uint32_t memory_mask; + + ASSERT(opp_cnt == 2); + + /* TODO: In pseudocode but does not affect Maximus; delete this comment if we don't need it on ASIC + * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1); + * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start + * REG_SET_2(OTG_GLOBAL_CONTROL1, 0, + * MASTER_UPDATE_LOCK_DB_X, 160, + * MASTER_UPDATE_LOCK_DB_Y, 240); + */ + + /* 2 pieces of memory are required for widths up to 5120, 4 for widths up to 8192, + * however, for ODM combine we can simplify by always using 4. + * To make sure there's no overlap, each instance "reserves" 2 memories and + * they are uniquely combined here.
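+ * For example (editor's note, derived from the expression below): with + * opp_id[0] = 0 and opp_id[1] = 1, the mask works out to + * (0x3 << 0) | (0x3 << 2) = 0xf, i.e. memory instances 0-3 are claimed.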
+ */ + memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); + + if (REG(OPTC_MEMORY_CONFIG)) + REG_SET(OPTC_MEMORY_CONFIG, 0, + OPTC_MEM_SEL, memory_mask); + + REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, + OPTC_NUM_OF_INPUT_SEGMENT, 1, + OPTC_SEG0_SRC_SEL, opp_id[0], + OPTC_SEG1_SRC_SEL, opp_id[1]); + + REG_UPDATE(OPTC_WIDTH_CONTROL, + OPTC_SEGMENT_WIDTH, mpcc_hactive); + + REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_BY2, 1); + optc1->opp_count = opp_cnt; +} + +void optc2_get_optc_source(struct timing_generator *optc, + uint32_t *num_of_src_opp, + uint32_t *src_opp_id_0, + uint32_t *src_opp_id_1) +{ + uint32_t num_of_input_segments; + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_GET_3(OPTC_DATA_SOURCE_SELECT, + OPTC_NUM_OF_INPUT_SEGMENT, &num_of_input_segments, + OPTC_SEG0_SRC_SEL, src_opp_id_0, + OPTC_SEG1_SRC_SEL, src_opp_id_1); + + if (num_of_input_segments == 1) + *num_of_src_opp = 2; + else + *num_of_src_opp = 1; + + /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */ + if (*src_opp_id_1 == 0xf) + *num_of_src_opp = 1; +} + +static void optc2_set_dwb_source(struct timing_generator *optc, + uint32_t dwb_pipe_inst) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + if (dwb_pipe_inst == 0) + REG_UPDATE(DWB_SOURCE_SELECT, + OPTC_DWB0_SOURCE_SELECT, optc->inst); + else if (dwb_pipe_inst == 1) + REG_UPDATE(DWB_SOURCE_SELECT, + OPTC_DWB1_SOURCE_SELECT, optc->inst); +} + +static void optc2_align_vblanks( + struct timing_generator *optc_master, + struct timing_generator *optc_slave, + uint32_t master_pixel_clock_100Hz, + uint32_t slave_pixel_clock_100Hz, + uint8_t master_clock_divider, + uint8_t slave_clock_divider) +{ + /* accessing slave OTG registers */ + struct optc *optc1 = DCN10TG_FROM_TG(optc_slave); + + uint32_t master_v_active = 0; + uint32_t master_h_total = 0; + uint32_t slave_h_total = 0; + uint64_t L, XY; + uint32_t X, Y, p = 10000; + uint32_t master_update_lock; + + /* disable slave OTG */ + REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0); + /* wait until disabled */ + REG_WAIT(OTG_CONTROL, + OTG_CURRENT_MASTER_EN_STATE, + 0, 10, 5000); + + REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &slave_h_total); + + /* assign slave OTG to be controlled by master update lock */ + REG_SET(OTG_GLOBAL_CONTROL0, 0, + OTG_MASTER_UPDATE_LOCK_SEL, optc_master->inst); + + /* accessing master OTG registers */ + optc1 = DCN10TG_FROM_TG(optc_master); + + /* saving update lock state, not sure if it's needed */ + REG_GET(OTG_MASTER_UPDATE_LOCK, + OTG_MASTER_UPDATE_LOCK, &master_update_lock); + /* unlocking master OTG */ + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 0); + + REG_GET(OTG_V_BLANK_START_END, + OTG_V_BLANK_START, &master_v_active); + REG_GET(OTG_H_TOTAL, OTG_H_TOTAL, &master_h_total); + + /* calculate when to enable slave OTG */ + L = (uint64_t)p * slave_h_total * master_pixel_clock_100Hz; + L = div_u64(L, master_h_total); + L = div_u64(L, slave_pixel_clock_100Hz); + XY = div_u64(L, p); + Y = master_v_active - XY - 1; + X = div_u64(((XY + 1) * p - L) * master_h_total, p * master_clock_divider); + + /* + * set master OTG to unlock when V/H + * counters reach calculated values + */ + REG_UPDATE(OTG_GLOBAL_CONTROL1, + MASTER_UPDATE_LOCK_DB_EN, 1); + REG_UPDATE_2(OTG_GLOBAL_CONTROL1, + MASTER_UPDATE_LOCK_DB_X, + X, + MASTER_UPDATE_LOCK_DB_Y, + Y); + + /* lock master OTG */ + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 1); + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, 1, 10); + + /* accessing slave OTG registers */ + optc1 = 
DCN10TG_FROM_TG(optc_slave); + + /* + * enable slave OTG, the OTG is locked with + * master's update lock, so it will not run + */ + REG_UPDATE(OTG_CONTROL, + OTG_MASTER_EN, 1); + + /* accessing master OTG registers */ + optc1 = DCN10TG_FROM_TG(optc_master); + + /* + * unlock master OTG. When master H/V counters reach + * DB_XY point, slave OTG will start + */ + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 0); + + /* accessing slave OTG registers */ + optc1 = DCN10TG_FROM_TG(optc_slave); + + /* wait for slave OTG to start running */ + REG_WAIT(OTG_CONTROL, + OTG_CURRENT_MASTER_EN_STATE, + 1, 10, 5000); + + /* accessing master OTG registers */ + optc1 = DCN10TG_FROM_TG(optc_master); + + /* disable the XY point */ + REG_UPDATE(OTG_GLOBAL_CONTROL1, + MASTER_UPDATE_LOCK_DB_EN, 0); + REG_UPDATE_2(OTG_GLOBAL_CONTROL1, + MASTER_UPDATE_LOCK_DB_X, + 0, + MASTER_UPDATE_LOCK_DB_Y, + 0); + + /* restore master update lock */ + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, master_update_lock); + + /* accessing slave OTG registers */ + optc1 = DCN10TG_FROM_TG(optc_slave); + /* restore slave to be controlled by its own update lock */ + REG_SET(OTG_GLOBAL_CONTROL0, 0, + OTG_MASTER_UPDATE_LOCK_SEL, optc_slave->inst); + +} + +void optc2_triplebuffer_lock(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET(OTG_GLOBAL_CONTROL0, 0, + OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); + + REG_SET(OTG_VUPDATE_KEEPOUT, 0, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); + + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 1); + + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); +} + +void optc2_triplebuffer_unlock(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 0); + + REG_SET(OTG_VUPDATE_KEEPOUT, 0, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 0); + +} + +void optc2_lock_doublebuffer_enable(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t v_blank_start = 0; + uint32_t h_blank_start = 0; + + REG_UPDATE(OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, 1); + + REG_UPDATE_2(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1, + DIG_UPDATE_LOCATION, 20); + + REG_GET(OTG_V_BLANK_START_END, OTG_V_BLANK_START, &v_blank_start); + + REG_GET(OTG_H_BLANK_START_END, OTG_H_BLANK_START, &h_blank_start); + + REG_UPDATE_2(OTG_GLOBAL_CONTROL1, + MASTER_UPDATE_LOCK_DB_X, + (h_blank_start - 200 - 1) / optc1->opp_count, + MASTER_UPDATE_LOCK_DB_Y, + v_blank_start - 1); + + REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); +} + +void optc2_lock_doublebuffer_disable(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_UPDATE_2(OTG_GLOBAL_CONTROL1, + MASTER_UPDATE_LOCK_DB_X, + 0, + MASTER_UPDATE_LOCK_DB_Y, + 0); + + REG_UPDATE_2(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 0, + DIG_UPDATE_LOCATION, 0); + + REG_UPDATE(OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, 0); +} + +void optc2_setup_manual_trigger(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + /* Set the min/max selectors unconditionally so that + * DMCUB fw may change OTG timings when necessary + * TODO: Remove the w/a after fixing the issue in DMCUB firmware + */ + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, +
OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ + + REG_SET_8(OTG_TRIGA_CNTL, 0, + OTG_TRIGA_SOURCE_SELECT, 21, + OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, + OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1, + OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0, + OTG_TRIGA_POLARITY_SELECT, 0, + OTG_TRIGA_FREQUENCY_SELECT, 0, + OTG_TRIGA_DELAY, 0, + OTG_TRIGA_CLEAR, 1); +} + +void optc2_program_manual_trigger(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET(OTG_TRIGA_MANUAL_TRIG, 0, + OTG_TRIGA_MANUAL_TRIG, 1); +} + +bool optc2_configure_crc(struct timing_generator *optc, + const struct crc_params *params) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET_2(OTG_CRC_CNTL2, 0, + OTG_CRC_DSC_MODE, params->dsc_mode, + OTG_CRC_DATA_STREAM_COMBINE_MODE, params->odm_mode); + + return optc1_configure_crc(optc, params); +} + + +void optc2_get_last_used_drr_vtotal(struct timing_generator *optc, uint32_t *refresh_rate) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_GET(OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, refresh_rate); +} + +static struct timing_generator_funcs dcn20_tg_funcs = { + .validate_timing = optc1_validate_timing, + .program_timing = optc1_program_timing, + .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, + .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, + .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, + .program_global_sync = optc1_program_global_sync, + .enable_crtc = optc2_enable_crtc, + .disable_crtc = optc1_disable_crtc, + /* used by enable_timing_synchronization. Not needed for FPGA */ + .is_counter_moving = optc1_is_counter_moving, + .get_position = optc1_get_position, + .get_frame_count = optc1_get_vblank_counter, + .get_scanoutpos = optc1_get_crtc_scanoutpos, + .get_otg_active_size = optc1_get_otg_active_size, + .set_early_control = optc1_set_early_control, + /* used by enable_timing_synchronization.
Not needed for FPGA */ + .wait_for_state = optc1_wait_for_state, + .set_blank = optc1_set_blank, + .is_blanked = optc1_is_blanked, + .set_blank_color = optc1_program_blank_color, + .enable_reset_trigger = optc1_enable_reset_trigger, + .enable_crtc_reset = optc1_enable_crtc_reset, + .did_triggered_reset_occur = optc1_did_triggered_reset_occur, + .triplebuffer_lock = optc2_triplebuffer_lock, + .triplebuffer_unlock = optc2_triplebuffer_unlock, + .disable_reset_trigger = optc1_disable_reset_trigger, + .lock = optc1_lock, + .unlock = optc1_unlock, + .lock_doublebuffer_enable = optc2_lock_doublebuffer_enable, + .lock_doublebuffer_disable = optc2_lock_doublebuffer_disable, + .enable_optc_clock = optc1_enable_optc_clock, + .set_drr = optc1_set_drr, + .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, + .set_vtotal_min_max = optc1_set_vtotal_min_max, + .set_static_screen_control = optc1_set_static_screen_control, + .program_stereo = optc1_program_stereo, + .is_stereo_left_eye = optc1_is_stereo_left_eye, + .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer, + .tg_init = optc1_tg_init, + .is_tg_enabled = optc1_is_tg_enabled, + .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, + .clear_optc_underflow = optc1_clear_optc_underflow, + .setup_global_swap_lock = NULL, + .get_crc = optc1_get_crc, + .configure_crc = optc2_configure_crc, + .set_dsc_config = optc2_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, + .set_dwb_source = optc2_set_dwb_source, + .set_odm_bypass = optc2_set_odm_bypass, + .set_odm_combine = optc2_set_odm_combine, + .get_optc_source = optc2_get_optc_source, + .set_gsl = optc2_set_gsl, + .set_gsl_source_select = optc2_set_gsl_source_select, + .set_vtg_params = optc1_set_vtg_params, + .program_manual_trigger = optc2_program_manual_trigger, + .setup_manual_trigger = optc2_setup_manual_trigger, + .get_hw_timing = optc1_get_hw_timing, + .align_vblanks = optc2_align_vblanks, +}; + +void dcn20_timing_generator_init(struct optc *optc1) +{ + optc1->base.funcs = &dcn20_tg_funcs; + + optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; + optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; + + optc1->min_h_blank = 32; + optc1->min_v_blank = 3; + optc1->min_v_blank_interlace = 5; + optc1->min_h_sync_width = 4;// Minimum HSYNC = 8 pixels was asked for by HW in the first place for no actual reason. The Oculus Rift S will not light up with 8, as its hsync width is 6. Changed to 4 to fix that issue. + optc1->min_v_sync_width = 1; +} diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h new file mode 100644 index 0000000000..c2e03ced39 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h @@ -0,0 +1,124 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN20_H__ +#define __DC_OPTC_DCN20_H__ + +#include "dcn10/dcn10_optc.h" + +#define TG_COMMON_REG_LIST_DCN2_0(inst) \ + TG_COMMON_REG_LIST_DCN(inst),\ + SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ + SRI(OTG_GSL_WINDOW_X, OTG, inst),\ + SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ + SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ + SRI(OTG_DSC_START_POSITION, OTG, inst),\ + SRI(OTG_CRC_CNTL2, OTG, inst),\ + SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ + SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ + SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ + SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ + SR(DWB_SOURCE_SELECT),\ + SRI(OTG_MANUAL_FLOW_CONTROL, OTG, inst), \ + SRI(OTG_DRR_CONTROL, OTG, inst) + +#define TG_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\ + TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_Y, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, DIG_UPDATE_LOCATION, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ + SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ + SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\ + SF(OTG0_OTG_MANUAL_FLOW_CONTROL, MANUAL_FLOW_CONTROL, mask_sh), \ + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + +void dcn20_timing_generator_init(struct optc *optc); + +void optc2_get_last_used_drr_vtotal(struct timing_generator *optc, + uint32_t *refresh_rate); + +bool optc2_enable_crtc(struct timing_generator 
*optc); + +void optc2_set_gsl(struct timing_generator *optc, + const struct gsl_params *params); + +void optc2_set_gsl_source_select(struct timing_generator *optc, + int group_idx, + uint32_t gsl_ready_signal); + +void optc2_set_dsc_config(struct timing_generator *optc, + enum optc_dsc_mode dsc_mode, + uint32_t dsc_bytes_per_pixel, + uint32_t dsc_slice_width); + +void optc2_get_dsc_status(struct timing_generator *optc, + uint32_t *dsc_mode); + +void optc2_set_odm_bypass(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing); + +void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, + struct dc_crtc_timing *timing); + +void optc2_get_optc_source(struct timing_generator *optc, + uint32_t *num_of_src_opp, + uint32_t *src_opp_id_0, + uint32_t *src_opp_id_1); + +void optc2_triplebuffer_lock(struct timing_generator *optc); +void optc2_triplebuffer_unlock(struct timing_generator *optc); +void optc2_lock_doublebuffer_disable(struct timing_generator *optc); +void optc2_lock_doublebuffer_enable(struct timing_generator *optc); +void optc2_setup_manual_trigger(struct timing_generator *optc); +void optc2_program_manual_trigger(struct timing_generator *optc); +bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); +bool optc2_configure_crc(struct timing_generator *optc, + const struct crc_params *params); +#endif /* __DC_OPTC_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c new file mode 100644 index 0000000000..70fcbec03f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c @@ -0,0 +1,202 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "dcn201_optc.h" +#include "dcn10/dcn10_optc.h" +#include "dc.h" + +#define REG(reg)\ + optc1->tg_regs->reg + +#define CTX \ + optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + optc1->tg_shift->field_name, optc1->tg_mask->field_name + +/* TEMP: Need to figure out inheritance model here. */ +bool optc201_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) +{ + return optc1_is_two_pixels_per_containter(timing); +} + +static void optc201_triplebuffer_lock(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET(OTG_GLOBAL_CONTROL0, 0, + OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); + REG_SET(OTG_VUPDATE_KEEPOUT, 0, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 1); + + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); +} + +static void optc201_triplebuffer_unlock(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 0); + REG_SET(OTG_VUPDATE_KEEPOUT, 0, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 0); + +} + +static bool optc201_validate_timing( + struct timing_generator *optc, + const struct dc_crtc_timing *timing) +{ + uint32_t v_blank; + uint32_t h_blank; + uint32_t min_v_blank; + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + ASSERT(timing != NULL); + + v_blank = (timing->v_total - timing->v_addressable - + timing->v_border_top - timing->v_border_bottom); + + h_blank = (timing->h_total - timing->h_addressable - + timing->h_border_right - + timing->h_border_left); + + if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE && + timing->timing_3d_format != TIMING_3D_FORMAT_HW_FRAME_PACKING && + timing->timing_3d_format != TIMING_3D_FORMAT_TOP_AND_BOTTOM && + timing->timing_3d_format != TIMING_3D_FORMAT_SIDE_BY_SIDE && + timing->timing_3d_format != TIMING_3D_FORMAT_FRAME_ALTERNATE && + timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA) + return false; + + /* Check maximum number of pixels supported by Timing Generator + * (Currently this will never fail; to fail it would need a display + * with more than 8192 total horizontal and + * more than 8192 total vertical pixels) + */ + if (timing->h_total > optc1->max_h_total || + timing->v_total > optc1->max_v_total) + return false; + + if (h_blank < optc1->min_h_blank) + return false; + + if (timing->h_sync_width < optc1->min_h_sync_width || + timing->v_sync_width < optc1->min_v_sync_width) + return false; + + min_v_blank = timing->flags.INTERLACE ? optc1->min_v_blank_interlace : optc1->min_v_blank; + + if (v_blank < min_v_blank) + return false; + + return true; + +} + +static void optc201_get_optc_source(struct timing_generator *optc, + uint32_t *num_of_src_opp, + uint32_t *src_opp_id_0, + uint32_t *src_opp_id_1) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_GET(OPTC_DATA_SOURCE_SELECT, + OPTC_SEG0_SRC_SEL, src_opp_id_0); + + *num_of_src_opp = 1; +} + +static struct timing_generator_funcs dcn201_tg_funcs = { + .validate_timing = optc201_validate_timing, + .program_timing = optc1_program_timing, + .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, + .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, + .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, + .program_global_sync = optc1_program_global_sync, + .enable_crtc = optc2_enable_crtc, + .disable_crtc = optc1_disable_crtc, + /* used by
enable_timing_synchronization. Not needed for FPGA */ + .is_counter_moving = optc1_is_counter_moving, + .get_position = optc1_get_position, + .get_frame_count = optc1_get_vblank_counter, + .get_scanoutpos = optc1_get_crtc_scanoutpos, + .get_otg_active_size = optc1_get_otg_active_size, + .set_early_control = optc1_set_early_control, + /* used by enable_timing_synchronization. Not needed for FPGA */ + .wait_for_state = optc1_wait_for_state, + .set_blank = optc1_set_blank, + .is_blanked = optc1_is_blanked, + .set_blank_color = optc1_program_blank_color, + .did_triggered_reset_occur = optc1_did_triggered_reset_occur, + .enable_reset_trigger = optc1_enable_reset_trigger, + .enable_crtc_reset = optc1_enable_crtc_reset, + .disable_reset_trigger = optc1_disable_reset_trigger, + .triplebuffer_lock = optc201_triplebuffer_lock, + .triplebuffer_unlock = optc201_triplebuffer_unlock, + .lock = optc1_lock, + .unlock = optc1_unlock, + .enable_optc_clock = optc1_enable_optc_clock, + .set_drr = optc1_set_drr, + .get_last_used_drr_vtotal = NULL, + .set_vtotal_min_max = optc1_set_vtotal_min_max, + .set_static_screen_control = optc1_set_static_screen_control, + .program_stereo = optc1_program_stereo, + .is_stereo_left_eye = optc1_is_stereo_left_eye, + .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer, + .tg_init = optc1_tg_init, + .is_tg_enabled = optc1_is_tg_enabled, + .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, + .clear_optc_underflow = optc1_clear_optc_underflow, + .get_crc = optc1_get_crc, + .configure_crc = optc2_configure_crc, + .set_dsc_config = optc2_set_dsc_config, + .set_dwb_source = NULL, + .get_optc_source = optc201_get_optc_source, + .set_vtg_params = optc1_set_vtg_params, + .program_manual_trigger = optc2_program_manual_trigger, + .setup_manual_trigger = optc2_setup_manual_trigger, + .get_hw_timing = optc1_get_hw_timing, +}; + +void dcn201_timing_generator_init(struct optc *optc1) +{ + optc1->base.funcs = &dcn201_tg_funcs; + + optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; + optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; + + optc1->min_h_blank = 32; + optc1->min_v_blank = 3; + optc1->min_v_blank_interlace = 5; + optc1->min_h_sync_width = 8; + optc1->min_v_sync_width = 1; +} diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h new file mode 100644 index 0000000000..e9545b7351 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h @@ -0,0 +1,74 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN201_H__ +#define __DC_OPTC_DCN201_H__ + +#include "dcn20/dcn20_optc.h" + +#define TG_COMMON_REG_LIST_DCN201(inst) \ + TG_COMMON_REG_LIST_DCN(inst),\ + SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ + SRI(OTG_GSL_WINDOW_X, OTG, inst),\ + SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ + SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ + SRI(OTG_DSC_START_POSITION, OTG, inst),\ + SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ + SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ + SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ + SR(DWB_SOURCE_SELECT) + +#define TG_COMMON_MASK_SH_LIST_DCN201(mask_sh)\ + TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_Y, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ + SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ + SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh) + +void dcn201_timing_generator_init(struct optc *optc); + +bool optc201_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c new file mode 100644 index 0000000000..b97bdb868a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c @@ -0,0 +1,393 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "dcn30_optc.h" +#include "dc.h" +#include "dcn_calc_math.h" +#include "dc_dmub_srv.h" + +#include "dml/dcn30/dcn30_fpu.h" +#include "dc_trace.h" + +#define REG(reg)\ + optc1->tg_regs->reg + +#define CTX \ + optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + optc1->tg_shift->field_name, optc1->tg_mask->field_name + +void optc3_triplebuffer_lock(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_UPDATE(OTG_GLOBAL_CONTROL2, + OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); + + REG_SET(OTG_VUPDATE_KEEPOUT, 0, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); + + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 1); + + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); + + TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); +} + +void optc3_lock_doublebuffer_enable(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t v_blank_start = 0; + uint32_t v_blank_end = 0; + uint32_t h_blank_start = 0; + uint32_t h_blank_end = 0; + + REG_GET_2(OTG_V_BLANK_START_END, + OTG_V_BLANK_START, &v_blank_start, + OTG_V_BLANK_END, &v_blank_end); + REG_GET_2(OTG_H_BLANK_START_END, + OTG_H_BLANK_START, &h_blank_start, + OTG_H_BLANK_END, &h_blank_end); + + REG_UPDATE_2(OTG_GLOBAL_CONTROL1, + MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start - 1, + MASTER_UPDATE_LOCK_DB_END_Y, v_blank_start); + REG_UPDATE_2(OTG_GLOBAL_CONTROL4, + DIG_UPDATE_POSITION_X, h_blank_start - 180 - 1, + DIG_UPDATE_POSITION_Y, v_blank_start - 1); + // there is a DIG_UPDATE_VCOUNT_MODE and it is 0. 
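+ /* Editor's note (interpretation, not from the original source): per the
+ * values programmed below, the master-update double-buffer lock window
+ * spans X = h_blank_start - 201 to h_blank_start - 180 on the line just
+ * before vertical blank (START_Y/END_Y set above), so pending register
+ * updates are committed right ahead of the blanking period.
+ */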
+ + REG_UPDATE_3(OTG_GLOBAL_CONTROL0, + MASTER_UPDATE_LOCK_DB_START_X, h_blank_start - 200 - 1, + MASTER_UPDATE_LOCK_DB_END_X, h_blank_start - 180, + MASTER_UPDATE_LOCK_DB_EN, 1); + REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1); + + REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); + + TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); +} + +void optc3_lock_doublebuffer_disable(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_UPDATE_2(OTG_GLOBAL_CONTROL0, + MASTER_UPDATE_LOCK_DB_START_X, 0, + MASTER_UPDATE_LOCK_DB_END_X, 0); + REG_UPDATE_2(OTG_GLOBAL_CONTROL1, + MASTER_UPDATE_LOCK_DB_START_Y, 0, + MASTER_UPDATE_LOCK_DB_END_Y, 0); + + REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 0); + REG_UPDATE(OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, 0); + + TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); +} + +void optc3_lock(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_UPDATE(OTG_GLOBAL_CONTROL2, + OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 1); + + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); + + TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true); +} + +void optc3_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_UPDATE(OTG_CONTROL, OTG_OUT_MUX, dest); +} + +void optc3_program_blank_color(struct timing_generator *optc, + const struct tg_color *blank_color) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET_3(OTG_BLANK_DATA_COLOR, 0, + OTG_BLANK_DATA_COLOR_BLUE_CB, blank_color->color_b_cb, + OTG_BLANK_DATA_COLOR_GREEN_Y, blank_color->color_g_y, + OTG_BLANK_DATA_COLOR_RED_CR, blank_color->color_r_cr); + + REG_SET_3(OTG_BLANK_DATA_COLOR_EXT, 0, + OTG_BLANK_DATA_COLOR_BLUE_CB_EXT, blank_color->color_b_cb >> 10, + OTG_BLANK_DATA_COLOR_GREEN_Y_EXT, blank_color->color_g_y >> 10, + OTG_BLANK_DATA_COLOR_RED_CR_EXT, blank_color->color_r_cr >> 10); +} + +void optc3_set_drr_trigger_window(struct timing_generator *optc, + uint32_t window_start, uint32_t window_end) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET_2(OTG_DRR_TRIGGER_WINDOW, 0, + OTG_DRR_TRIGGER_WINDOW_START_X, window_start, + OTG_DRR_TRIGGER_WINDOW_END_X, window_end); +} + +void optc3_set_vtotal_change_limit(struct timing_generator *optc, + uint32_t limit) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + + REG_SET(OTG_DRR_V_TOTAL_CHANGE, 0, + OTG_DRR_V_TOTAL_CHANGE_LIMIT, limit); +} + + +/* Set DSC-related configuration. 
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format + * dsc_bytes_per_pixel: Bytes per pixel in u3.28 format + * dsc_slice_width: Slice width in pixels + */ +void optc3_set_dsc_config(struct timing_generator *optc, + enum optc_dsc_mode dsc_mode, + uint32_t dsc_bytes_per_pixel, + uint32_t dsc_slice_width) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + optc2_set_dsc_config(optc, dsc_mode, dsc_bytes_per_pixel, dsc_slice_width); + REG_UPDATE(OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, 0); +} + +void optc3_set_odm_bypass(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + enum h_timing_div_mode h_div = H_TIMING_NO_DIV; + + REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, + OPTC_NUM_OF_INPUT_SEGMENT, 0, + OPTC_SEG0_SRC_SEL, optc->inst, + OPTC_SEG1_SRC_SEL, 0xf, + OPTC_SEG2_SRC_SEL, 0xf, + OPTC_SEG3_SRC_SEL, 0xf + ); + + h_div = optc1_is_two_pixels_per_containter(dc_crtc_timing); + REG_UPDATE(OTG_H_TIMING_CNTL, + OTG_H_TIMING_DIV_MODE, h_div); + + REG_SET(OPTC_MEMORY_CONFIG, 0, + OPTC_MEM_SEL, 0); + optc1->opp_count = 1; +} + +void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, + struct dc_crtc_timing *timing) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right) + / opp_cnt; + uint32_t memory_mask = 0; + + /* TODO: In pseudocode but does not affect Maximus; delete this comment if we don't need it on ASIC + * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1); + * Program OTG register MASTER_UPDATE_LOCK_DB_X/Y to the position before DP frame start + * REG_SET_2(OTG_GLOBAL_CONTROL1, 0, + * MASTER_UPDATE_LOCK_DB_X, 160, + * MASTER_UPDATE_LOCK_DB_Y, 240); + */ + + ASSERT(opp_cnt == 2 || opp_cnt == 4); + + /* 2 pieces of memory are required for widths up to 5120, 4 for widths up to 8192, + * however, for ODM combine we can simplify by always using 4. + */ + if (opp_cnt == 2) { + /* To make sure there's no memory overlap, each instance "reserves" 2 + * memories and they are uniquely combined here. + */ + memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); + } else if (opp_cnt == 4) { + /* To make sure there's no memory overlap, each instance "reserves" 1 + * memory and they are uniquely combined here. + */ + memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2) | 0x1 << (opp_id[2] * 2) | 0x1 << (opp_id[3] * 2); + } + + if (REG(OPTC_MEMORY_CONFIG)) + REG_SET(OPTC_MEMORY_CONFIG, 0, + OPTC_MEM_SEL, memory_mask); + + if (opp_cnt == 2) { + REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, + OPTC_NUM_OF_INPUT_SEGMENT, 1, + OPTC_SEG0_SRC_SEL, opp_id[0], + OPTC_SEG1_SRC_SEL, opp_id[1]); + } else if (opp_cnt == 4) { + REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, + OPTC_NUM_OF_INPUT_SEGMENT, 3, + OPTC_SEG0_SRC_SEL, opp_id[0], + OPTC_SEG1_SRC_SEL, opp_id[1], + OPTC_SEG2_SRC_SEL, opp_id[2], + OPTC_SEG3_SRC_SEL, opp_id[3]); + } + + REG_UPDATE(OPTC_WIDTH_CONTROL, + OPTC_SEGMENT_WIDTH, mpcc_hactive); + + REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1); + optc1->opp_count = opp_cnt; +} + +/** + * optc3_set_timing_double_buffer() - DRR double buffering control + * + * Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN, + * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers. + * + * @optc: timing_generator instance. + * @enable: Enable DRR double buffering control if true, disable otherwise.
+ * + * Options: any time, start of frame, dp start of frame (range timing) + */ +static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool enable) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t mode = enable ? 2 : 0; + + REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL, + OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode); +} + +void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, 0, 2, 100000); /* 1 vupdate at 5 Hz */ + +} + +void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) +{ + struct dc *dc = optc->ctx->dc; + + if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams) + dc_dmub_srv_drr_update_cmd(dc, optc->inst, vtotal_min, vtotal_max); + else + optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max); +} + +void optc3_tg_init(struct timing_generator *optc) +{ + optc3_set_timing_double_buffer(optc, true); + optc1_clear_optc_underflow(optc); +} + +static struct timing_generator_funcs dcn30_tg_funcs = { + .validate_timing = optc1_validate_timing, + .program_timing = optc1_program_timing, + .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, + .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, + .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, + .program_global_sync = optc1_program_global_sync, + .enable_crtc = optc2_enable_crtc, + .disable_crtc = optc1_disable_crtc, + /* used by enable_timing_synchronization. Not needed for FPGA */ + .is_counter_moving = optc1_is_counter_moving, + .get_position = optc1_get_position, + .get_frame_count = optc1_get_vblank_counter, + .get_scanoutpos = optc1_get_crtc_scanoutpos, + .get_otg_active_size = optc1_get_otg_active_size, + .set_early_control = optc1_set_early_control, + /* used by enable_timing_synchronization.
Not needed for FPGA */ + .wait_for_state = optc1_wait_for_state, + .set_blank_color = optc3_program_blank_color, + .did_triggered_reset_occur = optc1_did_triggered_reset_occur, + .triplebuffer_lock = optc3_triplebuffer_lock, + .triplebuffer_unlock = optc2_triplebuffer_unlock, + .enable_reset_trigger = optc1_enable_reset_trigger, + .enable_crtc_reset = optc1_enable_crtc_reset, + .disable_reset_trigger = optc1_disable_reset_trigger, + .lock = optc3_lock, + .unlock = optc1_unlock, + .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, + .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, + .enable_optc_clock = optc1_enable_optc_clock, + .set_drr = optc1_set_drr, + .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, + .set_vtotal_min_max = optc3_set_vtotal_min_max, + .set_static_screen_control = optc1_set_static_screen_control, + .program_stereo = optc1_program_stereo, + .is_stereo_left_eye = optc1_is_stereo_left_eye, + .tg_init = optc3_tg_init, + .is_tg_enabled = optc1_is_tg_enabled, + .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, + .clear_optc_underflow = optc1_clear_optc_underflow, + .setup_global_swap_lock = NULL, + .get_crc = optc1_get_crc, + .configure_crc = optc2_configure_crc, + .set_dsc_config = optc3_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, + .set_dwb_source = NULL, + .set_odm_bypass = optc3_set_odm_bypass, + .set_odm_combine = optc3_set_odm_combine, + .get_optc_source = optc2_get_optc_source, + .set_out_mux = optc3_set_out_mux, + .set_drr_trigger_window = optc3_set_drr_trigger_window, + .set_vtotal_change_limit = optc3_set_vtotal_change_limit, + .set_gsl = optc2_set_gsl, + .set_gsl_source_select = optc2_set_gsl_source_select, + .set_vtg_params = optc1_set_vtg_params, + .program_manual_trigger = optc2_program_manual_trigger, + .setup_manual_trigger = optc2_setup_manual_trigger, + .get_hw_timing = optc1_get_hw_timing, + .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear, +}; + +void dcn30_timing_generator_init(struct optc *optc1) +{ + optc1->base.funcs = &dcn30_tg_funcs; + + optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; + optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; + + optc1->min_h_blank = 32; + optc1->min_v_blank = 3; + optc1->min_v_blank_interlace = 5; + optc1->min_h_sync_width = 4; + optc1->min_v_sync_width = 1; +} diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h new file mode 100644 index 0000000000..d3a056c12b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h @@ -0,0 +1,359 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN30_H__ +#define __DC_OPTC_DCN30_H__ + +#include "dcn20/dcn20_optc.h" + +#define V_TOTAL_REGS_DCN30_SRI(inst) + +#define OPTC_COMMON_REG_LIST_DCN3_BASE(inst) \ + SRI(OTG_VSTARTUP_PARAM, OTG, inst),\ + SRI(OTG_VUPDATE_PARAM, OTG, inst),\ + SRI(OTG_VREADY_PARAM, OTG, inst),\ + SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL4, OTG, inst),\ + SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\ + SRI(OTG_H_TOTAL, OTG, inst),\ + SRI(OTG_H_BLANK_START_END, OTG, inst),\ + SRI(OTG_H_SYNC_A, OTG, inst),\ + SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\ + SRI(OTG_H_TIMING_CNTL, OTG, inst),\ + SRI(OTG_V_TOTAL, OTG, inst),\ + SRI(OTG_V_BLANK_START_END, OTG, inst),\ + SRI(OTG_V_SYNC_A, OTG, inst),\ + SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\ + SRI(OTG_CONTROL, OTG, inst),\ + SRI(OTG_STEREO_CONTROL, OTG, inst),\ + SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\ + SRI(OTG_STEREO_STATUS, OTG, inst),\ + SRI(OTG_V_TOTAL_MAX, OTG, inst),\ + SRI(OTG_V_TOTAL_MIN, OTG, inst),\ + SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\ + V_TOTAL_REGS_DCN30_SRI(inst)\ + SRI(OTG_TRIGA_CNTL, OTG, inst),\ + SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ + SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ + SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\ + SRI(OTG_STATUS, OTG, inst),\ + SRI(OTG_STATUS_POSITION, OTG, inst),\ + SRI(OTG_NOM_VERT_POSITION, OTG, inst),\ + SRI(OTG_BLANK_DATA_COLOR, OTG, inst),\ + SRI(OTG_BLANK_DATA_COLOR_EXT, OTG, inst),\ + SRI(OTG_M_CONST_DTO0, OTG, inst),\ + SRI(OTG_M_CONST_DTO1, OTG, inst),\ + SRI(OTG_CLOCK_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ + SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ + SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\ + SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\ + SRI(CONTROL, VTG, inst),\ + SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\ + SRI(OTG_GSL_CONTROL, OTG, inst),\ + SRI(OTG_CRC_CNTL, OTG, inst),\ + SRI(OTG_CRC_CNTL2, OTG, inst),\ + SRI(OTG_CRC0_DATA_RG, OTG, inst),\ + SRI(OTG_CRC0_DATA_B, OTG, inst),\ + SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\ + SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\ + SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\ + SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\ + SR(GSL_SOURCE_SELECT),\ + SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\ + SRI(OTG_DRR_CONTROL, OTG, inst) + + +#define OPTC_COMMON_REG_LIST_DCN3_0(inst) \ + OPTC_COMMON_REG_LIST_DCN3_BASE(inst),\ + SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ + SRI(OTG_GSL_WINDOW_X, OTG, inst),\ + SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ + SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ + SRI(OTG_DSC_START_POSITION, OTG, inst),\ + SRI(OTG_CRC_CNTL2, OTG, inst),\ + SRI(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\ + SRI(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\ + SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ + SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ + SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ + SRI(OPTC_MEMORY_CONFIG, ODM, 
inst),\ + SR(DWB_SOURCE_SELECT) + +#define DCN30_VTOTAL_REGS_SF(mask_sh) + +#define OPTC_COMMON_MASK_SH_LIST_DCN3_BASE(mask_sh)\ + SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ + SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\ + SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\ + SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\ + SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\ + SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_END_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_END_Y, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_Y, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ + SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ + SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\ + SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\ + SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ + SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK_EN, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MIN_EN, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\ + DCN30_VTOTAL_REGS_SF(mask_sh)\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\ + 
SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\ + SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\ + SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\ + SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\ + SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\ + SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\ + SF(OTG0_OTG_BLANK_DATA_COLOR, OTG_BLANK_DATA_COLOR_BLUE_CB, mask_sh),\ + SF(OTG0_OTG_BLANK_DATA_COLOR, OTG_BLANK_DATA_COLOR_GREEN_Y, mask_sh),\ + SF(OTG0_OTG_BLANK_DATA_COLOR, OTG_BLANK_DATA_COLOR_RED_CR, mask_sh),\ + SF(OTG0_OTG_BLANK_DATA_COLOR_EXT, OTG_BLANK_DATA_COLOR_BLUE_CB_EXT, mask_sh),\ + SF(OTG0_OTG_BLANK_DATA_COLOR_EXT, OTG_BLANK_DATA_COLOR_GREEN_Y_EXT, mask_sh),\ + SF(OTG0_OTG_BLANK_DATA_COLOR_EXT, OTG_BLANK_DATA_COLOR_RED_CR_EXT, mask_sh),\ + SF(OTG0_OTG_M_CONST_DTO0, OTG_M_CONST_DTO_PHASE, mask_sh),\ + SF(OTG0_OTG_M_CONST_DTO1, OTG_M_CONST_DTO_MODULO, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ + SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ + SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ + SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, 
mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\ + SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh),\ + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + +#define OPTC_COMMON_MASK_SH_LIST_DCN3_0(mask_sh)\ + OPTC_COMMON_MASK_SH_LIST_DCN3_BASE(mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ + SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ + SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ + SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\ + SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_DRR_TRIGGER_WINDOW, 
OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ + SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ + SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh) + +#define OPTC_COMMON_MASK_SH_LIST_DCN30(mask_sh)\ + OPTC_COMMON_MASK_SH_LIST_DCN3_BASE(mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ + SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG2_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG3_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ + SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ + SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\ + SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ + SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ + SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh) + +void dcn30_timing_generator_init(struct optc *optc1); + +void optc3_set_out_mux(struct timing_generator *optc, enum otg_out_mux_dest dest); + +void optc3_lock(struct timing_generator *optc); + +void optc3_lock_doublebuffer_enable(struct timing_generator *optc); + +void optc3_lock_doublebuffer_disable(struct timing_generator *optc); + +void optc3_set_drr_trigger_window(struct timing_generator *optc, + uint32_t window_start, uint32_t window_end); + +void optc3_triplebuffer_lock(struct timing_generator *optc); + +void optc3_program_blank_color(struct timing_generator *optc, + const struct tg_color *blank_color); + +void 
optc3_set_vtotal_change_limit(struct timing_generator *optc, + uint32_t limit); + +void optc3_set_dsc_config(struct timing_generator *optc, + enum optc_dsc_mode dsc_mode, + uint32_t dsc_bytes_per_pixel, + uint32_t dsc_slice_width); + +void optc3_set_timing_db_mode(struct timing_generator *optc, bool enable); + +void optc3_set_odm_bypass(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing); +void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, + struct dc_crtc_timing *timing); +void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc); +void optc3_tg_init(struct timing_generator *optc); +void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max); +#endif /* __DC_OPTC_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c new file mode 100644 index 0000000000..b3cfcb8879 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c @@ -0,0 +1,185 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "dcn301_optc.h" +#include "dc.h" +#include "dcn_calc_math.h" +#include "dc_dmub_srv.h" + +#include "dml/dcn30/dcn30_fpu.h" +#include "dc_trace.h" + +#define REG(reg)\ + optc1->tg_regs->reg + +#define CTX \ + optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + optc1->tg_shift->field_name, optc1->tg_mask->field_name + + +/** + * optc301_set_drr() - Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*. + * + * @optc: timing_generator instance. + * @params: parameters used for Dynamic Refresh Rate. 
+ */
+void optc301_set_drr(
+	struct timing_generator *optc,
+	const struct drr_params *params)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	if (params != NULL &&
+	    params->vertical_total_max > 0 &&
+	    params->vertical_total_min > 0) {
+
+		if (params->vertical_total_mid != 0) {
+
+			REG_SET(OTG_V_TOTAL_MID, 0,
+				OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+			REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+				     OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+				     OTG_VTOTAL_MID_FRAME_NUM,
+				     (uint8_t)params->vertical_total_mid_frame_num);
+		}
+
+		optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
+
+		REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
+			     OTG_V_TOTAL_MIN_SEL, 1,
+			     OTG_V_TOTAL_MAX_SEL, 1,
+			     OTG_FORCE_LOCK_ON_EVENT, 0,
+			     OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
+			     OTG_SET_V_TOTAL_MIN_MASK, 0);
+
+		// Setup manual flow control for EOF via TRIG_A
+		optc->funcs->setup_manual_trigger(optc);
+	} else {
+		REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+			     OTG_SET_V_TOTAL_MIN_MASK, 0,
+			     OTG_V_TOTAL_MIN_SEL, 0,
+			     OTG_V_TOTAL_MAX_SEL, 0,
+			     OTG_FORCE_LOCK_ON_EVENT, 0);
+
+		optc->funcs->set_vtotal_min_max(optc, 0, 0);
+	}
+}
+
+void optc301_setup_manual_trigger(struct timing_generator *optc)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	REG_SET_8(OTG_TRIGA_CNTL, 0,
+		  OTG_TRIGA_SOURCE_SELECT, 21,
+		  OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst,
+		  OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1,
+		  OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
+		  OTG_TRIGA_POLARITY_SELECT, 0,
+		  OTG_TRIGA_FREQUENCY_SELECT, 0,
+		  OTG_TRIGA_DELAY, 0,
+		  OTG_TRIGA_CLEAR, 1);
+}
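To make the min/max semantics of optc301_set_drr() concrete, the sketch below derives a drr_params range for a 48-60 Hz variable-refresh window. It is an illustration only, not code from this patch; the timing constants (148.5 MHz pixel clock, htotal 2200, vtotal 1125 for 1080p at 60 Hz) are assumed, and struct drr_params comes from dc's timing generator headers.

/* Illustrative sketch under assumed numbers: optc301_set_drr() subtracts 1
 * from each value before writing OTG_V_TOTAL_MIN/MAX.
 */
static void fill_drr_params_example(struct drr_params *params)
{
	const unsigned int pix_clk_hz = 148500000;	/* assumed 1080p60 pixel clock */
	const unsigned int htotal = 2200;		/* assumed horizontal total */
	const unsigned int min_refresh_hz = 48;

	params->vertical_total_min = 1125;	/* nominal frame length -> 60 Hz */
	params->vertical_total_max = pix_clk_hz / (htotal * min_refresh_hz);	/* = 1406 lines -> ~48 Hz */
	params->vertical_total_mid = 0;		/* mid-point replacement not used */
	params->vertical_total_mid_frame_num = 0;
}

A frame then stretches between 1125 and 1406 total lines; at the long end, 148500000 / (2200 * 1406) is approximately 48 Hz.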
+
+static struct timing_generator_funcs dcn30_tg_funcs = {
+	.validate_timing = optc1_validate_timing,
+	.program_timing = optc1_program_timing,
+	.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+	.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+	.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+	.program_global_sync = optc1_program_global_sync,
+	.enable_crtc = optc2_enable_crtc,
+	.disable_crtc = optc1_disable_crtc,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+	.is_counter_moving = optc1_is_counter_moving,
+	.get_position = optc1_get_position,
+	.get_frame_count = optc1_get_vblank_counter,
+	.get_scanoutpos = optc1_get_crtc_scanoutpos,
+	.get_otg_active_size = optc1_get_otg_active_size,
+	.set_early_control = optc1_set_early_control,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+	.wait_for_state = optc1_wait_for_state,
+	.set_blank_color = optc3_program_blank_color,
+	.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+	.triplebuffer_lock = optc3_triplebuffer_lock,
+	.triplebuffer_unlock = optc2_triplebuffer_unlock,
+	.enable_reset_trigger = optc1_enable_reset_trigger,
+	.enable_crtc_reset = optc1_enable_crtc_reset,
+	.disable_reset_trigger = optc1_disable_reset_trigger,
+	.lock = optc3_lock,
+	.unlock = optc1_unlock,
+	.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+	.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
+	.enable_optc_clock = optc1_enable_optc_clock,
+	.set_drr = optc301_set_drr,
+	.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+	.set_vtotal_min_max = optc3_set_vtotal_min_max,
+	.set_static_screen_control = optc1_set_static_screen_control,
+	.program_stereo = optc1_program_stereo,
+	.is_stereo_left_eye = optc1_is_stereo_left_eye,
+	.tg_init = optc3_tg_init,
+	.is_tg_enabled = optc1_is_tg_enabled,
+	.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+	.clear_optc_underflow = optc1_clear_optc_underflow,
+	.setup_global_swap_lock = NULL,
+	.get_crc = optc1_get_crc,
+	.configure_crc = optc2_configure_crc,
+	.set_dsc_config = optc3_set_dsc_config,
+	.get_dsc_status = optc2_get_dsc_status,
+	.set_dwb_source = NULL,
+	.set_odm_bypass = optc3_set_odm_bypass,
+	.set_odm_combine = optc3_set_odm_combine,
+	.get_optc_source = optc2_get_optc_source,
+	.set_out_mux = optc3_set_out_mux,
+	.set_drr_trigger_window = optc3_set_drr_trigger_window,
+	.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
+	.set_gsl = optc2_set_gsl,
+	.set_gsl_source_select = optc2_set_gsl_source_select,
+	.set_vtg_params = optc1_set_vtg_params,
+	.program_manual_trigger = optc2_program_manual_trigger,
+	.setup_manual_trigger = optc301_setup_manual_trigger,
+	.get_hw_timing = optc1_get_hw_timing,
+	.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
+};
+
+void dcn301_timing_generator_init(struct optc *optc1)
+{
+	optc1->base.funcs = &dcn30_tg_funcs;
+
+	optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+	optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+	optc1->min_h_blank = 32;
+	optc1->min_v_blank = 3;
+	optc1->min_v_blank_interlace = 5;
+	optc1->min_h_sync_width = 4;
+	optc1->min_v_sync_width = 1;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h
new file mode 100644
index 0000000000..b49585682a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN301_H__ +#define __DC_OPTC_DCN301_H__ + +#include "dcn20/dcn20_optc.h" +#include "dcn30/dcn30_optc.h" + +void dcn301_timing_generator_init(struct optc *optc1); +void optc301_setup_manual_trigger(struct timing_generator *optc); +void optc301_set_drr(struct timing_generator *optc, const struct drr_params *params); + +#endif /* __DC_OPTC_DCN301_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c new file mode 100644 index 0000000000..63a677c8ee --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c @@ -0,0 +1,310 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dcn31_optc.h" + +#include "dcn30/dcn30_optc.h" +#include "reg_helper.h" +#include "dc.h" +#include "dcn_calc_math.h" + +#define REG(reg)\ + optc1->tg_regs->reg + +#define CTX \ + optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + optc1->tg_shift->field_name, optc1->tg_mask->field_name + +static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, + struct dc_crtc_timing *timing) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right) + / opp_cnt; + uint32_t memory_mask = 0; + int mem_count_per_opp = (mpcc_hactive + 2559) / 2560; + + /* Assume less than 6 pipes */ + if (opp_cnt == 4) { + if (mem_count_per_opp == 1) + memory_mask = 0xf; + else { + ASSERT(mem_count_per_opp == 2); + memory_mask = 0xff; + } + } else if (mem_count_per_opp == 1) + memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2); + else if (mem_count_per_opp == 2) + memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); + else if (mem_count_per_opp == 3) + memory_mask = 0x77; + else if (mem_count_per_opp == 4) + memory_mask = 0xff; + + if (REG(OPTC_MEMORY_CONFIG)) + REG_SET(OPTC_MEMORY_CONFIG, 0, + OPTC_MEM_SEL, memory_mask); + + if (opp_cnt == 2) { + REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, + OPTC_NUM_OF_INPUT_SEGMENT, 1, + OPTC_SEG0_SRC_SEL, opp_id[0], + OPTC_SEG1_SRC_SEL, opp_id[1]); + } else if (opp_cnt == 4) { + REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, + OPTC_NUM_OF_INPUT_SEGMENT, 3, + OPTC_SEG0_SRC_SEL, opp_id[0], + OPTC_SEG1_SRC_SEL, opp_id[1], + OPTC_SEG2_SRC_SEL, opp_id[2], + OPTC_SEG3_SRC_SEL, opp_id[3]); + } + + REG_UPDATE(OPTC_WIDTH_CONTROL, + OPTC_SEGMENT_WIDTH, mpcc_hactive); + + REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1); + optc1->opp_count = opp_cnt; +} + +/* + * Enable CRTC - call ASIC Control Object to enable Timing generator. + */ +static bool optc31_enable_crtc(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + /* opp instance for OTG, 1 to 1 mapping and odm will adjust */ + REG_UPDATE(OPTC_DATA_SOURCE_SELECT, + OPTC_SEG0_SRC_SEL, optc->inst); + + /* VTG enable first is for HW workaround */ + REG_UPDATE(CONTROL, + VTG0_ENABLE, 1); + + REG_SEQ_START(); + + /* Enable CRTC */ + REG_UPDATE_2(OTG_CONTROL, + OTG_DISABLE_POINT_CNTL, 2, + OTG_MASTER_EN, 1); + + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); + + return true; +} + +/* disable_crtc - call ASIC Control Object to disable Timing generator. */ +static bool optc31_disable_crtc(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + /* disable otg request until end of the first line + * in the vertical blank region + */ + REG_UPDATE(OTG_CONTROL, + OTG_MASTER_EN, 0); + + REG_UPDATE(CONTROL, + VTG0_ENABLE, 0); + + /* CRTC disabled, so disable clock. */ + REG_WAIT(OTG_CLOCK_CONTROL, + OTG_BUSY, 0, + 1, 100000); + optc1_clear_optc_underflow(optc); + + return true; +} + +bool optc31_immediate_disable_crtc(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_UPDATE_2(OTG_CONTROL, + OTG_DISABLE_POINT_CNTL, 0, + OTG_MASTER_EN, 0); + + REG_UPDATE(CONTROL, + VTG0_ENABLE, 0); + + /* CRTC disabled, so disable clock. 
*/
+	REG_WAIT(OTG_CLOCK_CONTROL,
+		 OTG_BUSY, 0,
+		 1, 100000);
+
+	/* clear the false state */
+	optc1_clear_optc_underflow(optc);
+
+	return true;
+}
+
+void optc31_set_drr(
+	struct timing_generator *optc,
+	const struct drr_params *params)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	if (params != NULL &&
+	    params->vertical_total_max > 0 &&
+	    params->vertical_total_min > 0) {
+
+		if (params->vertical_total_mid != 0) {
+
+			REG_SET(OTG_V_TOTAL_MID, 0,
+				OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+			REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+				     OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+				     OTG_VTOTAL_MID_FRAME_NUM,
+				     (uint8_t)params->vertical_total_mid_frame_num);
+		}
+
+		optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
+
+		/*
+		 * MIN_MASK_EN is gone and MASK is now always enabled.
+		 *
+		 * To get it to work with manual trigger we need to make sure
+		 * we program the correct bit.
+		 */
+		REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+			     OTG_V_TOTAL_MIN_SEL, 1,
+			     OTG_V_TOTAL_MAX_SEL, 1,
+			     OTG_FORCE_LOCK_ON_EVENT, 0,
+			     OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+
+		// Setup manual flow control for EOF via TRIG_A
+		optc->funcs->setup_manual_trigger(optc);
+	} else {
+		REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+			     OTG_SET_V_TOTAL_MIN_MASK, 0,
+			     OTG_V_TOTAL_MIN_SEL, 0,
+			     OTG_V_TOTAL_MAX_SEL, 0,
+			     OTG_FORCE_LOCK_ON_EVENT, 0);
+
+		optc->funcs->set_vtotal_min_max(optc, 0, 0);
+	}
+}
+
+void optc3_init_odm(struct timing_generator *optc)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
+		  OPTC_NUM_OF_INPUT_SEGMENT, 0,
+		  OPTC_SEG0_SRC_SEL, optc->inst,
+		  OPTC_SEG1_SRC_SEL, 0xf,
+		  OPTC_SEG2_SRC_SEL, 0xf,
+		  OPTC_SEG3_SRC_SEL, 0xf);
+
+	REG_SET(OTG_H_TIMING_CNTL, 0,
+		OTG_H_TIMING_DIV_MODE, 0);
+
+	REG_SET(OPTC_MEMORY_CONFIG, 0,
+		OPTC_MEM_SEL, 0);
+	optc1->opp_count = 1;
+}
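The memory-mask arithmetic in optc31_set_odm_combine() above is easiest to check with concrete numbers. The standalone sketch below traces it for an assumed 7680-pixel-wide timing split across two OPPs with opp_id[] = {0, 1}; the figures are illustrative, not taken from this patch.

#include <stdio.h>

/* Traces the OPTC_MEM_SEL mask that optc31_set_odm_combine() would compute
 * for the assumed two-OPP case described above.
 */
int main(void)
{
	int opp_id[2] = {0, 1};				/* assumed OPP instance ids */
	int mpcc_hactive = 7680 / 2;			/* 3840 pixels per ODM segment */
	int mem_count_per_opp = (mpcc_hactive + 2559) / 2560;	/* = 2 */
	unsigned int memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);

	/* Prints mem_count_per_opp=2 memory_mask=0xf: two memory instances are
	 * claimed per OPP, matching the mem_count_per_opp == 2 branch.
	 */
	printf("mem_count_per_opp=%d memory_mask=0x%x\n", mem_count_per_opp, memory_mask);
	return 0;
}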
+
+static struct timing_generator_funcs dcn31_tg_funcs = {
+	.validate_timing = optc1_validate_timing,
+	.program_timing = optc1_program_timing,
+	.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+	.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+	.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+	.program_global_sync = optc1_program_global_sync,
+	.enable_crtc = optc31_enable_crtc,
+	.disable_crtc = optc31_disable_crtc,
+	.immediate_disable_crtc = optc31_immediate_disable_crtc,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+	.is_counter_moving = optc1_is_counter_moving,
+	.get_position = optc1_get_position,
+	.get_frame_count = optc1_get_vblank_counter,
+	.get_scanoutpos = optc1_get_crtc_scanoutpos,
+	.get_otg_active_size = optc1_get_otg_active_size,
+	.set_early_control = optc1_set_early_control,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+	.wait_for_state = optc1_wait_for_state,
+	.set_blank_color = optc3_program_blank_color,
+	.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+	.triplebuffer_lock = optc3_triplebuffer_lock,
+	.triplebuffer_unlock = optc2_triplebuffer_unlock,
+	.enable_reset_trigger = optc1_enable_reset_trigger,
+	.enable_crtc_reset = optc1_enable_crtc_reset,
+	.disable_reset_trigger = optc1_disable_reset_trigger,
+	.lock = optc3_lock,
+	.unlock = optc1_unlock,
+	.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+	.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
+	.enable_optc_clock = optc1_enable_optc_clock,
+	.set_drr = optc31_set_drr,
+	.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+	.set_vtotal_min_max = optc1_set_vtotal_min_max,
+	.set_static_screen_control = optc1_set_static_screen_control,
+	.program_stereo = optc1_program_stereo,
+	.is_stereo_left_eye = optc1_is_stereo_left_eye,
+	.tg_init = optc3_tg_init,
+	.is_tg_enabled = optc1_is_tg_enabled,
+	.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+	.clear_optc_underflow = optc1_clear_optc_underflow,
+	.setup_global_swap_lock = NULL,
+	.get_crc = optc1_get_crc,
+	.configure_crc = optc2_configure_crc,
+	.set_dsc_config = optc3_set_dsc_config,
+	.get_dsc_status = optc2_get_dsc_status,
+	.set_dwb_source = NULL,
+	.set_odm_bypass = optc3_set_odm_bypass,
+	.set_odm_combine = optc31_set_odm_combine,
+	.get_optc_source = optc2_get_optc_source,
+	.set_out_mux = optc3_set_out_mux,
+	.set_drr_trigger_window = optc3_set_drr_trigger_window,
+	.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
+	.set_gsl = optc2_set_gsl,
+	.set_gsl_source_select = optc2_set_gsl_source_select,
+	.set_vtg_params = optc1_set_vtg_params,
+	.program_manual_trigger = optc2_program_manual_trigger,
+	.setup_manual_trigger = optc2_setup_manual_trigger,
+	.get_hw_timing = optc1_get_hw_timing,
+	.init_odm = optc3_init_odm,
+};
+
+void dcn31_timing_generator_init(struct optc *optc1)
+{
+	optc1->base.funcs = &dcn31_tg_funcs;
+
+	optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+	optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+	optc1->min_h_blank = 32;
+	optc1->min_v_blank = 3;
+	optc1->min_v_blank_interlace = 5;
+	optc1->min_h_sync_width = 4;
+	optc1->min_v_sync_width = 1;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
new file mode 100644
index 0000000000..30b81a448c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN31_H__ +#define __DC_OPTC_DCN31_H__ + +#include "dcn10/dcn10_optc.h" + +#define OPTC_COMMON_REG_LIST_DCN3_1(inst) \ + SRI(OTG_VSTARTUP_PARAM, OTG, inst),\ + SRI(OTG_VUPDATE_PARAM, OTG, inst),\ + SRI(OTG_VREADY_PARAM, OTG, inst),\ + SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL4, OTG, inst),\ + SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\ + SRI(OTG_H_TOTAL, OTG, inst),\ + SRI(OTG_H_BLANK_START_END, OTG, inst),\ + SRI(OTG_H_SYNC_A, OTG, inst),\ + SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\ + SRI(OTG_H_TIMING_CNTL, OTG, inst),\ + SRI(OTG_V_TOTAL, OTG, inst),\ + SRI(OTG_V_BLANK_START_END, OTG, inst),\ + SRI(OTG_V_SYNC_A, OTG, inst),\ + SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\ + SRI(OTG_CONTROL, OTG, inst),\ + SRI(OTG_STEREO_CONTROL, OTG, inst),\ + SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\ + SRI(OTG_STEREO_STATUS, OTG, inst),\ + SRI(OTG_V_TOTAL_MAX, OTG, inst),\ + SRI(OTG_V_TOTAL_MIN, OTG, inst),\ + SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\ + SRI(OTG_TRIGA_CNTL, OTG, inst),\ + SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ + SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ + SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\ + SRI(OTG_STATUS, OTG, inst),\ + SRI(OTG_STATUS_POSITION, OTG, inst),\ + SRI(OTG_NOM_VERT_POSITION, OTG, inst),\ + SRI(OTG_M_CONST_DTO0, OTG, inst),\ + SRI(OTG_M_CONST_DTO1, OTG, inst),\ + SRI(OTG_CLOCK_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ + SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ + SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\ + SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\ + SRI(CONTROL, VTG, inst),\ + SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\ + SRI(OTG_GSL_CONTROL, OTG, inst),\ + SRI(OTG_CRC_CNTL, OTG, inst),\ + SRI(OTG_CRC0_DATA_RG, OTG, inst),\ + SRI(OTG_CRC0_DATA_B, OTG, inst),\ + SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\ + SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\ + SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\ + SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\ + SR(GSL_SOURCE_SELECT),\ + SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ + SRI(OTG_GSL_WINDOW_X, OTG, inst),\ + SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ + SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ + SRI(OTG_DSC_START_POSITION, OTG, inst),\ + SRI(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\ + SRI(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\ + SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ + SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ + SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ + SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ + SRI(OTG_CRC_CNTL2, OTG, inst),\ + SR(DWB_SOURCE_SELECT),\ + SRI(OTG_DRR_CONTROL, OTG, inst) + +#define OPTC_COMMON_MASK_SH_LIST_DCN3_1(mask_sh)\ + SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ + SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\ + SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\ + SF(OTG0_OTG_VREADY_PARAM, 
VREADY_OFFSET, mask_sh),\ + SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\ + SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_END_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_END_Y, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_Y, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ + SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ + SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\ + SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\ + SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ + SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MIN_EN, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, 
OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\ + SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\ + SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\ + SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\ + SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\ + SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\ + SF(OTG0_OTG_M_CONST_DTO0, OTG_M_CONST_DTO_PHASE, mask_sh),\ + SF(OTG0_OTG_M_CONST_DTO1, OTG_M_CONST_DTO_MODULO, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ + SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ + SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ + SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, 
mask_sh),\ + SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ + SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG2_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG3_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ + SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ + SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\ + SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ + SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ + SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\ + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + +void dcn31_timing_generator_init(struct optc *optc1); + +bool optc31_immediate_disable_crtc(struct timing_generator *optc); + +void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params); + +void optc3_init_odm(struct timing_generator *optc); + +#endif /* __DC_OPTC_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c new file mode 100644 index 0000000000..0086cafb0f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dcn314_optc.h" + +#include "dcn30/dcn30_optc.h" +#include "dcn31/dcn31_optc.h" +#include "reg_helper.h" +#include "dc.h" +#include "dcn_calc_math.h" + +#define REG(reg)\ + optc1->tg_regs->reg + +#define CTX \ + optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + optc1->tg_shift->field_name, optc1->tg_mask->field_name + +/* + * Enable CRTC + * Enable CRTC - call ASIC Control Object to enable Timing generator. + */ + +static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, + struct dc_crtc_timing *timing) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t memory_mask = 0; + int h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; + int mpcc_hactive = h_active / opp_cnt; + /* Each memory instance is 2048x(314x2) bits to support half line of 4096 */ + int odm_mem_count = (h_active + 2047) / 2048; + + /* + * display <= 4k : 2 memories + 2 pipes + * 4k < display <= 8k : 4 memories + 2 pipes + * 8k < display <= 12k : 6 memories + 4 pipes + */ + if (opp_cnt == 4) { + if (odm_mem_count <= 2) + memory_mask = 0x3; + else if (odm_mem_count <= 4) + memory_mask = 0xf; + else + memory_mask = 0x3f; + } else { + if (odm_mem_count <= 2) + memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2); + else if (odm_mem_count <= 4) + memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); + else + memory_mask = 0x77; + } + + REG_SET(OPTC_MEMORY_CONFIG, 0, + OPTC_MEM_SEL, memory_mask); + + if (opp_cnt == 2) { + REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, + OPTC_NUM_OF_INPUT_SEGMENT, 1, + OPTC_SEG0_SRC_SEL, opp_id[0], + OPTC_SEG1_SRC_SEL, opp_id[1]); + } else if (opp_cnt == 4) { + REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, + OPTC_NUM_OF_INPUT_SEGMENT, 3, + OPTC_SEG0_SRC_SEL, opp_id[0], + OPTC_SEG1_SRC_SEL, opp_id[1], + OPTC_SEG2_SRC_SEL, opp_id[2], + OPTC_SEG3_SRC_SEL, opp_id[3]); + } + + REG_UPDATE(OPTC_WIDTH_CONTROL, + OPTC_SEGMENT_WIDTH, mpcc_hactive); + + REG_UPDATE(OTG_H_TIMING_CNTL, + OTG_H_TIMING_DIV_MODE, opp_cnt - 1); + optc1->opp_count = opp_cnt; +} + +static bool optc314_enable_crtc(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + /* opp instance for OTG, 1 to 1 mapping and odm will adjust */ + REG_UPDATE(OPTC_DATA_SOURCE_SELECT, + OPTC_SEG0_SRC_SEL, optc->inst); + + /* VTG enable first is for HW workaround */ + REG_UPDATE(CONTROL, + VTG0_ENABLE, 1); + + 
REG_SEQ_START();
+
+	/* Enable CRTC */
+	REG_UPDATE_2(OTG_CONTROL,
+		     OTG_DISABLE_POINT_CNTL, 2,
+		     OTG_MASTER_EN, 1);
+
+	REG_SEQ_SUBMIT();
+	REG_SEQ_WAIT_DONE();
+
+	return true;
+}
+
+/* disable_crtc */
+static bool optc314_disable_crtc(struct timing_generator *optc)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	/* disable otg request until end of the first line
+	 * in the vertical blank region
+	 */
+	REG_UPDATE(OTG_CONTROL,
+		   OTG_MASTER_EN, 0);
+
+	REG_UPDATE(CONTROL,
+		   VTG0_ENABLE, 0);
+
+	/* CRTC disabled, so disable clock. */
+	REG_WAIT(OTG_CLOCK_CONTROL,
+		 OTG_BUSY, 0,
+		 1, 100000);
+
+	return true;
+}
+
+static void optc314_phantom_crtc_post_enable(struct timing_generator *optc)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	/* Disable immediately. */
+	REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 0, OTG_MASTER_EN, 0);
+
+	/* CRTC disabled, so disable clock. */
+	REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
+}
+
+static void optc314_set_odm_bypass(struct timing_generator *optc,
+		const struct dc_crtc_timing *dc_crtc_timing)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+	enum h_timing_div_mode h_div = H_TIMING_NO_DIV;
+
+	REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
+		  OPTC_NUM_OF_INPUT_SEGMENT, 0,
+		  OPTC_SEG0_SRC_SEL, optc->inst,
+		  OPTC_SEG1_SRC_SEL, 0xf,
+		  OPTC_SEG2_SRC_SEL, 0xf,
+		  OPTC_SEG3_SRC_SEL, 0xf);
+
+	h_div = optc1_is_two_pixels_per_containter(dc_crtc_timing);
+	REG_UPDATE(OTG_H_TIMING_CNTL,
+		   OTG_H_TIMING_DIV_MODE, h_div);
+
+	REG_SET(OPTC_MEMORY_CONFIG, 0,
+		OPTC_MEM_SEL, 0);
+	optc1->opp_count = 1;
+}
+
+static void optc314_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	REG_UPDATE(OTG_H_TIMING_CNTL,
+		   OTG_H_TIMING_DIV_MODE_MANUAL, manual_mode ? 1 : 0);
+}
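The odm_mem_count bucketing in optc314_set_odm_combine() above follows from the 2048-pixel half-line capacity of each memory instance noted in its comment: an assumed 3840-pixel (4k-wide) active region gives (3840 + 2047) / 2048 = 2 memory instances, and an assumed 7680-pixel (8k-wide) region gives (7680 + 2047) / 2048 = 4, matching the "display <= 4k : 2 memories" and "4k < display <= 8k : 4 memories" rows of the mapping.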
+	.wait_for_state = optc1_wait_for_state,
+	.set_blank_color = optc3_program_blank_color,
+	.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+	.triplebuffer_lock = optc3_triplebuffer_lock,
+	.triplebuffer_unlock = optc2_triplebuffer_unlock,
+	.enable_reset_trigger = optc1_enable_reset_trigger,
+	.enable_crtc_reset = optc1_enable_crtc_reset,
+	.disable_reset_trigger = optc1_disable_reset_trigger,
+	.lock = optc3_lock,
+	.unlock = optc1_unlock,
+	.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+	.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
+	.enable_optc_clock = optc1_enable_optc_clock,
+	.set_drr = optc31_set_drr,
+	.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+	.set_vtotal_min_max = optc1_set_vtotal_min_max,
+	.set_static_screen_control = optc1_set_static_screen_control,
+	.program_stereo = optc1_program_stereo,
+	.is_stereo_left_eye = optc1_is_stereo_left_eye,
+	.tg_init = optc3_tg_init,
+	.is_tg_enabled = optc1_is_tg_enabled,
+	.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+	.clear_optc_underflow = optc1_clear_optc_underflow,
+	.setup_global_swap_lock = NULL,
+	.get_crc = optc1_get_crc,
+	.configure_crc = optc1_configure_crc,
+	.set_dsc_config = optc3_set_dsc_config,
+	.get_dsc_status = optc2_get_dsc_status,
+	.set_dwb_source = NULL,
+	.get_optc_source = optc2_get_optc_source,
+	.set_out_mux = optc3_set_out_mux,
+	.set_drr_trigger_window = optc3_set_drr_trigger_window,
+	.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
+	.set_gsl = optc2_set_gsl,
+	.set_gsl_source_select = optc2_set_gsl_source_select,
+	.set_vtg_params = optc1_set_vtg_params,
+	.program_manual_trigger = optc2_program_manual_trigger,
+	.setup_manual_trigger = optc2_setup_manual_trigger,
+	.get_hw_timing = optc1_get_hw_timing,
+	.init_odm = optc3_init_odm,
+	.set_odm_bypass = optc314_set_odm_bypass,
+	.set_odm_combine = optc314_set_odm_combine,
+	.set_h_timing_div_manual_mode = optc314_set_h_timing_div_manual_mode,
+};
+
+void dcn314_timing_generator_init(struct optc *optc1)
+{
+	optc1->base.funcs = &dcn314_tg_funcs;
+
+	optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+	optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+	optc1->min_h_blank = 32;
+	optc1->min_v_blank = 3;
+	optc1->min_v_blank_interlace = 5;
+	optc1->min_h_sync_width = 4;
+	optc1->min_v_sync_width = 1;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
new file mode 100644
index 0000000000..99c098e761
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
@@ -0,0 +1,255 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN314_H__ +#define __DC_OPTC_DCN314_H__ + +#include "dcn10/dcn10_optc.h" + +#define OPTC_COMMON_REG_LIST_DCN3_14(inst) \ + SRI(OTG_VSTARTUP_PARAM, OTG, inst),\ + SRI(OTG_VUPDATE_PARAM, OTG, inst),\ + SRI(OTG_VREADY_PARAM, OTG, inst),\ + SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL4, OTG, inst),\ + SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\ + SRI(OTG_H_TOTAL, OTG, inst),\ + SRI(OTG_H_BLANK_START_END, OTG, inst),\ + SRI(OTG_H_SYNC_A, OTG, inst),\ + SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\ + SRI(OTG_H_TIMING_CNTL, OTG, inst),\ + SRI(OTG_V_TOTAL, OTG, inst),\ + SRI(OTG_V_BLANK_START_END, OTG, inst),\ + SRI(OTG_V_SYNC_A, OTG, inst),\ + SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\ + SRI(OTG_CONTROL, OTG, inst),\ + SRI(OTG_STEREO_CONTROL, OTG, inst),\ + SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\ + SRI(OTG_STEREO_STATUS, OTG, inst),\ + SRI(OTG_V_TOTAL_MAX, OTG, inst),\ + SRI(OTG_V_TOTAL_MIN, OTG, inst),\ + SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\ + SRI(OTG_TRIGA_CNTL, OTG, inst),\ + SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ + SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ + SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\ + SRI(OTG_STATUS, OTG, inst),\ + SRI(OTG_STATUS_POSITION, OTG, inst),\ + SRI(OTG_NOM_VERT_POSITION, OTG, inst),\ + SRI(OTG_M_CONST_DTO0, OTG, inst),\ + SRI(OTG_M_CONST_DTO1, OTG, inst),\ + SRI(OTG_CLOCK_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ + SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ + SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\ + SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\ + SRI(CONTROL, VTG, inst),\ + SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\ + SRI(OTG_GSL_CONTROL, OTG, inst),\ + SRI(OTG_CRC_CNTL, OTG, inst),\ + SRI(OTG_CRC0_DATA_RG, OTG, inst),\ + SRI(OTG_CRC0_DATA_B, OTG, inst),\ + SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\ + SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\ + SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\ + SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\ + SR(GSL_SOURCE_SELECT),\ + SRI(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ + SRI(OTG_GSL_WINDOW_X, OTG, inst),\ + SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ + SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ + SRI(OTG_DSC_START_POSITION, OTG, inst),\ + SRI(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\ + SRI(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\ + SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ + SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ + SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ + SRI(OPTC_MEMORY_CONFIG, ODM, inst),\ + SRI(OTG_DRR_CONTROL, OTG, inst) + +#define OPTC_COMMON_MASK_SH_LIST_DCN3_14(mask_sh)\ + SF(OTG0_OTG_VSTARTUP_PARAM, 
VSTARTUP_START, mask_sh),\ + SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\ + SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\ + SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\ + SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\ + SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_END_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_END_Y, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_Y, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ + SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ + SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\ + SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\ + SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ + SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MIN_EN, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\ + 
SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\ + SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\ + SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\ + SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\ + SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\ + SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\ + SF(OTG0_OTG_M_CONST_DTO0, OTG_M_CONST_DTO_PHASE, mask_sh),\ + SF(OTG0_OTG_M_CONST_DTO1, OTG_M_CONST_DTO_MODULO, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ + SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ + SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ + SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, 
OTG_CRC0_WINDOWB_X_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\ + SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ + SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG2_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG3_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ + SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ + SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ + SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ + SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ + SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ + SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + +void dcn314_timing_generator_init(struct optc *optc1); + +#endif /* __DC_OPTC_DCN314_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c new file mode 100644 index 0000000000..52eab8fccb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -0,0 +1,379 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dcn32_optc.h" + +#include "dcn30/dcn30_optc.h" +#include "dcn31/dcn31_optc.h" +#include "reg_helper.h" +#include "dc.h" +#include "dcn_calc_math.h" +#include "dc_dmub_srv.h" + +#define REG(reg)\ + optc1->tg_regs->reg + +#define CTX \ + optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + optc1->tg_shift->field_name, optc1->tg_mask->field_name + +static void optc32_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, + struct dc_crtc_timing *timing) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t memory_mask = 0; + int h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; + int mpcc_hactive = h_active / opp_cnt; + /* Each memory instance is 2048x(32x2) bits to support half line of 4096 */ + int odm_mem_count = (h_active + 2047) / 2048; + + /* + * display <= 4k : 2 memories + 2 pipes + * 4k < display <= 8k : 4 memories + 2 pipes + * 8k < display <= 12k : 6 memories + 4 pipes + */ + if (opp_cnt == 4) { + if (odm_mem_count <= 2) + memory_mask = 0x3; + else if (odm_mem_count <= 4) + memory_mask = 0xf; + else + memory_mask = 0x3f; + } else { + if (odm_mem_count <= 2) + memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2); + else if (odm_mem_count <= 4) + memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); + else + memory_mask = 0x77; + } + + REG_SET(OPTC_MEMORY_CONFIG, 0, + OPTC_MEM_SEL, memory_mask); + + if (opp_cnt == 2) { + REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, + OPTC_NUM_OF_INPUT_SEGMENT, 1, + OPTC_SEG0_SRC_SEL, opp_id[0], + OPTC_SEG1_SRC_SEL, opp_id[1]); + } else if (opp_cnt == 4) { + REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, + OPTC_NUM_OF_INPUT_SEGMENT, 3, + OPTC_SEG0_SRC_SEL, opp_id[0], + OPTC_SEG1_SRC_SEL, opp_id[1], + OPTC_SEG2_SRC_SEL, opp_id[2], + OPTC_SEG3_SRC_SEL, opp_id[3]); + } + + REG_UPDATE(OPTC_WIDTH_CONTROL, + OPTC_SEGMENT_WIDTH, mpcc_hactive); + + REG_UPDATE(OTG_H_TIMING_CNTL, + OTG_H_TIMING_DIV_MODE, opp_cnt - 1); + optc1->opp_count = opp_cnt; +} + +void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments) +{ + struct optc *optc1 = DCN10TG_FROM_TG(tg); + int segments; + + REG_GET(OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, &segments); + + switch (segments) { + case 0: + *odm_combine_segments = 1; + break; + case 1: + *odm_combine_segments = 2; + break; + case 3: + *odm_combine_segments = 4; + break; + /* 2 is reserved */ + 
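+	/* (The field encodes the segment count minus one - the driver only
+	 * writes 0, 1 or 3 - so a readback of 2 would mean an unsupported
+	 * 3-way combine.)
+	 */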
case 2: + default: + *odm_combine_segments = -1; + } +} + +void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg) +{ + struct optc *optc1 = DCN10TG_FROM_TG(tg); + + REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000); +} + +void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_UPDATE(OTG_H_TIMING_CNTL, + OTG_H_TIMING_DIV_MODE_MANUAL, manual_mode ? 1 : 0); +} +/** + * optc32_enable_crtc() - Enable CRTC - call ASIC Control Object to enable Timing generator. + * + * @optc: timing_generator instance. + * + * Return: If CRTC is enabled, return true. + */ +static bool optc32_enable_crtc(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + /* opp instance for OTG, 1 to 1 mapping and odm will adjust */ + REG_UPDATE(OPTC_DATA_SOURCE_SELECT, + OPTC_SEG0_SRC_SEL, optc->inst); + + /* VTG enable first is for HW workaround */ + REG_UPDATE(CONTROL, + VTG0_ENABLE, 1); + + REG_SEQ_START(); + + /* Enable CRTC */ + REG_UPDATE_2(OTG_CONTROL, + OTG_DISABLE_POINT_CNTL, 2, + OTG_MASTER_EN, 1); + + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); + + return true; +} + +/* disable_crtc */ +static bool optc32_disable_crtc(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT, + OPTC_SEG0_SRC_SEL, 0xf, + OPTC_SEG1_SRC_SEL, 0xf, + OPTC_SEG2_SRC_SEL, 0xf, + OPTC_SEG3_SRC_SEL, 0xf, + OPTC_NUM_OF_INPUT_SEGMENT, 0); + + REG_UPDATE(OPTC_MEMORY_CONFIG, + OPTC_MEM_SEL, 0); + + /* disable otg request until end of the first line + * in the vertical blank region + */ + REG_UPDATE(OTG_CONTROL, + OTG_MASTER_EN, 0); + + REG_UPDATE(CONTROL, + VTG0_ENABLE, 0); + + /* CRTC disabled, so disable clock. */ + REG_WAIT(OTG_CLOCK_CONTROL, + OTG_BUSY, 0, + 1, 150000); + + return true; +} + +static void optc32_phantom_crtc_post_enable(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + /* Disable immediately. */ + REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 0, OTG_MASTER_EN, 0); + + /* CRTC disabled, so disable clock. */ + REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000); +} + +static void optc32_disable_phantom_otg(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT, + OPTC_SEG0_SRC_SEL, 0xf, + OPTC_SEG1_SRC_SEL, 0xf, + OPTC_SEG2_SRC_SEL, 0xf, + OPTC_SEG3_SRC_SEL, 0xf, + OPTC_NUM_OF_INPUT_SEGMENT, 0); + + REG_UPDATE(OTG_CONTROL, OTG_MASTER_EN, 0); +} + +void optc32_set_odm_bypass(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + enum h_timing_div_mode h_div = H_TIMING_NO_DIV; + + REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0, + OPTC_NUM_OF_INPUT_SEGMENT, 0, + OPTC_SEG0_SRC_SEL, optc->inst, + OPTC_SEG1_SRC_SEL, 0xf, + OPTC_SEG2_SRC_SEL, 0xf, + OPTC_SEG3_SRC_SEL, 0xf + ); + + h_div = optc1_is_two_pixels_per_containter(dc_crtc_timing); + REG_UPDATE(OTG_H_TIMING_CNTL, + OTG_H_TIMING_DIV_MODE, h_div); + + REG_SET(OPTC_MEMORY_CONFIG, 0, + OPTC_MEM_SEL, 0); + optc1->opp_count = 1; +} + +static void optc32_setup_manual_trigger(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + struct dc *dc = optc->ctx->dc; + + if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams) + dc_dmub_srv_set_drr_manual_trigger_cmd(dc, optc->inst); + else { + /* + * MIN_MASK_EN is gone and MASK is now always enabled. 
+		 *
+		 * To get it to work with manual trigger we need to make sure
+		 * we program the correct bit.
+		 */
+		REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+				OTG_V_TOTAL_MIN_SEL, 1,
+				OTG_V_TOTAL_MAX_SEL, 1,
+				OTG_FORCE_LOCK_ON_EVENT, 0,
+				OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
+	}
+}
+
+static void optc32_set_drr(
+	struct timing_generator *optc,
+	const struct drr_params *params)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	if (params != NULL &&
+			params->vertical_total_max > 0 &&
+			params->vertical_total_min > 0) {
+
+		if (params->vertical_total_mid != 0) {
+
+			REG_SET(OTG_V_TOTAL_MID, 0,
+				OTG_V_TOTAL_MID, params->vertical_total_mid - 1);
+
+			REG_UPDATE_2(OTG_V_TOTAL_CONTROL,
+					OTG_VTOTAL_MID_REPLACING_MAX_EN, 1,
+					OTG_VTOTAL_MID_FRAME_NUM,
+					(uint8_t)params->vertical_total_mid_frame_num);
+
+		}
+
+		optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
+	}
+
+	optc32_setup_manual_trigger(optc);
+}
+
+static struct timing_generator_funcs dcn32_tg_funcs = {
+	.validate_timing = optc1_validate_timing,
+	.program_timing = optc1_program_timing,
+	.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+	.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+	.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+	.program_global_sync = optc1_program_global_sync,
+	.enable_crtc = optc32_enable_crtc,
+	.disable_crtc = optc32_disable_crtc,
+	.phantom_crtc_post_enable = optc32_phantom_crtc_post_enable,
+	.disable_phantom_crtc = optc32_disable_phantom_otg,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+	.is_counter_moving = optc1_is_counter_moving,
+	.get_position = optc1_get_position,
+	.get_frame_count = optc1_get_vblank_counter,
+	.get_scanoutpos = optc1_get_crtc_scanoutpos,
+	.get_otg_active_size = optc1_get_otg_active_size,
+	.set_early_control = optc1_set_early_control,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+	.wait_for_state = optc1_wait_for_state,
+	.set_blank_color = optc3_program_blank_color,
+	.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+	.triplebuffer_lock = optc3_triplebuffer_lock,
+	.triplebuffer_unlock = optc2_triplebuffer_unlock,
+	.enable_reset_trigger = optc1_enable_reset_trigger,
+	.enable_crtc_reset = optc1_enable_crtc_reset,
+	.disable_reset_trigger = optc1_disable_reset_trigger,
+	.lock = optc3_lock,
+	.unlock = optc1_unlock,
+	.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+	.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
+	.enable_optc_clock = optc1_enable_optc_clock,
+	.set_drr = optc32_set_drr,
+	.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+	.set_vtotal_min_max = optc3_set_vtotal_min_max,
+	.set_static_screen_control = optc1_set_static_screen_control,
+	.program_stereo = optc1_program_stereo,
+	.is_stereo_left_eye = optc1_is_stereo_left_eye,
+	.tg_init = optc3_tg_init,
+	.is_tg_enabled = optc1_is_tg_enabled,
+	.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+	.clear_optc_underflow = optc1_clear_optc_underflow,
+	.setup_global_swap_lock = NULL,
+	.get_crc = optc1_get_crc,
+	.configure_crc = optc1_configure_crc,
+	.set_dsc_config = optc3_set_dsc_config,
+	.get_dsc_status = optc2_get_dsc_status,
+	.set_dwb_source = NULL,
+	.set_odm_bypass = optc32_set_odm_bypass,
+	.set_odm_combine = optc32_set_odm_combine,
+	.get_odm_combine_segments = optc32_get_odm_combine_segments,
+	.wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
+	.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
+	.get_optc_source = optc2_get_optc_source,
+	.set_out_mux = optc3_set_out_mux,
+	.set_drr_trigger_window = optc3_set_drr_trigger_window,
+	.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
+	.set_gsl = optc2_set_gsl,
+	.set_gsl_source_select = optc2_set_gsl_source_select,
+	.set_vtg_params = optc1_set_vtg_params,
+	.program_manual_trigger = optc2_program_manual_trigger,
+	.setup_manual_trigger = optc2_setup_manual_trigger,
+	.get_hw_timing = optc1_get_hw_timing,
+};
+
+void dcn32_timing_generator_init(struct optc *optc1)
+{
+	optc1->base.funcs = &dcn32_tg_funcs;
+
+	optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+	optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+	optc1->min_h_blank = 32;
+	optc1->min_v_blank = 3;
+	optc1->min_v_blank_interlace = 5;
+	optc1->min_h_sync_width = 4;
+	optc1->min_v_sync_width = 1;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
new file mode 100644
index 0000000000..0c2c146955
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN32_H__ +#define __DC_OPTC_DCN32_H__ + +#include "dcn10/dcn10_optc.h" + +#define OPTC_COMMON_MASK_SH_LIST_DCN3_2(mask_sh)\ + SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\ + SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\ + SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\ + SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\ + SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\ + SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_END_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_START_Y, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_END_Y, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL4, DIG_UPDATE_POSITION_Y, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ + SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ + SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ + SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\ + SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\ + SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\ + SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\ + SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_MODE, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\ + SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\ + SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\ + SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\ + 
SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MIN_EN, mask_sh),\ + SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_VTOTAL_MID_REPLACING_MAX_EN, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\ + SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_POLARITY_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FREQUENCY_SELECT, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_DELAY, mask_sh),\ + SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_CLEAR, mask_sh),\ + SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\ + SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\ + SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\ + SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\ + SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\ + SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\ + SF(OTG0_OTG_M_CONST_DTO0, OTG_M_CONST_DTO_PHASE, mask_sh),\ + SF(OTG0_OTG_M_CONST_DTO1, OTG_M_CONST_DTO_MODULO, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ + SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\ + SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\ + SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\ + SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\ + SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\ + SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, mask_sh),\ + SF(OTG0_OTG_VERT_SYNC_CONTROL, OTG_AUTO_FORCE_VSYNC_MODE, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL0_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL1_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\ + 
SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\ + SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh),\ + SF(OTG0_OTG_TRIGA_MANUAL_TRIG, OTG_TRIGA_MANUAL_TRIG, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL0_READY_SOURCE_SEL, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL1_READY_SOURCE_SEL, mask_sh),\ + SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ + SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG2_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG3_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\ + SF(ODM0_OPTC_MEMORY_CONFIG, OPTC_MEM_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ + SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_SEGMENT_WIDTH, mask_sh),\ + SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\ + SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\ + SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\ + SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE_MANUAL, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh) + +void dcn32_timing_generator_init(struct optc *optc1); +void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode); +void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments); +void optc32_set_odm_bypass(struct 
timing_generator *optc,
+	const struct dc_crtc_timing *dc_crtc_timing);
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
+
+#endif /* __DC_OPTC_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
new file mode 100644
index 0000000000..5b15475088
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn35_optc.h"
+
+#include "dcn30/dcn30_optc.h"
+#include "dcn31/dcn31_optc.h"
+#include "dcn32/dcn32_optc.h"
+#include "reg_helper.h"
+#include "dc.h"
+#include "dcn_calc_math.h"
+
+#define REG(reg)\
+	optc1->tg_regs->reg
+
+#define CTX \
+	optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+	optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+/**
+ * optc35_set_odm_combine() - Configure ODM combine for up to four OPPs.
+ *
+ * @optc: Output Pipe Timing Combine instance reference.
+ * @opp_id: Output Plane Processor instance ID.
+ * @opp_cnt: Output Plane Processor count.
+ * @timing: Timing parameters used to configure DCN blocks.
+ *
+ * Return: void.
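+ *
+ * The programming sequence here matches the DCN3.2 variant,
+ * optc32_set_odm_combine(); see the inline comment below for how the ODM
+ * line memories are provisioned per display width.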
+ */
+static void optc35_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
+		struct dc_crtc_timing *timing)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+	uint32_t memory_mask = 0;
+	int h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
+	int mpcc_hactive = h_active / opp_cnt;
+	/* Each memory instance is 2048x(32x2) bits to support half line of 4096 */
+	int odm_mem_count = (h_active + 2047) / 2048;
+
+	/*
+	 * display <= 4k : 2 memories + 2 pipes
+	 * 4k < display <= 8k : 4 memories + 2 pipes
+	 * 8k < display <= 12k : 6 memories + 4 pipes
+	 */
+	if (opp_cnt == 4) {
+		if (odm_mem_count <= 2)
+			memory_mask = 0x3;
+		else if (odm_mem_count <= 4)
+			memory_mask = 0xf;
+		else
+			memory_mask = 0x3f;
+	} else {
+		if (odm_mem_count <= 2)
+			memory_mask = 0x1 << (opp_id[0] * 2) | 0x1 << (opp_id[1] * 2);
+		else if (odm_mem_count <= 4)
+			memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2);
+		else
+			memory_mask = 0x77;
+	}
+
+	REG_SET(OPTC_MEMORY_CONFIG, 0,
+		OPTC_MEM_SEL, memory_mask);
+
+	if (opp_cnt == 2) {
+		REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
+				OPTC_NUM_OF_INPUT_SEGMENT, 1,
+				OPTC_SEG0_SRC_SEL, opp_id[0],
+				OPTC_SEG1_SRC_SEL, opp_id[1]);
+	} else if (opp_cnt == 4) {
+		REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
+				OPTC_NUM_OF_INPUT_SEGMENT, 3,
+				OPTC_SEG0_SRC_SEL, opp_id[0],
+				OPTC_SEG1_SRC_SEL, opp_id[1],
+				OPTC_SEG2_SRC_SEL, opp_id[2],
+				OPTC_SEG3_SRC_SEL, opp_id[3]);
+	}
+
+	REG_UPDATE(OPTC_WIDTH_CONTROL,
+			OPTC_SEGMENT_WIDTH, mpcc_hactive);
+
+	REG_UPDATE(OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
+	optc1->opp_count = opp_cnt;
+}
+
+static bool optc35_enable_crtc(struct timing_generator *optc)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	/* opp instance for OTG, 1 to 1 mapping and odm will adjust */
+	REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
+			OPTC_SEG0_SRC_SEL, optc->inst);
+
+	/* VTG enable first is for HW workaround */
+	REG_UPDATE(CONTROL,
+			VTG0_ENABLE, 1);
+
+	REG_SEQ_START();
+
+	/* Enable CRTC */
+	REG_UPDATE_2(OTG_CONTROL,
+			OTG_DISABLE_POINT_CNTL, 2,
+			OTG_MASTER_EN, 1);
+
+	REG_SEQ_SUBMIT();
+	REG_SEQ_WAIT_DONE();
+
+	return true;
+}
+
+/* disable_crtc */
+static bool optc35_disable_crtc(struct timing_generator *optc)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	REG_UPDATE_5(OPTC_DATA_SOURCE_SELECT,
+			OPTC_SEG0_SRC_SEL, 0xf,
+			OPTC_SEG1_SRC_SEL, 0xf,
+			OPTC_SEG2_SRC_SEL, 0xf,
+			OPTC_SEG3_SRC_SEL, 0xf,
+			OPTC_NUM_OF_INPUT_SEGMENT, 0);
+
+	REG_UPDATE(OPTC_MEMORY_CONFIG,
+			OPTC_MEM_SEL, 0);
+
+	/* disable otg request until end of the first line
+	 * in the vertical blank region
+	 */
+	REG_UPDATE(OTG_CONTROL,
+			OTG_MASTER_EN, 0);
+
+	REG_UPDATE(CONTROL,
+			VTG0_ENABLE, 0);
+
+	/* CRTC disabled, so disable clock. */
+	REG_WAIT(OTG_CLOCK_CONTROL,
+			OTG_BUSY, 0,
+			1, 100000);
+	optc1_clear_optc_underflow(optc);
+
+	return true;
+}
+
+static void optc35_phantom_crtc_post_enable(struct timing_generator *optc)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	/* Disable immediately. */
+	REG_UPDATE_2(OTG_CONTROL, OTG_DISABLE_POINT_CNTL, 0, OTG_MASTER_EN, 0);
+
+	/* CRTC disabled, so disable clock. */
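+	/* The wait below polls OTG_BUSY at 1 us intervals, for up to 100000
+	 * tries, before the REG_WAIT helper gives up.
+	 */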
+	REG_WAIT(OTG_CLOCK_CONTROL, OTG_BUSY, 0, 1, 100000);
+}
+
+static bool optc35_configure_crc(struct timing_generator *optc,
+		const struct crc_params *params)
+{
+	struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+	if (!optc1_is_tg_enabled(optc))
+		return false;
+	REG_WRITE(OTG_CRC_CNTL, 0);
+	if (!params->enable)
+		return true;
+	REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+			OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+			OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+	REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+			OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+			OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+	REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+			OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+			OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+	REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+			OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+			OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+	if (optc1->base.ctx->dc->debug.otg_crc_db && optc1->tg_mask->OTG_CRC_WINDOW_DB_EN != 0) {
+		REG_UPDATE_4(OTG_CRC_CNTL,
+				OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+				OTG_CRC0_SELECT, params->selection,
+				OTG_CRC_EN, 1,
+				OTG_CRC_WINDOW_DB_EN, 1);
+	} else {
+		REG_UPDATE_3(OTG_CRC_CNTL,
+				OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+				OTG_CRC0_SELECT, params->selection,
+				OTG_CRC_EN, 1);
+	}
+	return true;
+}
+
+static struct timing_generator_funcs dcn35_tg_funcs = {
+	.validate_timing = optc1_validate_timing,
+	.program_timing = optc1_program_timing,
+	.setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+	.setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+	.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+	.program_global_sync = optc1_program_global_sync,
+	.enable_crtc = optc35_enable_crtc,
+	.disable_crtc = optc35_disable_crtc,
+	.immediate_disable_crtc = optc31_immediate_disable_crtc,
+	.phantom_crtc_post_enable = optc35_phantom_crtc_post_enable,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+	.is_counter_moving = optc1_is_counter_moving,
+	.get_position = optc1_get_position,
+	.get_frame_count = optc1_get_vblank_counter,
+	.get_scanoutpos = optc1_get_crtc_scanoutpos,
+	.get_otg_active_size = optc1_get_otg_active_size,
+	.set_early_control = optc1_set_early_control,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+	.wait_for_state = optc1_wait_for_state,
+	.set_blank_color = optc3_program_blank_color,
+	.did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+	.triplebuffer_lock = optc3_triplebuffer_lock,
+	.triplebuffer_unlock = optc2_triplebuffer_unlock,
+	.enable_reset_trigger = optc1_enable_reset_trigger,
+	.enable_crtc_reset = optc1_enable_crtc_reset,
+	.disable_reset_trigger = optc1_disable_reset_trigger,
+	.lock = optc3_lock,
+	.unlock = optc1_unlock,
+	.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+	.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
+	.enable_optc_clock = optc1_enable_optc_clock,
+	.set_drr = optc31_set_drr,
+	.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+	.set_vtotal_min_max = optc1_set_vtotal_min_max,
+	.set_static_screen_control = optc1_set_static_screen_control,
+	.program_stereo = optc1_program_stereo,
+	.is_stereo_left_eye = optc1_is_stereo_left_eye,
+	.tg_init = optc3_tg_init,
+	.is_tg_enabled = optc1_is_tg_enabled,
+	.is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+	.clear_optc_underflow = optc1_clear_optc_underflow,
+	.setup_global_swap_lock = NULL,
+	.get_crc = optc1_get_crc,
+	.configure_crc = optc35_configure_crc,
+	.set_dsc_config = optc3_set_dsc_config,
+	.get_dsc_status = optc2_get_dsc_status,
+	.set_dwb_source = NULL,
+	.set_odm_bypass = optc32_set_odm_bypass,
+	.set_odm_combine = optc35_set_odm_combine,
+	.get_optc_source = optc2_get_optc_source,
+	.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
+	.set_out_mux = optc3_set_out_mux,
+	.set_drr_trigger_window = optc3_set_drr_trigger_window,
+	.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
+	.set_gsl = optc2_set_gsl,
+	.set_gsl_source_select = optc2_set_gsl_source_select,
+	.set_vtg_params = optc1_set_vtg_params,
+	.program_manual_trigger = optc2_program_manual_trigger,
+	.setup_manual_trigger = optc2_setup_manual_trigger,
+	.get_hw_timing = optc1_get_hw_timing,
+	.init_odm = optc3_init_odm,
+};
+
+void dcn35_timing_generator_init(struct optc *optc1)
+{
+	optc1->base.funcs = &dcn35_tg_funcs;
+
+	optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+	optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+	optc1->min_h_blank = 32;
+	optc1->min_v_blank = 3;
+	optc1->min_v_blank_interlace = 5;
+	optc1->min_h_sync_width = 4;
+	optc1->min_v_sync_width = 1;
+
+	dcn35_timing_generator_set_fgcg(
+		optc1, CTX->dc->debug.enable_fine_grain_clock_gating.bits.optc);
+}
+
+void dcn35_timing_generator_set_fgcg(struct optc *optc1, bool enable)
+{
+	REG_UPDATE(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, !enable);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
new file mode 100644
index 0000000000..1f422e4c46
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN35_H__ +#define __DC_OPTC_DCN35_H__ + +#include "dcn10/dcn10_optc.h" +#include "dcn32/dcn32_optc.h" +#define OPTC_COMMON_MASK_SH_LIST_DCN3_5(mask_sh)\ + OPTC_COMMON_MASK_SH_LIST_DCN3_2(mask_sh),\ + SF(OTG0_OTG_CRC_CNTL, OTG_CRC_WINDOW_DB_EN, mask_sh),\ + SF(OTG0_OTG_CRC1_DATA_RG, CRC1_R_CR, mask_sh),\ + SF(OTG0_OTG_CRC1_DATA_RG, CRC1_G_Y, mask_sh),\ + SF(OTG0_OTG_CRC1_DATA_B, CRC1_B_CB, mask_sh),\ + SF(OTG0_OTG_CRC2_DATA_RG, CRC2_R_CR, mask_sh),\ + SF(OTG0_OTG_CRC2_DATA_RG, CRC2_G_Y, mask_sh),\ + SF(OTG0_OTG_CRC2_DATA_B, CRC2_B_CB, mask_sh),\ + SF(OTG0_OTG_CRC3_DATA_RG, CRC3_R_CR, mask_sh),\ + SF(OTG0_OTG_CRC3_DATA_RG, CRC3_G_Y, mask_sh),\ + SF(OTG0_OTG_CRC3_DATA_B, CRC3_B_CB, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL, OTG_CRC1_WINDOWA_X_START, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL, OTG_CRC1_WINDOWA_X_END, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL, OTG_CRC1_WINDOWA_Y_START, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL, OTG_CRC1_WINDOWA_Y_END, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL, OTG_CRC1_WINDOWB_X_START, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL, OTG_CRC1_WINDOWB_X_END, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL, OTG_CRC1_WINDOWB_Y_START, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL, OTG_CRC1_WINDOWB_Y_END, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK, OTG_CRC0_WINDOWA_X_START_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL_READBACK, OTG_CRC0_WINDOWA_X_END_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK, OTG_CRC0_WINDOWA_Y_START_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL_READBACK, OTG_CRC0_WINDOWA_Y_END_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK, OTG_CRC0_WINDOWB_X_START_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL_READBACK, OTG_CRC0_WINDOWB_X_END_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK, OTG_CRC0_WINDOWB_Y_START_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL_READBACK, OTG_CRC0_WINDOWB_Y_END_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK, OTG_CRC1_WINDOWA_X_START_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWA_X_CONTROL_READBACK, OTG_CRC1_WINDOWA_X_END_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK, OTG_CRC1_WINDOWA_Y_START_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWA_Y_CONTROL_READBACK, OTG_CRC1_WINDOWA_Y_END_READBACK, mask_sh),\ + 
SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK, OTG_CRC1_WINDOWB_X_START_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK, OTG_CRC1_WINDOWB_X_END_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_START_READBACK, mask_sh),\ + SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\ + SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh) + +void dcn35_timing_generator_init(struct optc *optc1); + +void dcn35_timing_generator_set_fgcg(struct optc *optc1, bool enable); + +#endif /* __DC_OPTC_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile new file mode 100644 index 0000000000..0a75ed8962 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile @@ -0,0 +1,199 @@ + +# Copyright 2022 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'resource' sub-component of DAL. 
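+# Every block below follows the same pattern: name the per-ASIC object
+# list, prefix it with the matching resource/<asic> directory via
+# $(addprefix), and append the result to AMD_DISPLAY_FILES.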
+# + + +############################################################################### +# DCE +############################################################################### + +RESOURCE_DCE100 = dce100_resource.o + +AMD_DAL_RESOURCE_DCE100 = $(addprefix $(AMDDALPATH)/dc/resource/dce100/,$(RESOURCE_DCE100)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE100) + +############################################################################### + +RESOURCE_DCE110 = dce110_resource.o + +AMD_DAL_RESOURCE_DCE110 = $(addprefix $(AMDDALPATH)/dc/resource/dce110/,$(RESOURCE_DCE110)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE110) + +############################################################################### + +RESOURCE_DCE112 = dce112_resource.o + +AMD_DAL_RESOURCE_DCE112 = $(addprefix $(AMDDALPATH)/dc/resource/dce112/,$(RESOURCE_DCE112)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE112) + +############################################################################### + +RESOURCE_DCE120 = dce120_resource.o + +AMD_DAL_RESOURCE_DCE120 = $(addprefix $(AMDDALPATH)/dc/resource/dce120/,$(RESOURCE_DCE120)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE120) + +############################################################################### + +RESOURCE_DCE80 = dce80_resource.o + +AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80) + +ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN +############################################################################### + +RESOURCE_DCN10 = dcn10_resource.o + +AMD_DAL_RESOURCE_DCN10 = $(addprefix $(AMDDALPATH)/dc/resource/dcn10/,$(RESOURCE_DCN10)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN10) + +############################################################################### + +RESOURCE_DCN20 = dcn20_resource.o + +AMD_DAL_RESOURCE_DCN20 = $(addprefix $(AMDDALPATH)/dc/resource/dcn20/,$(RESOURCE_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN20) + +############################################################################### + +RESOURCE_DCN201 = dcn201_resource.o + +AMD_DAL_RESOURCE_DCN201 = $(addprefix $(AMDDALPATH)/dc/resource/dcn201/,$(RESOURCE_DCN201)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN201) + +############################################################################### + +RESOURCE_DCN21 = dcn21_resource.o + +AMD_DAL_RESOURCE_DCN21 = $(addprefix $(AMDDALPATH)/dc/resource/dcn21/,$(RESOURCE_DCN21)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN21) + +############################################################################### + +############################################################################### + +############################################################################### + +RESOURCE_DCN30 = dcn30_resource.o + +AMD_DAL_RESOURCE_DCN30 = $(addprefix $(AMDDALPATH)/dc/resource/dcn30/,$(RESOURCE_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN30) + +############################################################################### + +RESOURCE_DCN301 = dcn301_resource.o + +AMD_DAL_RESOURCE_DCN301 = $(addprefix $(AMDDALPATH)/dc/resource/dcn301/,$(RESOURCE_DCN301)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN301) + +############################################################################### + +RESOURCE_DCN302 = dcn302_resource.o + +AMD_DAL_RESOURCE_DCN302 = $(addprefix $(AMDDALPATH)/dc/resource/dcn302/,$(RESOURCE_DCN302)) + +AMD_DISPLAY_FILES += 
$(AMD_DAL_RESOURCE_DCN302) + +############################################################################### + +RESOURCE_DCN303 = dcn303_resource.o + +AMD_DAL_RESOURCE_DCN303 = $(addprefix $(AMDDALPATH)/dc/resource/dcn303/,$(RESOURCE_DCN303)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN303) + +############################################################################### + +RESOURCE_DCN31 = dcn31_resource.o + +AMD_DAL_RESOURCE_DCN31 = $(addprefix $(AMDDALPATH)/dc/resource/dcn31/,$(RESOURCE_DCN31)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN31) + +############################################################################### + +RESOURCE_DCN314 = dcn314_resource.o + +AMD_DAL_RESOURCE_DCN314 = $(addprefix $(AMDDALPATH)/dc/resource/dcn314/,$(RESOURCE_DCN314)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN314) + +############################################################################### + +RESOURCE_DCN315 = dcn315_resource.o + +AMD_DAL_RESOURCE_DCN315 = $(addprefix $(AMDDALPATH)/dc/resource/dcn315/,$(RESOURCE_DCN315)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN315) + +############################################################################### + +RESOURCE_DCN316 = dcn316_resource.o + +AMD_DAL_RESOURCE_DCN316 = $(addprefix $(AMDDALPATH)/dc/resource/dcn316/,$(RESOURCE_DCN316)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN316) + +############################################################################### + +RESOURCE_DCN32 = dcn32_resource.o + +AMD_DAL_RESOURCE_DCN32 = $(addprefix $(AMDDALPATH)/dc/resource/dcn32/,$(RESOURCE_DCN32)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN32) + +############################################################################### + +RESOURCE_DCN321 = dcn321_resource.o + +AMD_DAL_RESOURCE_DCN321 = $(addprefix $(AMDDALPATH)/dc/resource/dcn321/,$(RESOURCE_DCN321)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN321) + +############################################################################### + +RESOURCE_DCN35 = dcn35_resource.o + +AMD_DAL_RESOURCE_DCN35 = $(addprefix $(AMDDALPATH)/dc/resource/dcn35/,$(RESOURCE_DCN35)) + +AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN35) + +############################################################################### + +############################################################################### + +endif diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c new file mode 100644 index 0000000000..53a5f4cb64 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -0,0 +1,1179 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "link_encoder.h" +#include "stream_encoder.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dce110/dce110_timing_generator.h" +#include "irq/dce110/irq_service_dce110.h" +#include "dce/dce_link_encoder.h" +#include "dce/dce_stream_encoder.h" +#include "dce/dce_mem_input.h" +#include "dce/dce_ipp.h" +#include "dce/dce_transform.h" +#include "dce/dce_opp.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "dce100/dce100_hwseq.h" +#include "dce/dce_panel_cntl.h" + +#include "reg_helper.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + +#include "dce/dce_dmcu.h" +#include "dce/dce_aux.h" +#include "dce/dce_abm.h" +#include "dce/dce_i2c.h" + +#include "dce100_resource.h" + +#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT +#include "gmc/gmc_8_2_d.h" +#include "gmc/gmc_8_2_sh_mask.h" +#endif + +#ifndef mmDP_DPHY_INTERNAL_CTRL + #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 + #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 + #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 + #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 + #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 + #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 + #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 + #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 + #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 + #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 +#endif + +#ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_2 0x05CB + #define mmBIOS_SCRATCH_3 0x05CC + #define mmBIOS_SCRATCH_6 0x05CF +#endif + +#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL + #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC + #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC + #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC + #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC + #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC + #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC + #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC + #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC +#endif + +#ifndef mmDP_DPHY_FAST_TRAINING + #define mmDP_DPHY_FAST_TRAINING 0x4ABC + #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC + #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC + #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC + #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC + #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC + #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC + #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC +#endif + +static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = { + { + .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), + } +}; + +/* set register offset */ +#define SR(reg_name)\ + 
.reg_name = mm ## reg_name + +/* set register offset with instance */ +#define SRI(reg_name, block, id)\ + .reg_name = mm ## block ## id ## _ ## reg_name + +#define ipp_regs(id)\ +[id] = {\ + IPP_DCE100_REG_LIST_DCE_BASE(id)\ +} + +static const struct dce_ipp_registers ipp_regs[] = { + ipp_regs(0), + ipp_regs(1), + ipp_regs(2), + ipp_regs(3), + ipp_regs(4), + ipp_regs(5) +}; + +static const struct dce_ipp_shift ipp_shift = { + IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce_ipp_mask ipp_mask = { + IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +#define transform_regs(id)\ +[id] = {\ + XFM_COMMON_REG_LIST_DCE100(id)\ +} + +static const struct dce_transform_registers xfm_regs[] = { + transform_regs(0), + transform_regs(1), + transform_regs(2), + transform_regs(3), + transform_regs(4), + transform_regs(5) +}; + +static const struct dce_transform_shift xfm_shift = { + XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_transform_mask xfm_mask = { + XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) +}; + +#define aux_regs(id)\ +[id] = {\ + AUX_REG_LIST(id)\ +} + +static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4), + aux_regs(5) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4), + hpd_regs(5) +}; + +#define link_regs(id)\ +[id] = {\ + LE_DCE100_REG_LIST(id)\ +} + +static const struct dce110_link_enc_registers link_enc_regs[] = { + link_regs(0), + link_regs(1), + link_regs(2), + link_regs(3), + link_regs(4), + link_regs(5), + link_regs(6), +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_COMMON_REG_LIST_DCE_BASE(id),\ + .AFMT_CNTL = 0,\ +} + +static const struct dce110_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4), + stream_enc_regs(5), + stream_enc_regs(6) +}; + +static const struct dce_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT) +}; + +static const struct dce_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) +}; + +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCE_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_DCE_100_REG_LIST(id),\ +} + +static const struct dce_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3), + opp_regs(4), + opp_regs(5) +}; + +static const struct dce_opp_shift opp_shift = { + OPP_COMMON_MASK_SH_LIST_DCE_100(__SHIFT) +}; + +static const struct dce_opp_mask opp_mask = { + OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK) +}; +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST(id), \ + .AUX_RESET_MASK = 0 \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4), + aux_engine_regs(5) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + 
audio_regs(5), + audio_regs(6), +}; + +static const struct dce_audio_shift audio_shift = { + AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define clk_src_regs(id)\ +[id] = {\ + CS_COMMON_REG_LIST_DCE_100_110(id),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0), + clk_src_regs(1), + clk_src_regs(2) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +static const struct dce_dmcu_registers dmcu_regs = { + DMCU_DCE110_COMMON_REG_LIST() +}; + +static const struct dce_dmcu_shift dmcu_shift = { + DMCU_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_dmcu_mask dmcu_mask = { + DMCU_MASK_SH_LIST_DCE110(_MASK) +}; + +static const struct dce_abm_registers abm_regs = { + ABM_DCE110_COMMON_REG_LIST() +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCE110(_MASK) +}; + +#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 + +static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, + .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 +}; + +static const struct resource_caps res_cap = { + .num_timing_generator = 6, + .num_audio = 6, + .num_stream_encoder = 6, + .num_pll = 3, + .num_ddc = 6, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCE_RGB, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = true + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 1, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 1, + .fp16 = 1 + } +}; + +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + +#define CTX ctx +#define REG(reg) mm ## reg + +#ifndef mmCC_DC_HDMI_STRAPS +#define mmCC_DC_HDMI_STRAPS 0x1918 +#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 +#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 +#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 +#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 +#endif + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + case TRANSMITTER_UNIPHY_B: + return 1; + case TRANSMITTER_UNIPHY_C: + return 2; + case TRANSMITTER_UNIPHY_D: + return 3; + case TRANSMITTER_UNIPHY_E: + return 4; + case TRANSMITTER_UNIPHY_F: + return 5; + case TRANSMITTER_UNIPHY_G: + return 6; + default: + ASSERT(0); + return 0; + } +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + REG_GET_2(CC_DC_HDMI_STRAPS, + HDMI_DISABLE, &straps->hdmi_disable, + AUDIO_STREAM_NUMBER, &straps->audio_stream_number); + + REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); +} + +static struct audio *create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct timing_generator *dce100_timing_generator_create( + struct dc_context *ctx, + uint32_t instance, + const struct dce110_timing_generator_offsets *offsets) +{ + struct dce110_timing_generator *tg110 = + kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); + + if (!tg110) + return NULL; + + 
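+	/* DCE10 reuses the DCE11 timing generator implementation. */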
dce110_timing_generator_construct(tg110, ctx, instance, offsets); + return &tg110->base; +} + +static struct stream_encoder *dce100_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dce110_stream_encoder *enc110 = + kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); + + if (!enc110) + return NULL; + + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], &se_shift, &se_mask); + return &enc110->base; +} + +#define SRII(reg_name, block, id)\ + .reg_name[id] = mm ## block ## id ## _ ## reg_name + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCE10_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCE10_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCE10_MASK_SH_LIST(_MASK) +}; + +static struct dce_hwseq *dce100_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = create_audio, + .create_stream_encoder = dce100_stream_encoder_create, + .create_hwseq = dce100_hwseq_create, +}; + +#define mi_inst_regs(id) { \ + MI_DCE8_REG_LIST(id), \ + .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ +} +static const struct dce_mem_input_registers mi_regs[] = { + mi_inst_regs(0), + mi_inst_regs(1), + mi_inst_regs(2), + mi_inst_regs(3), + mi_inst_regs(4), + mi_inst_regs(5), +}; + +static const struct dce_mem_input_shift mi_shifts = { + MI_DCE8_MASK_SH_LIST(__SHIFT), + .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT +}; + +static const struct dce_mem_input_mask mi_masks = { + MI_DCE8_MASK_SH_LIST(_MASK), + .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCE10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE10_AUX_MASK_SH_LIST(_MASK) +}; + +static struct mem_input *dce100_mem_input_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), + GFP_KERNEL); + + if (!dce_mi) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); + dce_mi->wa.single_head_rdreq_dmif_limit = 2; + return &dce_mi->base; +} + +static void dce100_transform_destroy(struct transform **xfm) +{ + kfree(TO_DCE_TRANSFORM(*xfm)); + *xfm = NULL; +} + +static struct transform *dce100_transform_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_transform *transform = + kzalloc(sizeof(struct dce_transform), GFP_KERNEL); + + if (!transform) + return NULL; + + dce_transform_construct(transform, ctx, inst, + &xfm_regs[inst], &xfm_shift, &xfm_mask); + return &transform->base; +} + +static struct input_pixel_processor *dce100_ipp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); + + if (!ipp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dce_ipp_construct(ipp, ctx, inst, + &ipp_regs[inst], &ipp_shift, &ipp_mask); + return &ipp->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 300000, + .flags.bits.IS_HBR2_CAPABLE = true, + 
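+	/* HBR2 is the 5.4 Gbps/lane DP link rate; TPS3 is the DP 1.2 training pattern used with it. */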
.flags.bits.IS_TPS3_CAPABLE = true +}; + +static struct link_encoder *dce100_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dce110_link_encoder *enc110 = + kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc110) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source]); + return &enc110->base; +} + +static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + +static struct output_pixel_processor *dce100_opp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce110_opp *opp = + kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); + + if (!opp) + return NULL; + + dce110_opp_construct(opp, + ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dce100_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +static struct dce_i2c_hw *dce100_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct clock_source *dce100_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dce110_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static void dce100_clock_source_destroy(struct clock_source **clk_src) +{ + kfree(TO_DCE110_CLK_SRC(*clk_src)); + *clk_src = NULL; +} + +static void dce100_resource_destruct(struct dce110_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.opps[i] != NULL) + dce110_opp_destroy(&pool->base.opps[i]); + + if 
(pool->base.transforms[i] != NULL) + dce100_transform_destroy(&pool->base.transforms[i]); + + if (pool->base.ipps[i] != NULL) + dce_ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.mis[i] != NULL) { + kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); + pool->base.mis[i] = NULL; + } + + if (pool->base.timing_generators[i] != NULL) { + kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) + kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) + dce100_clock_source_destroy(&pool->base.clock_sources[i]); + } + + if (pool->base.dp_clock_source != NULL) + dce100_clock_source_destroy(&pool->base.dp_clock_source); + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i] != NULL) + dce_aud_destroy(&pool->base.audios[i]); + } + + if (pool->base.abm != NULL) + dce_abm_destroy(&pool->base.abm); + + if (pool->base.dmcu != NULL) + dce_dmcu_destroy(&pool->base.dmcu); + + if (pool->base.irqs != NULL) + dal_irq_service_destroy(&pool->base.irqs); +} + +static enum dc_status build_mapped_resource( + const struct dc *dc, + struct dc_state *context, + struct dc_stream_state *stream) +{ + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); + + if (!pipe_ctx) + return DC_ERROR_UNEXPECTED; + + dce110_resource_build_pipe_hw_param(pipe_ctx); + + resource_build_info_frame(pipe_ctx); + + return DC_OK; +} + +static bool dce100_validate_bandwidth( + struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + int i; + bool at_least_one_pipe = false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].stream) + at_least_one_pipe = true; + } + + if (at_least_one_pipe) { + /* TODO implement when needed but for now hardcode max value*/ + context->bw_ctx.bw.dce.dispclk_khz = 681000; + context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; + } else { + context->bw_ctx.bw.dce.dispclk_khz = 0; + context->bw_ctx.bw.dce.yclk_khz = 0; + } + + return true; +} + +static bool dce100_validate_surface_sets( + struct dc_state *context) +{ + int i; + + for (i = 0; i < context->stream_count; i++) { + if (context->stream_status[i].plane_count == 0) + continue; + + if (context->stream_status[i].plane_count > 1) + return false; + + if (context->stream_status[i].plane_states[0]->format + >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return false; + } + + return true; +} + +static enum dc_status dce100_validate_global( + struct dc *dc, + struct dc_state *context) +{ + if (!dce100_validate_surface_sets(context)) + return DC_FAIL_SURFACE_VALIDATE; + + return DC_OK; +} + +enum dc_status dce100_add_stream_to_ctx( + struct dc *dc, + struct dc_state *new_ctx, + struct dc_stream_state *dc_stream) +{ + enum dc_status result = DC_ERROR_UNEXPECTED; + + result = resource_map_pool_resources(dc, new_ctx, dc_stream); + + if (result == DC_OK) + result = resource_map_clock_resources(dc, new_ctx, dc_stream); + + if (result == DC_OK) + result = 
build_mapped_resource(dc, new_ctx, dc_stream); + + return result; +} + +static void dce100_destroy_resource_pool(struct resource_pool **pool) +{ + struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); + + dce100_resource_destruct(dce110_pool); + kfree(dce110_pool); + *pool = NULL; +} + +enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) +{ + + if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return DC_OK; + + return DC_FAIL_SURFACE_VALIDATE; +} + +struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_stream_state *stream) +{ + int i; + int j = -1; + struct dc_link *link = stream->link; + + for (i = 0; i < pool->stream_enc_count; i++) { + if (!res_ctx->is_stream_enc_acquired[i] && + pool->stream_enc[i]) { + /* Store first available for MST second display + * in daisy chain use case + */ + j = i; + if (pool->stream_enc[i]->id == + link->link_enc->preferred_engine) + return pool->stream_enc[i]; + } + } + + /* + * below can happen in cases when stream encoder is acquired: + * 1) for second MST display in chain, so preferred engine already + * acquired; + * 2) for another link, which preferred engine already acquired by any + * MST configuration. + * + * If signal is of DP type and preferred engine not found, return last available + * + * TODO - This is just a patch up and a generic solution is + * required for non DP connectors. + */ + + if (j >= 0 && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) + return pool->stream_enc[j]; + + return NULL; +} + +static const struct resource_funcs dce100_res_pool_funcs = { + .destroy = dce100_destroy_resource_pool, + .link_enc_create = dce100_link_encoder_create, + .panel_cntl_create = dce100_panel_cntl_create, + .validate_bandwidth = dce100_validate_bandwidth, + .validate_plane = dce100_validate_plane, + .add_stream_to_ctx = dce100_add_stream_to_ctx, + .validate_global = dce100_validate_global, + .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link +}; + +static bool dce100_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dce110_resource_pool *pool) +{ + unsigned int i; + struct dc_context *ctx = dc->ctx; + struct dc_bios *bp; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap; + pool->base.funcs = &dce100_res_pool_funcs; + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + + bp = ctx->dc_bios; + + if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { + pool->base.dp_clock_source = + dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + + pool->base.clock_sources[0] = + dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); + pool->base.clock_sources[1] = + dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[2] = + dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 3; + + } else { + pool->base.dp_clock_source = + dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); + + pool->base.clock_sources[0] = + dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[1] = + dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 2; + } + + if 
(pool->base.dp_clock_source == NULL) { + dm_error("DC: failed to create dp clock source!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + } + + pool->base.dmcu = dce_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + { + struct irq_service_init_data init_data; + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dce110_create(&init_data); + if (!pool->base.irqs) + goto res_create_fail; + } + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = res_cap.num_timing_generator; + pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 40; + dc->caps.i2c_speed_in_khz_hdcp = 40; + dc->caps.max_cursor_size = 128; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dual_link_dvi = true; + dc->caps.disable_dp_clk_share = true; + dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; + + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.timing_generators[i] = + dce100_timing_generator_create( + ctx, + i, + &dce100_tg_offsets[i]); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto res_create_fail; + } + + pool->base.mis[i] = dce100_mem_input_create(ctx, i); + if (pool->base.mis[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create memory input!\n"); + goto res_create_fail; + } + + pool->base.ipps[i] = dce100_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create input pixel processor!\n"); + goto res_create_fail; + } + + pool->base.transforms[i] = dce100_transform_create(ctx, i); + if (pool->base.transforms[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create transform!\n"); + goto res_create_fail; + } + + pool->base.opps[i] = dce100_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto res_create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dce100_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } + pool->base.hw_i2cs[i] = dce100_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto res_create_fail; + + /* Create hardware sequencer */ + 
dce100_hw_sequencer_construct(dc); + return true; + +res_create_fail: + dce100_resource_destruct(pool); + + return false; +} + +struct resource_pool *dce100_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc) +{ + struct dce110_resource_pool *pool = + kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dce100_resource_construct(num_virtual_links, dc, pool)) + return &pool->base; + + kfree(pool); + BREAK_TO_DEBUGGER(); + return NULL; +} + diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h new file mode 100644 index 0000000000..fecab7c560 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h @@ -0,0 +1,54 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +/* + * dce100_resource.h + * + * Created on: 2016-01-20 + * Author: qyang + */ + +#ifndef DCE100_RESOURCE_H_ +#define DCE100_RESOURCE_H_ + +struct dc; +struct resource_pool; +struct dc_validation_set; + +struct resource_pool *dce100_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc); + +enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps); + +enum dc_status dce100_add_stream_to_ctx( + struct dc *dc, + struct dc_state *new_ctx, + struct dc_stream_state *dc_stream); + +struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_stream_state *stream); + +#endif /* DCE100_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c new file mode 100644 index 0000000000..fe518fd27b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -0,0 +1,1551 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "link_encoder.h" +#include "stream_encoder.h" + +#include "resource.h" +#include "dce110/dce110_resource.h" +#include "include/irq_service_interface.h" +#include "dce/dce_audio.h" +#include "dce110/dce110_timing_generator.h" +#include "irq/dce110/irq_service_dce110.h" +#include "dce110/dce110_timing_generator_v.h" +#include "dce/dce_link_encoder.h" +#include "dce/dce_stream_encoder.h" +#include "dce/dce_mem_input.h" +#include "dce110/dce110_mem_input_v.h" +#include "dce/dce_ipp.h" +#include "dce/dce_transform.h" +#include "dce110/dce110_transform_v.h" +#include "dce/dce_opp.h" +#include "dce110/dce110_opp_v.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dce/dce_aux.h" +#include "dce/dce_abm.h" +#include "dce/dce_dmcu.h" +#include "dce/dce_i2c.h" +#include "dce/dce_panel_cntl.h" + +#define DC_LOGGER \ + dc->ctx->logger + +#include "dce110/dce110_compressor.h" + +#include "reg_helper.h" + +#include "dce/dce_11_0_d.h" +#include "dce/dce_11_0_sh_mask.h" + +#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT +#include "gmc/gmc_8_2_d.h" +#include "gmc/gmc_8_2_sh_mask.h" +#endif + +#ifndef mmDP_DPHY_INTERNAL_CTRL + #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 + #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 + #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 + #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 + #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 + #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 + #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 + #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 + #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 + #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 +#endif + +#ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_2 0x05CB + #define mmBIOS_SCRATCH_3 0x05CC + #define mmBIOS_SCRATCH_6 0x05CF +#endif + +#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL + #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC + #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC + #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC + #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC + #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC + #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC + #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC + #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC +#endif + +#ifndef mmDP_DPHY_FAST_TRAINING + #define mmDP_DPHY_FAST_TRAINING 0x4ABC + #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC + #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC + #define mmDP2_DP_DPHY_FAST_TRAINING 
0x4CBC + #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC + #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC + #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC + #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC +#endif + +#ifndef DPHY_RX_FAST_TRAINING_CAPABLE + #define DPHY_RX_FAST_TRAINING_CAPABLE 0x1 +#endif + +static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = { + { + .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), + } +}; + +/* set register offset */ +#define SR(reg_name)\ + .reg_name = mm ## reg_name + +/* set register offset with instance */ +#define SRI(reg_name, block, id)\ + .reg_name = mm ## block ## id ## _ ## reg_name + +static const struct dce_dmcu_registers dmcu_regs = { + DMCU_DCE110_COMMON_REG_LIST() +}; + +static const struct dce_dmcu_shift dmcu_shift = { + DMCU_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_dmcu_mask dmcu_mask = { + DMCU_MASK_SH_LIST_DCE110(_MASK) +}; + +static const struct dce_abm_registers abm_regs = { + ABM_DCE110_COMMON_REG_LIST() +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCE110(_MASK) +}; + +#define ipp_regs(id)\ +[id] = {\ + IPP_DCE110_REG_LIST_DCE_BASE(id)\ +} + +static const struct dce_ipp_registers ipp_regs[] = { + ipp_regs(0), + ipp_regs(1), + ipp_regs(2) +}; + +static const struct dce_ipp_shift ipp_shift = { + IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce_ipp_mask ipp_mask = { + IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +#define transform_regs(id)\ +[id] = {\ + XFM_COMMON_REG_LIST_DCE110(id)\ +} + +static const struct dce_transform_registers xfm_regs[] = { + transform_regs(0), + transform_regs(1), + transform_regs(2) +}; + +static const struct dce_transform_shift xfm_shift = { + XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_transform_mask xfm_mask = { + XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) +}; + +#define aux_regs(id)\ +[id] = {\ + AUX_REG_LIST(id)\ +} + +static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4), + aux_regs(5) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4), + hpd_regs(5) +}; + + +#define link_regs(id)\ +[id] = {\ + LE_DCE110_REG_LIST(id)\ +} + +static const struct dce110_link_enc_registers link_enc_regs[] = { + link_regs(0), + link_regs(1), + link_regs(2), + link_regs(3), + link_regs(4), + link_regs(5), + link_regs(6), +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_COMMON_REG_LIST(id),\ + .TMDS_CNTL = 0,\ +} + +static const struct dce110_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2) +}; + +static const struct 
dce_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCE110(_MASK) +}; + +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCE_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCE_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE_AUX_MASK_SH_LIST(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_DCE_110_REG_LIST(id),\ +} + +static const struct dce_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3), + opp_regs(4), + opp_regs(5) +}; + +static const struct dce_opp_shift opp_shift = { + OPP_COMMON_MASK_SH_LIST_DCE_110(__SHIFT) +}; + +static const struct dce_opp_mask opp_mask = { + OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST(id), \ + .AUX_RESET_MASK = 0 \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4), + aux_engine_regs(5) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6), +}; + +static const struct dce_audio_shift audio_shift = { + AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +/* AG TBD Needs to be reduced back to 3 pipes once dce10 hw sequencer implemented. 
*/ + + +#define clk_src_regs(id)\ +[id] = {\ + CS_COMMON_REG_LIST_DCE_100_110(id),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0), + clk_src_regs(1), + clk_src_regs(2) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, + .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 +}; + +static const struct resource_caps carrizo_resource_cap = { + .num_timing_generator = 3, + .num_video_plane = 1, + .num_audio = 3, + .num_stream_encoder = 3, + .num_pll = 2, + .num_ddc = 3, +}; + +static const struct resource_caps stoney_resource_cap = { + .num_timing_generator = 2, + .num_video_plane = 1, + .num_audio = 3, + .num_stream_encoder = 3, + .num_pll = 2, + .num_ddc = 3, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCE_RGB, + .per_pixel_alpha = 1, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = true + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 1, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 1, + .fp16 = 1 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + +static const struct dc_plane_cap underlay_plane_cap = { + .type = DC_PLANE_TYPE_DCE_UNDERLAY, + .per_pixel_alpha = 1, + + .pixel_format_support = { + .argb8888 = false, + .nv12 = true, + .fp16 = false + }, + + .max_upscale_factor = { + .argb8888 = 1, + .nv12 = 16000, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 1, + .nv12 = 250, + .fp16 = 1 + }, + 64, + 64 +}; + +#define CTX ctx +#define REG(reg) mm ## reg + +#ifndef mmCC_DC_HDMI_STRAPS +#define mmCC_DC_HDMI_STRAPS 0x4819 +#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 +#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 +#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 +#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 +#endif + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + case TRANSMITTER_UNIPHY_B: + return 1; + case TRANSMITTER_UNIPHY_C: + return 2; + case TRANSMITTER_UNIPHY_D: + return 3; + case TRANSMITTER_UNIPHY_E: + return 4; + case TRANSMITTER_UNIPHY_F: + return 5; + case TRANSMITTER_UNIPHY_G: + return 6; + default: + ASSERT(0); + return 0; + } +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + REG_GET_2(CC_DC_HDMI_STRAPS, + HDMI_DISABLE, &straps->hdmi_disable, + AUDIO_STREAM_NUMBER, &straps->audio_stream_number); + + REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); +} + +static struct audio *create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct timing_generator *dce110_timing_generator_create( + struct dc_context *ctx, + uint32_t instance, + const struct dce110_timing_generator_offsets *offsets) +{ + struct dce110_timing_generator *tg110 = + kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); + + if (!tg110) + return NULL; + + dce110_timing_generator_construct(tg110, ctx, instance, offsets); + return &tg110->base; +} + +static struct stream_encoder *dce110_stream_encoder_create( + enum engine_id eng_id, + 
struct dc_context *ctx) +{ + struct dce110_stream_encoder *enc110 = + kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); + + if (!enc110) + return NULL; + + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + return &enc110->base; +} + +#define SRII(reg_name, block, id)\ + .reg_name[id] = mm ## block ## id ## _ ## reg_name + +static const struct dce_hwseq_registers hwseq_stoney_reg = { + HWSEQ_ST_REG_LIST() +}; + +static const struct dce_hwseq_registers hwseq_cz_reg = { + HWSEQ_CZ_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCE11_MASK_SH_LIST(__SHIFT), +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCE11_MASK_SH_LIST(_MASK), +}; + +static struct dce_hwseq *dce110_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = ASIC_REV_IS_STONEY(ctx->asic_id.hw_internal_rev) ? + &hwseq_stoney_reg : &hwseq_cz_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + hws->wa.blnd_crtc_trigger = true; + } + return hws; +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = create_audio, + .create_stream_encoder = dce110_stream_encoder_create, + .create_hwseq = dce110_hwseq_create, +}; + +#define mi_inst_regs(id) { \ + MI_DCE11_REG_LIST(id), \ + .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ +} +static const struct dce_mem_input_registers mi_regs[] = { + mi_inst_regs(0), + mi_inst_regs(1), + mi_inst_regs(2), +}; + +static const struct dce_mem_input_shift mi_shifts = { + MI_DCE11_MASK_SH_LIST(__SHIFT), + .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT +}; + +static const struct dce_mem_input_mask mi_masks = { + MI_DCE11_MASK_SH_LIST(_MASK), + .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK +}; + + +static struct mem_input *dce110_mem_input_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), + GFP_KERNEL); + + if (!dce_mi) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); + dce_mi->wa.single_head_rdreq_dmif_limit = 3; + return &dce_mi->base; +} + +static void dce110_transform_destroy(struct transform **xfm) +{ + kfree(TO_DCE_TRANSFORM(*xfm)); + *xfm = NULL; +} + +static struct transform *dce110_transform_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_transform *transform = + kzalloc(sizeof(struct dce_transform), GFP_KERNEL); + + if (!transform) + return NULL; + + dce_transform_construct(transform, ctx, inst, + &xfm_regs[inst], &xfm_shift, &xfm_mask); + return &transform->base; +} + +static struct input_pixel_processor *dce110_ipp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); + + if (!ipp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dce_ipp_construct(ipp, ctx, inst, + &ipp_regs[inst], &ipp_shift, &ipp_mask); + return &ipp->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 300000, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true +}; + +static struct link_encoder *dce110_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dce110_link_encoder *enc110 = + 
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc110) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source]); + return &enc110->base; +} + +static struct panel_cntl *dce110_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + +static struct output_pixel_processor *dce110_opp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce110_opp *opp = + kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); + + if (!opp) + return NULL; + + dce110_opp_construct(opp, + ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dce110_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) +}; + +static struct dce_i2c_hw *dce110_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dce100_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct clock_source *dce110_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dce110_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static void dce110_clock_source_destroy(struct clock_source **clk_src) +{ + struct dce110_clk_src *dce110_clk_src; + + if (!clk_src) + return; + + dce110_clk_src = TO_DCE110_CLK_SRC(*clk_src); + + kfree(dce110_clk_src->dp_ss_params); + kfree(dce110_clk_src->hdmi_ss_params); + kfree(dce110_clk_src->dvi_ss_params); + + kfree(dce110_clk_src); + *clk_src = NULL; +} + +static void dce110_resource_destruct(struct dce110_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.opps[i] != NULL) + dce110_opp_destroy(&pool->base.opps[i]); + + if 
(pool->base.transforms[i] != NULL) + dce110_transform_destroy(&pool->base.transforms[i]); + + if (pool->base.ipps[i] != NULL) + dce_ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.mis[i] != NULL) { + kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); + pool->base.mis[i] = NULL; + } + + if (pool->base.timing_generators[i] != NULL) { + kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) + kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dce110_clock_source_destroy(&pool->base.clock_sources[i]); + } + } + + if (pool->base.dp_clock_source != NULL) + dce110_clock_source_destroy(&pool->base.dp_clock_source); + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i] != NULL) { + dce_aud_destroy(&pool->base.audios[i]); + } + } + + if (pool->base.abm != NULL) + dce_abm_destroy(&pool->base.abm); + + if (pool->base.dmcu != NULL) + dce_dmcu_destroy(&pool->base.dmcu); + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } +} + + +static void get_pixel_clock_parameters( + const struct pipe_ctx *pipe_ctx, + struct pixel_clk_params *pixel_clk_params) +{ + const struct dc_stream_state *stream = pipe_ctx->stream; + + /*TODO: is this halved for YCbCr 420? 
in that case we might want to move + * the pixel clock normalization for hdmi up to here instead of doing it + * in pll_adjust_pix_clk + */ + pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; + pixel_clk_params->encoder_object_id = stream->link->link_enc->id; + pixel_clk_params->signal_type = pipe_ctx->stream->signal; + pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; + /* TODO: un-hardcode*/ + pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * + LINK_RATE_REF_FREQ_IN_KHZ; + pixel_clk_params->flags.ENABLE_SS = 0; + pixel_clk_params->color_depth = + stream->timing.display_color_depth; + pixel_clk_params->flags.DISPLAY_BLANKED = 1; + pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding == + PIXEL_ENCODING_YCBCR420); + pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) { + pixel_clk_params->color_depth = COLOR_DEPTH_888; + } + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { + pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2; + } + if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + pixel_clk_params->requested_pix_clk_100hz *= 2; + +} + +void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx) +{ + get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); + pipe_ctx->clock_source->funcs->get_pix_clk_dividers( + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + &pipe_ctx->pll_settings); + resource_build_bit_depth_reduction_params(pipe_ctx->stream, + &pipe_ctx->stream->bit_depth_params); + pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; +} + +static bool is_surface_pixel_format_supported(struct pipe_ctx *pipe_ctx, unsigned int underlay_idx) +{ + if (pipe_ctx->pipe_idx != underlay_idx) + return true; + if (!pipe_ctx->plane_state) + return false; + if (pipe_ctx->plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return false; + return true; +} + +static enum dc_status build_mapped_resource( + const struct dc *dc, + struct dc_state *context, + struct dc_stream_state *stream) +{ + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); + + if (!pipe_ctx) + return DC_ERROR_UNEXPECTED; + + if (!is_surface_pixel_format_supported(pipe_ctx, + dc->res_pool->underlay_pipe_index)) + return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED; + + dce110_resource_build_pipe_hw_param(pipe_ctx); + + /* TODO: validate audio ASIC caps, encoder */ + + resource_build_info_frame(pipe_ctx); + + return DC_OK; +} + +static bool dce110_validate_bandwidth( + struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool result = false; + + DC_LOG_BANDWIDTH_CALCS( + "%s: start", + __func__); + + if (bw_calcs( + dc->ctx, + dc->bw_dceip, + dc->bw_vbios, + context->res_ctx.pipe_ctx, + dc->res_pool->pipe_count, + &context->bw_ctx.bw.dce)) + result = true; + + if (!result) + DC_LOG_BANDWIDTH_VALIDATION("%s: %dx%d@%d Bandwidth validation failed!\n", + __func__, + context->streams[0]->timing.h_addressable, + context->streams[0]->timing.v_addressable, + context->streams[0]->timing.pix_clk_100hz / 10); + + if (memcmp(&dc->current_state->bw_ctx.bw.dce, + &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) { + + DC_LOG_BANDWIDTH_CALCS( + "%s: finish,\n" + "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" + "stutMark_b: %d stutMark_a: %d\n" + "nbpMark_b: %d nbpMark_a: %d 
urgentMark_b: %d urgentMark_a: %d\n" + "stutMark_b: %d stutMark_a: %d\n" + "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" + "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n" + "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n" + "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n" + , + __func__, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark, + context->bw_ctx.bw.dce.stutter_mode_enable, + context->bw_ctx.bw.dce.cpuc_state_change_enable, + context->bw_ctx.bw.dce.cpup_state_change_enable, + context->bw_ctx.bw.dce.nbp_state_change_enable, + context->bw_ctx.bw.dce.all_displays_in_sync, + context->bw_ctx.bw.dce.dispclk_khz, + context->bw_ctx.bw.dce.sclk_khz, + context->bw_ctx.bw.dce.sclk_deep_sleep_khz, + context->bw_ctx.bw.dce.yclk_khz, + context->bw_ctx.bw.dce.blackout_recovery_time_us); + } + return result; +} + +static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state, + struct dc_caps *caps) +{ + if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) || + ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height)) + return DC_FAIL_SURFACE_VALIDATE; + + return DC_OK; +} + +static bool dce110_validate_surface_sets( + struct dc_state *context) +{ + int i, j; + + for (i = 0; i < context->stream_count; i++) { + if (context->stream_status[i].plane_count == 0) + continue; + + if (context->stream_status[i].plane_count > 2) + return false; + + for (j = 0; j < context->stream_status[i].plane_count; j++) { + struct dc_plane_state *plane = + context->stream_status[i].plane_states[j]; + + /* underlay validation */ + if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + + if ((plane->src_rect.width > 1920 || + plane->src_rect.height > 1080)) + return false; + + /* we don't have the logic to support underlay + * only yet so block the use case where we get + * NV12 plane as top layer + */ + if (j == 0) + return false; + + /* irrespective of plane format, + * stream should be RGB encoded + */ + if (context->streams[i]->timing.pixel_encoding + != PIXEL_ENCODING_RGB) + return false; + + } + + } + } + + return true; +} + +static enum dc_status dce110_validate_global( + struct dc *dc, + struct dc_state *context) +{ + if (!dce110_validate_surface_sets(context)) + return DC_FAIL_SURFACE_VALIDATE; + + return DC_OK; +} + +static enum dc_status dce110_add_stream_to_ctx( + struct dc *dc, + struct dc_state *new_ctx, + struct dc_stream_state *dc_stream) +{ + enum dc_status result = DC_ERROR_UNEXPECTED; + + result = resource_map_pool_resources(dc, new_ctx, dc_stream); + + if (result == DC_OK) + 
result = resource_map_clock_resources(dc, new_ctx, dc_stream); + + + if (result == DC_OK) + result = build_mapped_resource(dc, new_ctx, dc_stream); + + return result; +} + +static struct pipe_ctx *dce110_acquire_underlay( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *opp_head_pipe) +{ + struct dc_stream_state *stream = opp_head_pipe->stream; + struct dc *dc = stream->ctx->dc; + struct dce_hwseq *hws = dc->hwseq; + struct resource_context *res_ctx = &new_ctx->res_ctx; + unsigned int underlay_idx = pool->underlay_pipe_index; + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx]; + + if (res_ctx->pipe_ctx[underlay_idx].stream) + return NULL; + + pipe_ctx->stream_res.tg = pool->timing_generators[underlay_idx]; + pipe_ctx->plane_res.mi = pool->mis[underlay_idx]; + /*pipe_ctx->plane_res.ipp = res_ctx->pool->ipps[underlay_idx];*/ + pipe_ctx->plane_res.xfm = pool->transforms[underlay_idx]; + pipe_ctx->stream_res.opp = pool->opps[underlay_idx]; + pipe_ctx->pipe_idx = underlay_idx; + + pipe_ctx->stream = stream; + + if (!dc->current_state->res_ctx.pipe_ctx[underlay_idx].stream) { + struct tg_color black_color = {0}; + struct dc_bios *dcb = dc->ctx->dc_bios; + + hws->funcs.enable_display_power_gating( + dc, + pipe_ctx->stream_res.tg->inst, + dcb, PIPE_GATING_CONTROL_DISABLE); + + /* + * This is for powering on underlay, so crtc does not + * need to be enabled + */ + + pipe_ctx->stream_res.tg->funcs->program_timing(pipe_ctx->stream_res.tg, + &stream->timing, + 0, + 0, + 0, + 0, + pipe_ctx->stream->signal, + false); + + pipe_ctx->stream_res.tg->funcs->enable_advanced_request( + pipe_ctx->stream_res.tg, + true, + &stream->timing); + + pipe_ctx->plane_res.mi->funcs->allocate_mem_input(pipe_ctx->plane_res.mi, + stream->timing.h_total, + stream->timing.v_total, + stream->timing.pix_clk_100hz / 10, + new_ctx->stream_count); + + color_space_to_black_color(dc, + COLOR_SPACE_YCBCR601, &black_color); + pipe_ctx->stream_res.tg->funcs->set_blank_color( + pipe_ctx->stream_res.tg, + &black_color); + } + + return pipe_ctx; +} + +static void dce110_destroy_resource_pool(struct resource_pool **pool) +{ + struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); + + dce110_resource_destruct(dce110_pool); + kfree(dce110_pool); + *pool = NULL; +} + +struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_stream_state *stream) +{ + int i; + int j = -1; + struct dc_link *link = stream->link; + + for (i = 0; i < pool->stream_enc_count; i++) { + if (!res_ctx->is_stream_enc_acquired[i] && + pool->stream_enc[i]) { + /* Store first available for MST second display + * in daisy chain use case + */ + j = i; + if (pool->stream_enc[i]->id == + link->link_enc->preferred_engine) + return pool->stream_enc[i]; + } + } + + /* + * For CZ and later, we can allow DIG FE and BE to differ for all display types + */ + + if (j >= 0) + return pool->stream_enc[j]; + + return NULL; +} + + +static const struct resource_funcs dce110_res_pool_funcs = { + .destroy = dce110_destroy_resource_pool, + .link_enc_create = dce110_link_encoder_create, + .panel_cntl_create = dce110_panel_cntl_create, + .validate_bandwidth = dce110_validate_bandwidth, + .validate_plane = dce110_validate_plane, + .acquire_free_pipe_as_secondary_dpp_pipe = dce110_acquire_underlay, + .add_stream_to_ctx = dce110_add_stream_to_ctx, + .validate_global = dce110_validate_global, + 
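+ /* + * A rough sketch of how the generation-independent core drives these + * hooks during validation (simplified; the real entry points live in + * dc_resource.c): + * + * res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream); + * res_pool->funcs->validate_global(dc, new_ctx); + * res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate); + */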
.find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link +}; + +static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool) +{ + struct dce110_timing_generator *dce110_tgv = kzalloc(sizeof(*dce110_tgv), + GFP_KERNEL); + struct dce_transform *dce110_xfmv = kzalloc(sizeof(*dce110_xfmv), + GFP_KERNEL); + struct dce_mem_input *dce110_miv = kzalloc(sizeof(*dce110_miv), + GFP_KERNEL); + struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv), + GFP_KERNEL); + + if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) { + kfree(dce110_tgv); + kfree(dce110_xfmv); + kfree(dce110_miv); + kfree(dce110_oppv); + return false; + } + + dce110_opp_v_construct(dce110_oppv, ctx); + + dce110_timing_generator_v_construct(dce110_tgv, ctx); + dce110_mem_input_v_construct(dce110_miv, ctx); + dce110_transform_v_construct(dce110_xfmv, ctx); + + pool->opps[pool->pipe_count] = &dce110_oppv->base; + pool->timing_generators[pool->pipe_count] = &dce110_tgv->base; + pool->mis[pool->pipe_count] = &dce110_miv->base; + pool->transforms[pool->pipe_count] = &dce110_xfmv->base; + pool->pipe_count++; + + /* update the public caps to indicate an underlay is available */ + ctx->dc->caps.max_slave_planes = 1; + ctx->dc->caps.max_slave_yuv_planes = 1; + ctx->dc->caps.max_slave_rgb_planes = 0; + + return true; +} + +static void bw_calcs_data_update_from_pplib(struct dc *dc) +{ + struct dm_pp_clock_levels clks = {0}; + + /*do system clock*/ + dm_pp_get_clock_levels_by_type( + dc->ctx, + DM_PP_CLOCK_TYPE_ENGINE_CLK, + &clks); + /* convert all the clock fro kHz to fix point mHz */ + dc->bw_vbios->high_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels-1], 1000); + dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels/8], 1000); + dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels*2/8], 1000); + dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels*3/8], 1000); + dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels*4/8], 1000); + dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels*5/8], 1000); + dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels*6/8], 1000); + dc->bw_vbios->low_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[0], 1000); + dc->sclk_lvls = clks; + + /*do display clock*/ + dm_pp_get_clock_levels_by_type( + dc->ctx, + DM_PP_CLOCK_TYPE_DISPLAY_CLK, + &clks); + dc->bw_vbios->high_voltage_max_dispclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels-1], 1000); + dc->bw_vbios->mid_voltage_max_dispclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels>>1], 1000); + dc->bw_vbios->low_voltage_max_dispclk = bw_frc_to_fixed( + clks.clocks_in_khz[0], 1000); + + /*do memory clock*/ + dm_pp_get_clock_levels_by_type( + dc->ctx, + DM_PP_CLOCK_TYPE_MEMORY_CLK, + &clks); + + dc->bw_vbios->low_yclk = bw_frc_to_fixed( + clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); + dc->bw_vbios->mid_yclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, + 1000); + dc->bw_vbios->high_yclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, + 1000); +} + +static const struct resource_caps *dce110_resource_cap( + struct hw_asic_id *asic_id) +{ + if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev)) + return &stoney_resource_cap; + else + return &carrizo_resource_cap; +} + +static bool dce110_resource_construct( + uint8_t 
num_virtual_links, + struct dc *dc, + struct dce110_resource_pool *pool, + struct hw_asic_id asic_id) +{ + unsigned int i; + struct dc_context *ctx = dc->ctx; + struct dc_bios *bp; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = dce110_resource_cap(&ctx->asic_id); + pool->base.funcs = &dce110_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + pool->base.underlay_pipe_index = pool->base.pipe_count; + pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 150; + dc->caps.i2c_speed_in_khz = 40; + dc->caps.i2c_speed_in_khz_hdcp = 40; + dc->caps.max_cursor_size = 128; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.is_apu = true; + dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; + + /************************************************* + * Create resources * + *************************************************/ + + bp = ctx->dc_bios; + + if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { + pool->base.dp_clock_source = + dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + + pool->base.clock_sources[0] = + dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[1] = + dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, + &clk_src_regs[1], false); + + pool->base.clk_src_count = 2; + + /* TODO: find out if CZ supports 3 PLLs */ + } + + if (pool->base.dp_clock_source == NULL) { + dm_error("DC: failed to create dp clock source!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + } + + pool->base.dmcu = dce_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + { + struct irq_service_init_data init_data; + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dce110_create(&init_data); + if (!pool->base.irqs) + goto res_create_fail; + } + + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.timing_generators[i] = dce110_timing_generator_create( + ctx, i, &dce110_tg_offsets[i]); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto res_create_fail; + } + + pool->base.mis[i] = dce110_mem_input_create(ctx, i); + if (pool->base.mis[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create memory input!\n"); + goto res_create_fail; + } + + pool->base.ipps[i] = dce110_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create input pixel processor!\n"); + goto res_create_fail; + } + + pool->base.transforms[i] = dce110_transform_create(ctx, i); + if (pool->base.transforms[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create transform!\n"); + goto res_create_fail; + } + + pool->base.opps[i] = 
dce110_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto res_create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dce110_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } + pool->base.hw_i2cs[i] = dce110_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + if (dc->config.fbc_support) + dc->fbc_compressor = dce110_compressor_create(ctx); + + if (!underlay_create(ctx, &pool->base)) + goto res_create_fail; + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto res_create_fail; + + /* Create hardware sequencer */ + dce110_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < pool->base.underlay_pipe_index; ++i) + dc->caps.planes[i] = plane_cap; + + dc->caps.planes[pool->base.underlay_pipe_index] = underlay_plane_cap; + + bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); + + bw_calcs_data_update_from_pplib(dc); + + return true; + +res_create_fail: + dce110_resource_destruct(pool); + return false; +} + +struct resource_pool *dce110_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc, + struct hw_asic_id asic_id) +{ + struct dce110_resource_pool *pool = + kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dce110_resource_construct(num_virtual_links, dc, pool, asic_id)) + return &pool->base; + + kfree(pool); + BREAK_TO_DEBUGGER(); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h new file mode 100644 index 0000000000..aa4531e080 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h @@ -0,0 +1,54 @@ +/* +* Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_RESOURCE_DCE110_H__ +#define __DC_RESOURCE_DCE110_H__ + +#include "core_types.h" + +struct dc; +struct resource_pool; + +#define TO_DCE110_RES_POOL(pool)\ + container_of(pool, struct dce110_resource_pool, base) + +struct dce110_resource_pool { + struct resource_pool base; +}; + +void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx); + +struct resource_pool *dce110_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc, + struct hw_asic_id asic_id); + +struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_stream_state *stream); + +#endif /* __DC_RESOURCE_DCE110_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c new file mode 100644 index 0000000000..d1edac46c9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -0,0 +1,1431 @@ +/* +* Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "link_encoder.h" +#include "stream_encoder.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dce110/dce110_resource.h" +#include "dce110/dce110_timing_generator.h" + +#include "irq/dce110/irq_service_dce110.h" +#include "dce/dce_mem_input.h" +#include "dce/dce_transform.h" +#include "dce/dce_link_encoder.h" +#include "dce/dce_stream_encoder.h" +#include "dce/dce_audio.h" +#include "dce/dce_opp.h" +#include "dce/dce_ipp.h" +#include "dce/dce_clock_source.h" + +#include "dce/dce_hwseq.h" +#include "dce112/dce112_hwseq.h" +#include "dce/dce_abm.h" +#include "dce/dce_dmcu.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" +#include "dce/dce_panel_cntl.h" + +#include "reg_helper.h" + +#include "dce/dce_11_2_d.h" +#include "dce/dce_11_2_sh_mask.h" + +#include "dce100/dce100_resource.h" +#include "dce112_resource.h" + +#define DC_LOGGER \ + dc->ctx->logger + +#ifndef mmDP_DPHY_INTERNAL_CTRL + #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7 + #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7 + #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7 + #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7 + #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7 + #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7 + #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7 + #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7 + #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7 + #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7 +#endif + +#ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_2 0x05CB + #define mmBIOS_SCRATCH_3 0x05CC + #define mmBIOS_SCRATCH_6 0x05CF +#endif + +#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL + #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC + #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC + #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC + #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC + #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC + #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC + #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC + #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC +#endif + +#ifndef mmDP_DPHY_FAST_TRAINING + #define mmDP_DPHY_FAST_TRAINING 0x4ABC + #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC + #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC + #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC + #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC + #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC + #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC + #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC +#endif + +enum dce112_clk_src_array_id { + DCE112_CLK_SRC_PLL0, + DCE112_CLK_SRC_PLL1, + DCE112_CLK_SRC_PLL2, + DCE112_CLK_SRC_PLL3, + DCE112_CLK_SRC_PLL4, + DCE112_CLK_SRC_PLL5, + + DCE112_CLK_SRC_TOTAL +}; + +static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = { + { + .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), + }, + { + .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), + } +}; + +/* set register offset */ +#define SR(reg_name)\ + .reg_name = mm ## reg_name + +/* set register offset with instance */ +#define SRI(reg_name, block, id)\ + .reg_name = mm ## block ## id ## _ ## 
reg_name + +static const struct dce_dmcu_registers dmcu_regs = { + DMCU_DCE110_COMMON_REG_LIST() +}; + +static const struct dce_dmcu_shift dmcu_shift = { + DMCU_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_dmcu_mask dmcu_mask = { + DMCU_MASK_SH_LIST_DCE110(_MASK) +}; + +static const struct dce_abm_registers abm_regs = { + ABM_DCE110_COMMON_REG_LIST() +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCE110(_MASK) +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCE_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE_AUX_MASK_SH_LIST(_MASK) +}; + +#define ipp_regs(id)\ +[id] = {\ + IPP_DCE110_REG_LIST_DCE_BASE(id)\ +} + +static const struct dce_ipp_registers ipp_regs[] = { + ipp_regs(0), + ipp_regs(1), + ipp_regs(2), + ipp_regs(3), + ipp_regs(4), + ipp_regs(5) +}; + +static const struct dce_ipp_shift ipp_shift = { + IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce_ipp_mask ipp_mask = { + IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +#define transform_regs(id)\ +[id] = {\ + XFM_COMMON_REG_LIST_DCE110(id)\ +} + +static const struct dce_transform_registers xfm_regs[] = { + transform_regs(0), + transform_regs(1), + transform_regs(2), + transform_regs(3), + transform_regs(4), + transform_regs(5) +}; + +static const struct dce_transform_shift xfm_shift = { + XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_transform_mask xfm_mask = { + XFM_COMMON_MASK_SH_LIST_DCE110(_MASK) +}; + +#define aux_regs(id)\ +[id] = {\ + AUX_REG_LIST(id)\ +} + +static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4), + aux_regs(5) +}; + +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCE_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4), + hpd_regs(5) +}; + +#define link_regs(id)\ +[id] = {\ + LE_DCE110_REG_LIST(id)\ +} + +static const struct dce110_link_enc_registers link_enc_regs[] = { + link_regs(0), + link_regs(1), + link_regs(2), + link_regs(3), + link_regs(4), + link_regs(5), + link_regs(6), +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_COMMON_REG_LIST(id),\ + .TMDS_CNTL = 0,\ +} + +static const struct dce110_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4), + stream_enc_regs(5) +}; + +static const struct dce_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCE112(__SHIFT) +}; + +static const struct dce_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCE112(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_DCE_112_REG_LIST(id),\ +} + +static const struct dce_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3), + opp_regs(4), + opp_regs(5) +}; + +static const struct dce_opp_shift opp_shift = { + OPP_COMMON_MASK_SH_LIST_DCE_112(__SHIFT) +}; + +static const struct dce_opp_mask opp_mask = { + 
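+ /* + * Every shift/mask struct pair in this file is stamped out of one + * field list: the same *_MASK_SH_LIST macro expands once with __SHIFT + * and once with _MASK appended to each register-field name, so + * reg_helper's REG_GET/REG_SET get both constants. A hypothetical + * single-field list would look like: + * + * #define FOO_MASK_SH_LIST(sfx) \ + * .HDMI_DISABLE = CC_DC_HDMI_STRAPS__HDMI_DISABLE ## sfx + */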
OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST(id), \ + .AUX_RESET_MASK = 0 \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4), + aux_engine_regs(5) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5) +}; + +static const struct dce_audio_shift audio_shift = { + AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define clk_src_regs(index, id)\ +[index] = {\ + CS_COMMON_REG_LIST_DCE_112(id),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, C), + clk_src_regs(3, D), + clk_src_regs(4, E), + clk_src_regs(5, F) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCE_112(_MASK) +}; + +static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, + .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 +}; + +static const struct resource_caps polaris_10_resource_cap = { + .num_timing_generator = 6, + .num_audio = 6, + .num_stream_encoder = 6, + .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */ + .num_ddc = 6, +}; + +static const struct resource_caps polaris_11_resource_cap = { + .num_timing_generator = 5, + .num_audio = 5, + .num_stream_encoder = 5, + .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? 
*/ + .num_ddc = 5, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCE_RGB, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = true + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 1, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 1, + .fp16 = 1 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + +#define CTX ctx +#define REG(reg) mm ## reg + +#ifndef mmCC_DC_HDMI_STRAPS +#define mmCC_DC_HDMI_STRAPS 0x4819 +#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 +#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 +#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 +#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 +#endif + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + case TRANSMITTER_UNIPHY_B: + return 1; + case TRANSMITTER_UNIPHY_C: + return 2; + case TRANSMITTER_UNIPHY_D: + return 3; + case TRANSMITTER_UNIPHY_E: + return 4; + case TRANSMITTER_UNIPHY_F: + return 5; + case TRANSMITTER_UNIPHY_G: + return 6; + default: + ASSERT(0); + return 0; + } +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + REG_GET_2(CC_DC_HDMI_STRAPS, + HDMI_DISABLE, &straps->hdmi_disable, + AUDIO_STREAM_NUMBER, &straps->audio_stream_number); + + REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); +} + +static struct audio *create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + + +static struct timing_generator *dce112_timing_generator_create( + struct dc_context *ctx, + uint32_t instance, + const struct dce110_timing_generator_offsets *offsets) +{ + struct dce110_timing_generator *tg110 = + kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); + + if (!tg110) + return NULL; + + dce110_timing_generator_construct(tg110, ctx, instance, offsets); + return &tg110->base; +} + +static struct stream_encoder *dce112_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dce110_stream_encoder *enc110 = + kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); + + if (!enc110) + return NULL; + + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + return &enc110->base; +} + +#define SRII(reg_name, block, id)\ + .reg_name[id] = mm ## block ## id ## _ ## reg_name + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCE112_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCE112_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCE112_MASK_SH_LIST(_MASK) +}; + +static struct dce_hwseq *dce112_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = create_audio, + .create_stream_encoder = dce112_stream_encoder_create, + .create_hwseq = dce112_hwseq_create, +}; + +#define mi_inst_regs(id) { MI_DCE11_2_REG_LIST(id) } +static const struct dce_mem_input_registers mi_regs[] = { + 
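+ /* one register-offset struct per memory-input instance; + * dce112_mem_input_create() below indexes this array by pipe + * instance, so the ordering must match the hardware numbering + */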
mi_inst_regs(0), + mi_inst_regs(1), + mi_inst_regs(2), + mi_inst_regs(3), + mi_inst_regs(4), + mi_inst_regs(5), +}; + +static const struct dce_mem_input_shift mi_shifts = { + MI_DCE11_2_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_mem_input_mask mi_masks = { + MI_DCE11_2_MASK_SH_LIST(_MASK) +}; + +static struct mem_input *dce112_mem_input_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), + GFP_KERNEL); + + if (!dce_mi) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); + return &dce_mi->base; +} + +static void dce112_transform_destroy(struct transform **xfm) +{ + kfree(TO_DCE_TRANSFORM(*xfm)); + *xfm = NULL; +} + +static struct transform *dce112_transform_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_transform *transform = + kzalloc(sizeof(struct dce_transform), GFP_KERNEL); + + if (!transform) + return NULL; + + dce_transform_construct(transform, ctx, inst, + &xfm_regs[inst], &xfm_shift, &xfm_mask); + transform->lb_memory_size = 0x1404; /*5124*/ + return &transform->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = false, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dce112_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dce110_link_encoder *enc110 = + kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc110) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source]); + return &enc110->base; +} + +static struct panel_cntl *dce112_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + +static struct input_pixel_processor *dce112_ipp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); + + if (!ipp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dce_ipp_construct(ipp, ctx, inst, + &ipp_regs[inst], &ipp_shift, &ipp_mask); + return &ipp->base; +} + +static struct output_pixel_processor *dce112_opp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce110_opp *opp = + kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); + + if (!opp) + return NULL; + + dce110_opp_construct(opp, + ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dce112_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * 
AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) +}; + +static struct dce_i2c_hw *dce112_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct clock_source *dce112_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dce112_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static void dce112_clock_source_destroy(struct clock_source **clk_src) +{ + kfree(TO_DCE110_CLK_SRC(*clk_src)); + *clk_src = NULL; +} + +static void dce112_resource_destruct(struct dce110_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.opps[i] != NULL) + dce110_opp_destroy(&pool->base.opps[i]); + + if (pool->base.transforms[i] != NULL) + dce112_transform_destroy(&pool->base.transforms[i]); + + if (pool->base.ipps[i] != NULL) + dce_ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.mis[i] != NULL) { + kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); + pool->base.mis[i] = NULL; + } + + if (pool->base.timing_generators[i] != NULL) { + kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) + kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dce112_clock_source_destroy(&pool->base.clock_sources[i]); + } + } + + if (pool->base.dp_clock_source != NULL) + dce112_clock_source_destroy(&pool->base.dp_clock_source); + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i] != NULL) { + dce_aud_destroy(&pool->base.audios[i]); + } + } + + if (pool->base.abm != NULL) + dce_abm_destroy(&pool->base.abm); + + if (pool->base.dmcu != NULL) + dce_dmcu_destroy(&pool->base.dmcu); + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } +} + +static struct clock_source *find_matching_pll( + struct resource_context *res_ctx, + const struct 
resource_pool *pool, + const struct dc_stream_state *const stream) +{ + switch (stream->link->link_enc->transmitter) { + case TRANSMITTER_UNIPHY_A: + return pool->clock_sources[DCE112_CLK_SRC_PLL0]; + case TRANSMITTER_UNIPHY_B: + return pool->clock_sources[DCE112_CLK_SRC_PLL1]; + case TRANSMITTER_UNIPHY_C: + return pool->clock_sources[DCE112_CLK_SRC_PLL2]; + case TRANSMITTER_UNIPHY_D: + return pool->clock_sources[DCE112_CLK_SRC_PLL3]; + case TRANSMITTER_UNIPHY_E: + return pool->clock_sources[DCE112_CLK_SRC_PLL4]; + case TRANSMITTER_UNIPHY_F: + return pool->clock_sources[DCE112_CLK_SRC_PLL5]; + default: + return NULL; + } + + return NULL; +} + +static enum dc_status build_mapped_resource( + const struct dc *dc, + struct dc_state *context, + struct dc_stream_state *stream) +{ + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); + + if (!pipe_ctx) + return DC_ERROR_UNEXPECTED; + + dce110_resource_build_pipe_hw_param(pipe_ctx); + + resource_build_info_frame(pipe_ctx); + + return DC_OK; +} + +bool dce112_validate_bandwidth( + struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool result = false; + + DC_LOG_BANDWIDTH_CALCS( + "%s: start", + __func__); + + if (bw_calcs( + dc->ctx, + dc->bw_dceip, + dc->bw_vbios, + context->res_ctx.pipe_ctx, + dc->res_pool->pipe_count, + &context->bw_ctx.bw.dce)) + result = true; + + if (!result) + DC_LOG_BANDWIDTH_VALIDATION( + "%s: Bandwidth validation failed!", + __func__); + + if (memcmp(&dc->current_state->bw_ctx.bw.dce, + &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) { + + DC_LOG_BANDWIDTH_CALCS( + "%s: finish,\n" + "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" + "stutMark_b: %d stutMark_a: %d\n" + "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" + "stutMark_b: %d stutMark_a: %d\n" + "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n" + "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n" + "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n" + "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n" + , + __func__, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark, + context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark, + context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark, + context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark, + context->bw_ctx.bw.dce.stutter_mode_enable, + context->bw_ctx.bw.dce.cpuc_state_change_enable, + context->bw_ctx.bw.dce.cpup_state_change_enable, + context->bw_ctx.bw.dce.nbp_state_change_enable, + context->bw_ctx.bw.dce.all_displays_in_sync, + context->bw_ctx.bw.dce.dispclk_khz, + context->bw_ctx.bw.dce.sclk_khz, + context->bw_ctx.bw.dce.sclk_deep_sleep_khz, + context->bw_ctx.bw.dce.yclk_khz, + 
context->bw_ctx.bw.dce.blackout_recovery_time_us); + } + return result; +} + +enum dc_status resource_map_phy_clock_resources( + const struct dc *dc, + struct dc_state *context, + struct dc_stream_state *stream) +{ + + /* acquire new resources */ + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream( + &context->res_ctx, stream); + + if (!pipe_ctx) + return DC_ERROR_UNEXPECTED; + + if (dc_is_dp_signal(pipe_ctx->stream->signal) + || dc_is_virtual_signal(pipe_ctx->stream->signal)) + pipe_ctx->clock_source = + dc->res_pool->dp_clock_source; + else { + if (stream && stream->link && stream->link->link_enc) + pipe_ctx->clock_source = find_matching_pll( + &context->res_ctx, dc->res_pool, + stream); + } + + if (pipe_ctx->clock_source == NULL) + return DC_NO_CLOCK_SOURCE_RESOURCE; + + resource_reference_clock_source( + &context->res_ctx, + dc->res_pool, + pipe_ctx->clock_source); + + return DC_OK; +} + +static bool dce112_validate_surface_sets( + struct dc_state *context) +{ + int i; + + for (i = 0; i < context->stream_count; i++) { + if (context->stream_status[i].plane_count == 0) + continue; + + if (context->stream_status[i].plane_count > 1) + return false; + + if (context->stream_status[i].plane_states[0]->format + >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return false; + } + + return true; +} + +enum dc_status dce112_add_stream_to_ctx( + struct dc *dc, + struct dc_state *new_ctx, + struct dc_stream_state *dc_stream) +{ + enum dc_status result; + + result = resource_map_pool_resources(dc, new_ctx, dc_stream); + + if (result == DC_OK) + result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); + + + if (result == DC_OK) + result = build_mapped_resource(dc, new_ctx, dc_stream); + + return result; +} + +static enum dc_status dce112_validate_global( + struct dc *dc, + struct dc_state *context) +{ + if (!dce112_validate_surface_sets(context)) + return DC_FAIL_SURFACE_VALIDATE; + + return DC_OK; +} + +static void dce112_destroy_resource_pool(struct resource_pool **pool) +{ + struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); + + dce112_resource_destruct(dce110_pool); + kfree(dce110_pool); + *pool = NULL; +} + +static const struct resource_funcs dce112_res_pool_funcs = { + .destroy = dce112_destroy_resource_pool, + .link_enc_create = dce112_link_encoder_create, + .panel_cntl_create = dce112_panel_cntl_create, + .validate_bandwidth = dce112_validate_bandwidth, + .validate_plane = dce100_validate_plane, + .add_stream_to_ctx = dce112_add_stream_to_ctx, + .validate_global = dce112_validate_global, + .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link +}; + +static void bw_calcs_data_update_from_pplib(struct dc *dc) +{ + struct dm_pp_clock_levels_with_latency eng_clks = {0}; + struct dm_pp_clock_levels_with_latency mem_clks = {0}; + struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0}; + struct dm_pp_clock_levels clks = {0}; + int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ; + + if (dc->bw_vbios && dc->bw_vbios->memory_type == bw_def_hbm) + memory_type_multiplier = MEMORY_TYPE_HBM; + + /*do system clock TODO PPLIB: after PPLIB implement, + * then remove old way + */ + if (!dm_pp_get_clock_levels_by_type_with_latency( + dc->ctx, + DM_PP_CLOCK_TYPE_ENGINE_CLK, + &eng_clks)) { + + /* This is only for temporary */ + dm_pp_get_clock_levels_by_type( + dc->ctx, + DM_PP_CLOCK_TYPE_ENGINE_CLK, + &clks); + /* convert all the clock fro kHz to fix point mHz */ + dc->bw_vbios->high_sclk = bw_frc_to_fixed( + 
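+ /* + * bw_frc_to_fixed(n, d) builds the fixed-point fraction n/d, so the + * divisor of 1000 turns each kHz clock into MHz (e.g. an illustrative + * 300000 kHz DPM level becomes fixed-point 300.0). The num_levels*k/8 + * indices sample eight roughly evenly spaced DPM levels into the + * high/mid1..mid6/low sclk bins. + */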
clks.clocks_in_khz[clks.num_levels-1], 1000); + dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels/8], 1000); + dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels*2/8], 1000); + dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels*3/8], 1000); + dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels*4/8], 1000); + dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels*5/8], 1000); + dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels*6/8], 1000); + dc->bw_vbios->low_sclk = bw_frc_to_fixed( + clks.clocks_in_khz[0], 1000); + + /*do memory clock*/ + dm_pp_get_clock_levels_by_type( + dc->ctx, + DM_PP_CLOCK_TYPE_MEMORY_CLK, + &clks); + + dc->bw_vbios->low_yclk = bw_frc_to_fixed( + clks.clocks_in_khz[0] * memory_type_multiplier, 1000); + dc->bw_vbios->mid_yclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier, + 1000); + dc->bw_vbios->high_yclk = bw_frc_to_fixed( + clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier, + 1000); + + return; + } + + /* convert all the clock fro kHz to fix point mHz TODO: wloop data */ + dc->bw_vbios->high_sclk = bw_frc_to_fixed( + eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000); + dc->bw_vbios->mid1_sclk = bw_frc_to_fixed( + eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000); + dc->bw_vbios->mid2_sclk = bw_frc_to_fixed( + eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000); + dc->bw_vbios->mid3_sclk = bw_frc_to_fixed( + eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000); + dc->bw_vbios->mid4_sclk = bw_frc_to_fixed( + eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000); + dc->bw_vbios->mid5_sclk = bw_frc_to_fixed( + eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000); + dc->bw_vbios->mid6_sclk = bw_frc_to_fixed( + eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000); + dc->bw_vbios->low_sclk = bw_frc_to_fixed( + eng_clks.data[0].clocks_in_khz, 1000); + + /*do memory clock*/ + dm_pp_get_clock_levels_by_type_with_latency( + dc->ctx, + DM_PP_CLOCK_TYPE_MEMORY_CLK, + &mem_clks); + + /* we don't need to call PPLIB for validation clock since they + * also give us the highest sclk and highest mclk (UMA clock). + * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula): + * YCLK = UMACLK*m_memoryTypeMultiplier + */ + dc->bw_vbios->low_yclk = bw_frc_to_fixed( + mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000); + dc->bw_vbios->mid_yclk = bw_frc_to_fixed( + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier, + 1000); + dc->bw_vbios->high_yclk = bw_frc_to_fixed( + mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier, + 1000); + + /* Now notify PPLib/SMU about which Watermarks sets they should select + * depending on DPM state they are in. 
And update BW MGR GFX Engine and + * Memory clock member variables for Watermarks calculations for each + * Watermark Set + */ + clk_ranges.num_wm_sets = 4; + clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A; + clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz = + eng_clks.data[0].clocks_in_khz; + clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz = + eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; + clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz = + mem_clks.data[0].clocks_in_khz; + clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz = + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; + + clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B; + clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz = + eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; + /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ + clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000; + clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz = + mem_clks.data[0].clocks_in_khz; + clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz = + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; + + clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C; + clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz = + eng_clks.data[0].clocks_in_khz; + clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz = + eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; + clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz = + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; + /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ + clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000; + + clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D; + clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz = + eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; + /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ + clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000; + clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz = + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; + /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ + clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000; + + /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ + dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges); +} + +static const struct resource_caps *dce112_resource_cap( + struct hw_asic_id *asic_id) +{ + if (ASIC_REV_IS_POLARIS11_M(asic_id->hw_internal_rev) || + ASIC_REV_IS_POLARIS12_V(asic_id->hw_internal_rev)) + return &polaris_11_resource_cap; + else + return &polaris_10_resource_cap; +} + +static bool dce112_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dce110_resource_pool *pool) +{ + unsigned int i; + struct dc_context *ctx = dc->ctx; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = dce112_resource_cap(&ctx->asic_id); + pool->base.funcs = &dce112_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/ + dc->caps.max_cursor_size = 128; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dual_link_dvi = true; + dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; + + 
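+ /* + * Unlike the DCE 11.0 pool earlier in this patch, which reserves two + * shared PLLs plus an external clock source for DP, DCE 11.2 gives + * every UNIPHY transmitter its own combo-PHY PLL (see + * find_matching_pll() above); DP streams instead share the single + * DP_DTO clock source created last below. + */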
/************************************************* + * Create resources * + *************************************************/ + + pool->base.clock_sources[DCE112_CLK_SRC_PLL0] = + dce112_clock_source_create( + ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCE112_CLK_SRC_PLL1] = + dce112_clock_source_create( + ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCE112_CLK_SRC_PLL2] = + dce112_clock_source_create( + ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCE112_CLK_SRC_PLL3] = + dce112_clock_source_create( + ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCE112_CLK_SRC_PLL4] = + dce112_clock_source_create( + ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + pool->base.clock_sources[DCE112_CLK_SRC_PLL5] = + dce112_clock_source_create( + ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL5, + &clk_src_regs[5], false); + pool->base.clk_src_count = DCE112_CLK_SRC_TOTAL; + + pool->base.dp_clock_source = dce112_clock_source_create( + ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true); + + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + } + + pool->base.dmcu = dce_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + { + struct irq_service_init_data init_data; + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dce110_create(&init_data); + if (!pool->base.irqs) + goto res_create_fail; + } + + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.timing_generators[i] = + dce112_timing_generator_create( + ctx, + i, + &dce112_tg_offsets[i]); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto res_create_fail; + } + + pool->base.mis[i] = dce112_mem_input_create(ctx, i); + if (pool->base.mis[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create memory input!\n"); + goto res_create_fail; + } + + pool->base.ipps[i] = dce112_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create input pixel processor!\n"); + goto res_create_fail; + } + + pool->base.transforms[i] = dce112_transform_create(ctx, i); + if (pool->base.transforms[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create transform!\n"); + goto res_create_fail; + } + + pool->base.opps[i] = dce112_opp_create( + ctx, + i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create output pixel processor!\n"); + goto res_create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dce112_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } + pool->base.hw_i2cs[i] = dce112_i2c_hw_create(ctx, i); + if 
(pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto res_create_fail; + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + /* Create hardware sequencer */ + dce112_hw_sequencer_construct(dc); + + bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); + + bw_calcs_data_update_from_pplib(dc); + + return true; + +res_create_fail: + dce112_resource_destruct(pool); + return false; +} + +struct resource_pool *dce112_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc) +{ + struct dce110_resource_pool *pool = + kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dce112_resource_construct(num_virtual_links, dc, pool)) + return &pool->base; + + kfree(pool); + BREAK_TO_DEBUGGER(); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h new file mode 100644 index 0000000000..1f57ebc6f9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h @@ -0,0 +1,57 @@ +/* +* Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE112_H__
+#define __DC_RESOURCE_DCE112_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+struct resource_pool *dce112_create_resource_pool(
+		uint8_t num_virtual_links,
+		struct dc *dc);
+
+enum dc_status dce112_validate_with_context(
+		struct dc *dc,
+		const struct dc_validation_set set[],
+		int set_count,
+		struct dc_state *context,
+		struct dc_state *old_context);
+
+bool dce112_validate_bandwidth(
+		struct dc *dc,
+		struct dc_state *context,
+		bool fast_validate);
+
+enum dc_status dce112_add_stream_to_ctx(
+		struct dc *dc,
+		struct dc_state *new_ctx,
+		struct dc_stream_state *dc_stream);
+
+
+#endif /* __DC_RESOURCE_DCE112_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
new file mode 100644
index 0000000000..20662edd0a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -0,0 +1,1288 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+*
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+
+#include "stream_encoder.h"
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dce120_resource.h"
+
+#include "dce112/dce112_resource.h"
+
+#include "dce110/dce110_resource.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce120/dce120_timing_generator.h"
+#include "irq/dce120/irq_service_dce120.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_mem_input.h"
+#include "dce/dce_panel_cntl.h"
+
+#include "dce110/dce110_hwseq.h"
+#include "dce120/dce120_hwseq.h"
+#include "dce/dce_transform.h"
+#include "clk_mgr.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_hwseq.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+
+#include "dce/dce_12_0_offset.h"
+#include "dce/dce_12_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+#include "nbio/nbio_6_1_offset.h"
+#include "mmhub/mmhub_1_0_offset.h"
+#include "mmhub/mmhub_1_0_sh_mask.h"
+#include "reg_helper.h"
+
+#include "dce100/dce100_resource.h"
+
+#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
+	#define mmDP0_DP_DPHY_INTERNAL_CTRL		0x210f
+	#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
+	#define mmDP1_DP_DPHY_INTERNAL_CTRL		0x220f
+	#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
+	#define mmDP2_DP_DPHY_INTERNAL_CTRL		0x230f
+	#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
+	#define mmDP3_DP_DPHY_INTERNAL_CTRL		0x240f
+	#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
+	#define mmDP4_DP_DPHY_INTERNAL_CTRL		0x250f
+	#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
+	#define mmDP5_DP_DPHY_INTERNAL_CTRL		0x260f
+	#define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
+	#define mmDP6_DP_DPHY_INTERNAL_CTRL		0x270f
+	#define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
+#endif
+
+enum dce120_clk_src_array_id {
+	DCE120_CLK_SRC_PLL0,
+	DCE120_CLK_SRC_PLL1,
+	DCE120_CLK_SRC_PLL2,
+	DCE120_CLK_SRC_PLL3,
+	DCE120_CLK_SRC_PLL4,
+	DCE120_CLK_SRC_PLL5,
+
+	DCE120_CLK_SRC_TOTAL
+};
+
+static const struct dce110_timing_generator_offsets dce120_tg_offsets[] = {
+	{
+		.crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+	},
+	{
+		.crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+	},
+	{
+		.crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+	},
+	{
+		.crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+	},
+	{
+		.crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+	},
+	{
+		.crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+	}
+};
+
+/* begin *********************
+ * macros to expand register list macro defined in HW object header file */
+
+#define BASE_INNER(seg) \
+	DCE_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE_INNER(seg) \
+	NBIF_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+	NBIO_BASE_INNER(seg)
+
+/* compile time expand base address.
*/
+#define BASE(seg) \
+	BASE_INNER(seg)
+
+#define SR(reg_name)\
+		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+					mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+					mm ## block ## id ## _ ## reg_name
+
+/* MMHUB */
+#define MMHUB_BASE_INNER(seg) \
+	MMHUB_BASE__INST0_SEG ## seg
+
+#define MMHUB_BASE(seg) \
+	MMHUB_BASE_INNER(seg)
+
+#define MMHUB_SR(reg_name)\
+		.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
+					mm ## reg_name
+
+/* macros to expand register list macro defined in HW object header file
+ * end *********************/
+
+
+static const struct dce_dmcu_registers dmcu_regs = {
+	DMCU_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+	DMCU_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+	DMCU_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+	ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+	ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+	ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+	IPP_DCE110_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+	ipp_regs(0),
+	ipp_regs(1),
+	ipp_regs(2),
+	ipp_regs(3),
+	ipp_regs(4),
+	ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+	IPP_DCE120_MASK_SH_LIST_SOC_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+	IPP_DCE120_MASK_SH_LIST_SOC_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+	XFM_COMMON_REG_LIST_DCE110(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+	transform_regs(0),
+	transform_regs(1),
+	transform_regs(2),
+	transform_regs(3),
+	transform_regs(4),
+	transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+	XFM_COMMON_MASK_SH_LIST_SOC_BASE(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+	XFM_COMMON_MASK_SH_LIST_SOC_BASE(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+	AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+	aux_regs(0),
+	aux_regs(1),
+	aux_regs(2),
+	aux_regs(3),
+	aux_regs(4),
+	aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+	HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+	hpd_regs(0),
+	hpd_regs(1),
+	hpd_regs(2),
+	hpd_regs(3),
+	hpd_regs(4),
+	hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+	LE_DCE120_REG_LIST(id), \
+	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+	link_regs(0),
+	link_regs(1),
+	link_regs(2),
+	link_regs(3),
+	link_regs(4),
+	link_regs(5),
+	link_regs(6),
+};
+
+
+#define stream_enc_regs(id)\
+[id] = {\
+	SE_COMMON_REG_LIST(id),\
+	.TMDS_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+	stream_enc_regs(0),
+	stream_enc_regs(1),
+	stream_enc_regs(2),
+	stream_enc_regs(3),
+	stream_enc_regs(4),
+	stream_enc_regs(5)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+	SE_COMMON_MASK_SH_LIST_DCE120(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+	SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
+};
+
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+	{ DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+	DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const
struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCE12_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE12_AUX_MASK_SH_LIST(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_DCE_120_REG_LIST(id),\ +} + +static const struct dce_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3), + opp_regs(4), + opp_regs(5) +}; + +static const struct dce_opp_shift opp_shift = { + OPP_COMMON_MASK_SH_LIST_DCE_120(__SHIFT) +}; + +static const struct dce_opp_mask opp_mask = { + OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK) +}; + #define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST(id), \ + .AUX_RESET_MASK = 0 \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4), + aux_engine_regs(5) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6), +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + case TRANSMITTER_UNIPHY_B: + return 1; + case TRANSMITTER_UNIPHY_C: + return 2; + case TRANSMITTER_UNIPHY_D: + return 3; + case TRANSMITTER_UNIPHY_E: + return 4; + case TRANSMITTER_UNIPHY_F: + return 5; + case TRANSMITTER_UNIPHY_G: + return 6; + default: + ASSERT(0); + return 0; + } +} + +#define clk_src_regs(index, id)\ +[index] = {\ + CS_COMMON_REG_LIST_DCE_112(id),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, C), + clk_src_regs(3, D), + clk_src_regs(4, E), + clk_src_regs(5, F) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCE_112(_MASK) +}; + +static struct output_pixel_processor *dce120_opp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce110_opp *opp = + kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); + + if (!opp) + return NULL; + + dce110_opp_construct(opp, + ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} +static struct dce_aux *dce120_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] 
= { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) +}; + +static struct dce_i2c_hw *dce120_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dce112_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX), + .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX) +}; + +static const struct resource_caps res_cap = { + .num_timing_generator = 6, + .num_audio = 7, + .num_stream_encoder = 6, + .num_pll = 6, + .num_ddc = 6, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCE_RGB, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = true + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 1, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 1, + .fp16 = 1 + } +}; + +static const struct dc_debug_options debug_defaults = { + .disable_clock_gate = true, + .enable_legacy_fast_update = true, +}; + +static struct clock_source *dce120_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(*clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dce112_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static void dce120_clock_source_destroy(struct clock_source **clk_src) +{ + kfree(TO_DCE110_CLK_SRC(*clk_src)); + *clk_src = NULL; +} + + +static bool dce120_hw_sequencer_create(struct dc *dc) +{ + /* All registers used by dce11.2 match those in dce11 in offset and + * structure + */ + dce120_hw_sequencer_construct(dc); + + /*TODO Move to separate file and Override what is needed */ + + return true; +} + +static struct timing_generator *dce120_timing_generator_create( + struct dc_context *ctx, + uint32_t instance, + const struct dce110_timing_generator_offsets *offsets) +{ + struct dce110_timing_generator *tg110 = + kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); + + if (!tg110) + return NULL; + + dce120_timing_generator_construct(tg110, ctx, instance, offsets); + return &tg110->base; +} + +static void dce120_transform_destroy(struct transform **xfm) +{ + kfree(TO_DCE_TRANSFORM(*xfm)); + *xfm = NULL; +} + +static void dce120_resource_destruct(struct dce110_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.opps[i] != NULL) + dce110_opp_destroy(&pool->base.opps[i]); + + if (pool->base.transforms[i] != NULL) + dce120_transform_destroy(&pool->base.transforms[i]); + + if (pool->base.ipps[i] != NULL) + dce_ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.mis[i] != NULL) { + kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); + pool->base.mis[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + + if 
(pool->base.timing_generators[i] != NULL) { + kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) + kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) + dce120_clock_source_destroy( + &pool->base.clock_sources[i]); + } + + if (pool->base.dp_clock_source != NULL) + dce120_clock_source_destroy(&pool->base.dp_clock_source); + + if (pool->base.abm != NULL) + dce_abm_destroy(&pool->base.abm); + + if (pool->base.dmcu != NULL) + dce_dmcu_destroy(&pool->base.dmcu); +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + uint32_t reg_val = dm_read_reg_soc15(ctx, mmCC_DC_MISC_STRAPS, 0); + + straps->audio_stream_number = get_reg_field_value(reg_val, + CC_DC_MISC_STRAPS, + AUDIO_STREAM_NUMBER); + straps->hdmi_disable = get_reg_field_value(reg_val, + CC_DC_MISC_STRAPS, + HDMI_DISABLE); + + reg_val = dm_read_reg_soc15(ctx, mmDC_PINSTRAPS, 0); + straps->dc_pinstraps_audio = get_reg_field_value(reg_val, + DC_PINSTRAPS, + DC_PINSTRAPS_AUDIO); +} + +static struct audio *create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = false, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true, +}; + +static struct link_encoder *dce120_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dce110_link_encoder *enc110 = + kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc110) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source]); + + return &enc110->base; +} + +static struct panel_cntl *dce120_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + +static struct input_pixel_processor *dce120_ipp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); + + if (!ipp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dce_ipp_construct(ipp, ctx, 
inst, + &ipp_regs[inst], &ipp_shift, &ipp_mask); + return &ipp->base; +} + +static struct stream_encoder *dce120_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dce110_stream_encoder *enc110 = + kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); + + if (!enc110) + return NULL; + + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + return &enc110->base; +} + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCE120_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCE12_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCE12_MASK_SH_LIST(_MASK) +}; + +/* HWSEQ regs for VG20 */ +static const struct dce_hwseq_registers dce121_hwseq_reg = { + HWSEQ_VG20_REG_LIST() +}; + +static const struct dce_hwseq_shift dce121_hwseq_shift = { + HWSEQ_VG20_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask dce121_hwseq_mask = { + HWSEQ_VG20_MASK_SH_LIST(_MASK) +}; + +static struct dce_hwseq *dce120_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} + +static struct dce_hwseq *dce121_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &dce121_hwseq_reg; + hws->shifts = &dce121_hwseq_shift; + hws->masks = &dce121_hwseq_mask; + } + return hws; +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = create_audio, + .create_stream_encoder = dce120_stream_encoder_create, + .create_hwseq = dce120_hwseq_create, +}; + +static const struct resource_create_funcs dce121_res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = create_audio, + .create_stream_encoder = dce120_stream_encoder_create, + .create_hwseq = dce121_hwseq_create, +}; + + +#define mi_inst_regs(id) { MI_DCE12_REG_LIST(id) } +static const struct dce_mem_input_registers mi_regs[] = { + mi_inst_regs(0), + mi_inst_regs(1), + mi_inst_regs(2), + mi_inst_regs(3), + mi_inst_regs(4), + mi_inst_regs(5), +}; + +static const struct dce_mem_input_shift mi_shifts = { + MI_DCE12_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_mem_input_mask mi_masks = { + MI_DCE12_MASK_SH_LIST(_MASK) +}; + +static struct mem_input *dce120_mem_input_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), + GFP_KERNEL); + + if (!dce_mi) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dce120_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); + return &dce_mi->base; +} + +static struct transform *dce120_transform_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_transform *transform = + kzalloc(sizeof(struct dce_transform), GFP_KERNEL); + + if (!transform) + return NULL; + + dce_transform_construct(transform, ctx, inst, + &xfm_regs[inst], &xfm_shift, &xfm_mask); + transform->lb_memory_size = 0x1404; /*5124*/ + return &transform->base; +} + +static void dce120_destroy_resource_pool(struct resource_pool **pool) +{ + struct 
dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+	dce120_resource_destruct(dce110_pool);
+	kfree(dce110_pool);
+	*pool = NULL;
+}
+
+static const struct resource_funcs dce120_res_pool_funcs = {
+	.destroy = dce120_destroy_resource_pool,
+	.link_enc_create = dce120_link_encoder_create,
+	.panel_cntl_create = dce120_panel_cntl_create,
+	.validate_bandwidth = dce112_validate_bandwidth,
+	.validate_plane = dce100_validate_plane,
+	.add_stream_to_ctx = dce112_add_stream_to_ctx,
+	.find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link
+};
+
+static void bw_calcs_data_update_from_pplib(struct dc *dc)
+{
+	struct dm_pp_clock_levels_with_latency eng_clks = {0};
+	struct dm_pp_clock_levels_with_latency mem_clks = {0};
+	struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
+	int i;
+	unsigned int clk;
+	unsigned int latency;
+	/*original logic in dal3*/
+	int memory_type_multiplier = MEMORY_TYPE_MULTIPLIER_CZ;
+
+	/*do system clock*/
+	if (!dm_pp_get_clock_levels_by_type_with_latency(
+			dc->ctx,
+			DM_PP_CLOCK_TYPE_ENGINE_CLK,
+			&eng_clks) || eng_clks.num_levels == 0) {
+
+		eng_clks.num_levels = 8;
+		clk = 300000;
+
+		for (i = 0; i < eng_clks.num_levels; i++) {
+			eng_clks.data[i].clocks_in_khz = clk;
+			clk += 100000;
+		}
+	}
+
+	/* convert all the clocks from kHz to fixed point MHz  TODO: wloop data */
+	dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+		eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000);
+	dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+		eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000);
+	dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+		eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000);
+	dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+		eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000);
+	dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+		eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000);
+	dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+		eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000);
+	dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+		eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000);
+	dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+		eng_clks.data[0].clocks_in_khz, 1000);
+
+	/*do memory clock*/
+	if (!dm_pp_get_clock_levels_by_type_with_latency(
+			dc->ctx,
+			DM_PP_CLOCK_TYPE_MEMORY_CLK,
+			&mem_clks) || mem_clks.num_levels == 0) {
+
+		mem_clks.num_levels = 3;
+		clk = 250000;
+		latency = 45;
+
+		for (i = 0; i < mem_clks.num_levels; i++) {
+			mem_clks.data[i].clocks_in_khz = clk;
+			mem_clks.data[i].latency_in_us = latency;
+			clk += 500000;
+			latency -= 5;
+		}
+
+	}
+
+	/* we don't need to call PPLIB for validation clock since they
+	 * also give us the highest sclk and highest mclk (UMA clock).
+	 * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula):
+	 * YCLK = UMACLK*m_memoryTypeMultiplier
+	 */
+	if (dc->bw_vbios->memory_type == bw_def_hbm)
+		memory_type_multiplier = MEMORY_TYPE_HBM;
+
+	dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+		mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
+	dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+		mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
+		1000);
+	dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+		mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
+		1000);
+
+	/* Now notify PPLib/SMU about which Watermarks sets they should select
+	 * depending on DPM state they are in.
And update BW MGR GFX Engine and + * Memory clock member variables for Watermarks calculations for each + * Watermark Set + */ + clk_ranges.num_wm_sets = 4; + clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A; + clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz = + eng_clks.data[0].clocks_in_khz; + clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz = + eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; + clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz = + mem_clks.data[0].clocks_in_khz; + clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz = + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; + + clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B; + clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz = + eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; + /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ + clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000; + clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz = + mem_clks.data[0].clocks_in_khz; + clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz = + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1; + + clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C; + clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz = + eng_clks.data[0].clocks_in_khz; + clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz = + eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1; + clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz = + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; + /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ + clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000; + + clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D; + clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz = + eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz; + /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */ + clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000; + clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz = + mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz; + /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */ + clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000; + + /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ + dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges); +} + +static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); + /* VG20 support max 6 pipes */ + value = value & 0x3f; + return value; +} + +static bool dce120_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dce110_resource_pool *pool) +{ + unsigned int i; + int j; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data irq_init_data; + static const struct resource_create_funcs *res_funcs; + bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev); + uint32_t pipe_fuses; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap; + pool->base.funcs = &dce120_res_pool_funcs; + + /* TODO: Fill more data from GreenlandAsicCapability.cpp */ + pool->base.pipe_count = res_cap.num_timing_generator; + pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/ + dc->caps.max_cursor_size = 128; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dual_link_dvi = true; + dc->caps.psp_setup_panel_mode = true; + dc->caps.extended_aux_timeout_support = 
false;
+	dc->debug = debug_defaults;
+
+	/*************************************************
+	 *  Create resources                             *
+	 *************************************************/
+
+	pool->base.clock_sources[DCE120_CLK_SRC_PLL0] =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL0,
+				&clk_src_regs[0], false);
+	pool->base.clock_sources[DCE120_CLK_SRC_PLL1] =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL1,
+				&clk_src_regs[1], false);
+	pool->base.clock_sources[DCE120_CLK_SRC_PLL2] =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL2,
+				&clk_src_regs[2], false);
+	pool->base.clock_sources[DCE120_CLK_SRC_PLL3] =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL3,
+				&clk_src_regs[3], false);
+	pool->base.clock_sources[DCE120_CLK_SRC_PLL4] =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL4,
+				&clk_src_regs[4], false);
+	pool->base.clock_sources[DCE120_CLK_SRC_PLL5] =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_COMBO_PHY_PLL5,
+				&clk_src_regs[5], false);
+	pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL;
+
+	pool->base.dp_clock_source =
+			dce120_clock_source_create(ctx, ctx->dc_bios,
+				CLOCK_SOURCE_ID_DP_DTO,
+				&clk_src_regs[0], true);
+
+	for (i = 0; i < pool->base.clk_src_count; i++) {
+		if (pool->base.clock_sources[i] == NULL) {
+			dm_error("DC: failed to create clock sources!\n");
+			BREAK_TO_DEBUGGER();
+			goto clk_src_create_fail;
+		}
+	}
+
+	pool->base.dmcu = dce_dmcu_create(ctx,
+			&dmcu_regs,
+			&dmcu_shift,
+			&dmcu_mask);
+	if (pool->base.dmcu == NULL) {
+		dm_error("DC: failed to create dmcu!\n");
+		BREAK_TO_DEBUGGER();
+		goto res_create_fail;
+	}
+
+	pool->base.abm = dce_abm_create(ctx,
+			&abm_regs,
+			&abm_shift,
+			&abm_mask);
+	if (pool->base.abm == NULL) {
+		dm_error("DC: failed to create abm!\n");
+		BREAK_TO_DEBUGGER();
+		goto res_create_fail;
+	}
+
+
+	irq_init_data.ctx = dc->ctx;
+	pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data);
+	if (!pool->base.irqs)
+		goto irqs_create_fail;
+
+	/* VG20: Pipe harvesting enabled, retrieve valid pipe fuses */
+	if (is_vg20)
+		pipe_fuses = read_pipe_fuses(ctx);
+
+	/* index to valid pipe resource */
+	j = 0;
+	for (i = 0; i < pool->base.pipe_count; i++) {
+		if (is_vg20) {
+			if ((pipe_fuses & (1 << i)) != 0) {
+				dm_error("DC: skip invalid pipe %d!\n", i);
+				continue;
+			}
+		}
+
+		pool->base.timing_generators[j] =
+				dce120_timing_generator_create(
+					ctx,
+					i,
+					&dce120_tg_offsets[i]);
+		if (pool->base.timing_generators[j] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error("DC: failed to create tg!\n");
+			goto controller_create_fail;
+		}
+
+		pool->base.mis[j] = dce120_mem_input_create(ctx, i);
+
+		if (pool->base.mis[j] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create memory input!\n");
+			goto controller_create_fail;
+		}
+
+		pool->base.ipps[j] = dce120_ipp_create(ctx, i);
+		if (pool->base.ipps[j] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create input pixel processor!\n");
+			goto controller_create_fail;
+		}
+
+		pool->base.transforms[j] = dce120_transform_create(ctx, i);
+		if (pool->base.transforms[j] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create transform!\n");
+			goto controller_create_fail;
+		}
+
+		pool->base.opps[j] = dce120_opp_create(
+			ctx,
+			i);
+		if (pool->base.opps[j] == NULL) {
+			BREAK_TO_DEBUGGER();
+			dm_error(
+				"DC: failed to create output pixel processor!\n");
+			goto controller_create_fail;
+		}
+
+		/* check next valid pipe */
+		j++;
+	}
+
+	for (i = 0; i <
pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dce120_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } + pool->base.hw_i2cs[i] = dce120_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* valid pipe num */ + pool->base.pipe_count = j; + pool->base.timing_generator_count = j; + + if (is_vg20) + res_funcs = &dce121_res_create_funcs; + else + res_funcs = &res_create_funcs; + + if (!resource_construct(num_virtual_links, dc, &pool->base, res_funcs)) + goto res_create_fail; + + /* Create hardware sequencer */ + if (!dce120_hw_sequencer_create(dc)) + goto controller_create_fail; + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); + + bw_calcs_data_update_from_pplib(dc); + + return true; + +irqs_create_fail: +controller_create_fail: +clk_src_create_fail: +res_create_fail: + + dce120_resource_destruct(pool); + + return false; +} + +struct resource_pool *dce120_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc) +{ + struct dce110_resource_pool *pool = + kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dce120_resource_construct(num_virtual_links, dc, pool)) + return &pool->base; + + kfree(pool); + BREAK_TO_DEBUGGER(); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h new file mode 100644 index 0000000000..3d1f3cf012 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h @@ -0,0 +1,39 @@ +/* +* Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_RESOURCE_DCE120_H__ +#define __DC_RESOURCE_DCE120_H__ + +#include "core_types.h" + +struct dc; +struct resource_pool; + +struct resource_pool *dce120_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc); + +#endif /* __DC_RESOURCE_DCE120_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt new file mode 100644 index 0000000000..19dd73bc9a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dce80_resource.c + dce80_resource.h + ) \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c new file mode 100644 index 0000000000..35a2cce0c2 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -0,0 +1,1544 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + +#include "dm_services.h" + +#include "link_encoder.h" +#include "stream_encoder.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "irq/dce80/irq_service_dce80.h" +#include "dce110/dce110_timing_generator.h" +#include "dce110/dce110_resource.h" +#include "dce80/dce80_timing_generator.h" +#include "dce/dce_mem_input.h" +#include "dce/dce_link_encoder.h" +#include "dce/dce_stream_encoder.h" +#include "dce/dce_ipp.h" +#include "dce/dce_transform.h" +#include "dce/dce_opp.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "dce80/dce80_hwseq.h" +#include "dce100/dce100_resource.h" +#include "dce/dce_panel_cntl.h" + +#include "reg_helper.h" + +#include "dce/dce_dmcu.h" +#include "dce/dce_aux.h" +#include "dce/dce_abm.h" +#include "dce/dce_i2c.h" +/* TODO remove this include */ + +#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT +#include "gmc/gmc_7_1_d.h" +#include "gmc/gmc_7_1_sh_mask.h" +#endif + +#include "dce80/dce80_resource.h" + +#ifndef mmDP_DPHY_INTERNAL_CTRL +#define mmDP_DPHY_INTERNAL_CTRL 0x1CDE +#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE +#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x1FDE +#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x42DE +#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x45DE +#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x48DE +#define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4BDE +#define mmDP6_DP_DPHY_INTERNAL_CTRL 0x4EDE +#endif + + +#ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_2 0x05CB + #define mmBIOS_SCRATCH_3 0x05CC + #define mmBIOS_SCRATCH_6 0x05CF +#endif + +#ifndef mmDP_DPHY_FAST_TRAINING + #define mmDP_DPHY_FAST_TRAINING 0x1CCE + #define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE + #define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE + #define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE + #define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE + #define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE + #define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE + #define mmDP6_DP_DPHY_FAST_TRAINING 0x4ECE +#endif + + +#ifndef mmHPD_DC_HPD_CONTROL + #define mmHPD_DC_HPD_CONTROL 0x189A + #define mmHPD0_DC_HPD_CONTROL 0x189A + #define mmHPD1_DC_HPD_CONTROL 0x18A2 + #define mmHPD2_DC_HPD_CONTROL 0x18AA + #define mmHPD3_DC_HPD_CONTROL 0x18B2 + #define mmHPD4_DC_HPD_CONTROL 0x18BA + #define mmHPD5_DC_HPD_CONTROL 0x18C2 +#endif + +#define DCE11_DIG_FE_CNTL 0x4a00 +#define DCE11_DIG_BE_CNTL 0x4a47 +#define DCE11_DP_SEC 0x4ac3 + +static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = { + { + .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmGRPH_CONTROL - mmGRPH_CONTROL), + .dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL + - mmDPG_WATERMARK_MASK_CONTROL), + }, + { + .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL), + .dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL + - mmDPG_WATERMARK_MASK_CONTROL), + }, + { + .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL), + .dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL + - mmDPG_WATERMARK_MASK_CONTROL), + }, + { + .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL), + .dmif = (mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL + - mmDPG_WATERMARK_MASK_CONTROL), + }, + { + .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL), + .dmif = (mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL + - mmDPG_WATERMARK_MASK_CONTROL), + }, + { + .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL), + .dcp = 
(mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL), + .dmif = (mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL + - mmDPG_WATERMARK_MASK_CONTROL), + } +}; + +/* set register offset */ +#define SR(reg_name)\ + .reg_name = mm ## reg_name + +/* set register offset with instance */ +#define SRI(reg_name, block, id)\ + .reg_name = mm ## block ## id ## _ ## reg_name + +#define ipp_regs(id)\ +[id] = {\ + IPP_COMMON_REG_LIST_DCE_BASE(id)\ +} + +static const struct dce_ipp_registers ipp_regs[] = { + ipp_regs(0), + ipp_regs(1), + ipp_regs(2), + ipp_regs(3), + ipp_regs(4), + ipp_regs(5) +}; + +static const struct dce_ipp_shift ipp_shift = { + IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce_ipp_mask ipp_mask = { + IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +#define transform_regs(id)\ +[id] = {\ + XFM_COMMON_REG_LIST_DCE80(id)\ +} + +static const struct dce_transform_registers xfm_regs[] = { + transform_regs(0), + transform_regs(1), + transform_regs(2), + transform_regs(3), + transform_regs(4), + transform_regs(5) +}; + +static const struct dce_transform_shift xfm_shift = { + XFM_COMMON_MASK_SH_LIST_DCE80(__SHIFT) +}; + +static const struct dce_transform_mask xfm_mask = { + XFM_COMMON_MASK_SH_LIST_DCE80(_MASK) +}; + +#define aux_regs(id)\ +[id] = {\ + AUX_REG_LIST(id)\ +} + +static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4), + aux_regs(5) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4), + hpd_regs(5) +}; + +#define link_regs(id)\ +[id] = {\ + LE_DCE80_REG_LIST(id)\ +} + +static const struct dce110_link_enc_registers link_enc_regs[] = { + link_regs(0), + link_regs(1), + link_regs(2), + link_regs(3), + link_regs(4), + link_regs(5), + link_regs(6), +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_COMMON_REG_LIST_DCE_BASE(id),\ + .AFMT_CNTL = 0,\ +} + +static const struct dce110_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4), + stream_enc_regs(5), + stream_enc_regs(6) +}; + +static const struct dce_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT) +}; + +static const struct dce_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK) +}; + +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCE_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_DCE_80_REG_LIST(id),\ +} + +static const struct dce_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3), + opp_regs(4), + opp_regs(5) +}; + +static const struct dce_opp_shift opp_shift = { + OPP_COMMON_MASK_SH_LIST_DCE_80(__SHIFT) +}; + +static const struct dce_opp_mask opp_mask = { + OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK) +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCE10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE10_AUX_MASK_SH_LIST(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST(id), \ + .AUX_RESET_MASK = 0 \ +} + +static const struct 
dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4), + aux_engine_regs(5) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6), +}; + +static const struct dce_audio_shift audio_shift = { + AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define clk_src_regs(id)\ +[id] = {\ + CS_COMMON_REG_LIST_DCE_80(id),\ +} + + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0), + clk_src_regs(1), + clk_src_regs(2) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, + .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 +}; + +static const struct resource_caps res_cap = { + .num_timing_generator = 6, + .num_audio = 6, + .num_stream_encoder = 6, + .num_pll = 3, + .num_ddc = 6, +}; + +static const struct resource_caps res_cap_81 = { + .num_timing_generator = 4, + .num_audio = 7, + .num_stream_encoder = 7, + .num_pll = 3, + .num_ddc = 6, +}; + +static const struct resource_caps res_cap_83 = { + .num_timing_generator = 2, + .num_audio = 6, + .num_stream_encoder = 6, + .num_pll = 2, + .num_ddc = 2, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCE_RGB, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = true + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 1, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 1, + .fp16 = 1 + } +}; + +static const struct dc_debug_options debug_defaults = { + .enable_legacy_fast_update = true, +}; + +static const struct dce_dmcu_registers dmcu_regs = { + DMCU_DCE80_REG_LIST() +}; + +static const struct dce_dmcu_shift dmcu_shift = { + DMCU_MASK_SH_LIST_DCE80(__SHIFT) +}; + +static const struct dce_dmcu_mask dmcu_mask = { + DMCU_MASK_SH_LIST_DCE80(_MASK) +}; +static const struct dce_abm_registers abm_regs = { + ABM_DCE110_COMMON_REG_LIST() +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCE110(_MASK) +}; + +#define CTX ctx +#define REG(reg) mm ## reg + +#ifndef mmCC_DC_HDMI_STRAPS +#define mmCC_DC_HDMI_STRAPS 0x1918 +#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40 +#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6 +#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700 +#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 +#endif + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + case TRANSMITTER_UNIPHY_B: + return 1; + case TRANSMITTER_UNIPHY_C: + return 2; + case TRANSMITTER_UNIPHY_D: + return 3; + case TRANSMITTER_UNIPHY_E: + return 4; + case TRANSMITTER_UNIPHY_F: + return 5; + case TRANSMITTER_UNIPHY_G: + return 6; + default: + ASSERT(0); + return 0; + } +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + REG_GET_2(CC_DC_HDMI_STRAPS, + HDMI_DISABLE, &straps->hdmi_disable, + 
AUDIO_STREAM_NUMBER, &straps->audio_stream_number); + + REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio); +} + +static struct audio *create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct timing_generator *dce80_timing_generator_create( + struct dc_context *ctx, + uint32_t instance, + const struct dce110_timing_generator_offsets *offsets) +{ + struct dce110_timing_generator *tg110 = + kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL); + + if (!tg110) + return NULL; + + dce80_timing_generator_construct(tg110, ctx, instance, offsets); + return &tg110->base; +} + +static struct output_pixel_processor *dce80_opp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce110_opp *opp = + kzalloc(sizeof(struct dce110_opp), GFP_KERNEL); + + if (!opp) + return NULL; + + dce110_opp_construct(opp, + ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dce80_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) +}; + +static struct dce_i2c_hw *dce80_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dce_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} + +static struct dce_i2c_sw *dce80_i2c_sw_create( + struct dc_context *ctx) +{ + struct dce_i2c_sw *dce_i2c_sw = + kzalloc(sizeof(struct dce_i2c_sw), GFP_KERNEL); + + if (!dce_i2c_sw) + return NULL; + + dce_i2c_sw_construct(dce_i2c_sw, ctx); + + return dce_i2c_sw; +} +static struct stream_encoder *dce80_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dce110_stream_encoder *enc110 = + kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); + + if (!enc110) + return NULL; + + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + return &enc110->base; +} + +#define SRII(reg_name, block, id)\ + .reg_name[id] = mm ## block ## id ## _ ## reg_name + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCE8_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCE8_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCE8_MASK_SH_LIST(_MASK) +}; + +static struct dce_hwseq *dce80_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = 
&hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = create_audio, + .create_stream_encoder = dce80_stream_encoder_create, + .create_hwseq = dce80_hwseq_create, +}; + +#define mi_inst_regs(id) { \ + MI_DCE8_REG_LIST(id), \ + .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \ +} +static const struct dce_mem_input_registers mi_regs[] = { + mi_inst_regs(0), + mi_inst_regs(1), + mi_inst_regs(2), + mi_inst_regs(3), + mi_inst_regs(4), + mi_inst_regs(5), +}; + +static const struct dce_mem_input_shift mi_shifts = { + MI_DCE8_MASK_SH_LIST(__SHIFT), + .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT +}; + +static const struct dce_mem_input_mask mi_masks = { + MI_DCE8_MASK_SH_LIST(_MASK), + .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK +}; + +static struct mem_input *dce80_mem_input_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input), + GFP_KERNEL); + + if (!dce_mi) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); + dce_mi->wa.single_head_rdreq_dmif_limit = 2; + return &dce_mi->base; +} + +static void dce80_transform_destroy(struct transform **xfm) +{ + kfree(TO_DCE_TRANSFORM(*xfm)); + *xfm = NULL; +} + +static struct transform *dce80_transform_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_transform *transform = + kzalloc(sizeof(struct dce_transform), GFP_KERNEL); + + if (!transform) + return NULL; + + dce_transform_construct(transform, ctx, inst, + &xfm_regs[inst], &xfm_shift, &xfm_mask); + transform->prescaler_on = false; + return &transform->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 297000, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true +}; + +static struct link_encoder *dce80_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dce110_link_encoder *enc110 = + kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc110) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source]); + return &enc110->base; +} + +static struct panel_cntl *dce80_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + +static struct clock_source *dce80_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dce110_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + 
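+/*
+ * Note on the create/destroy pairs in this file: each *_create() helper
+ * kzalloc()s a DCE-specific wrapper, runs the matching construct helper
+ * against the static register/shift/mask tables declared above, and
+ * returns only the embedded base object; the destroy helpers cast the
+ * base pointer back to the wrapper (e.g. TO_DCE110_CLK_SRC) before
+ * freeing it.  A minimal sketch of the shape, using hypothetical names
+ * ("foo"/"dce_foo" are illustrative only, not symbols from this file):
+ *
+ *	static struct foo *dce80_foo_create(struct dc_context *ctx, uint32_t inst)
+ *	{
+ *		struct dce_foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
+ *
+ *		if (!f)
+ *			return NULL;
+ *		dce_foo_construct(f, ctx, inst, &foo_regs[inst], &foo_shift, &foo_mask);
+ *		return &f->base;
+ *	}
+ *
+ *	static void dce80_foo_destroy(struct foo **foo)
+ *	{
+ *		kfree(TO_DCE_FOO(*foo));
+ *		*foo = NULL;
+ *	}
+ */
+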
+static void dce80_clock_source_destroy(struct clock_source **clk_src) +{ + kfree(TO_DCE110_CLK_SRC(*clk_src)); + *clk_src = NULL; +} + +static struct input_pixel_processor *dce80_ipp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL); + + if (!ipp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dce_ipp_construct(ipp, ctx, inst, + &ipp_regs[inst], &ipp_shift, &ipp_mask); + return &ipp->base; +} + +static void dce80_resource_destruct(struct dce110_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.opps[i] != NULL) + dce110_opp_destroy(&pool->base.opps[i]); + + if (pool->base.transforms[i] != NULL) + dce80_transform_destroy(&pool->base.transforms[i]); + + if (pool->base.ipps[i] != NULL) + dce_ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.mis[i] != NULL) { + kfree(TO_DCE_MEM_INPUT(pool->base.mis[i])); + pool->base.mis[i] = NULL; + } + + if (pool->base.timing_generators[i] != NULL) { + kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) + kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i])); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dce80_clock_source_destroy(&pool->base.clock_sources[i]); + } + } + + if (pool->base.abm != NULL) + dce_abm_destroy(&pool->base.abm); + + if (pool->base.dmcu != NULL) + dce_dmcu_destroy(&pool->base.dmcu); + + if (pool->base.dp_clock_source != NULL) + dce80_clock_source_destroy(&pool->base.dp_clock_source); + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i] != NULL) { + dce_aud_destroy(&pool->base.audios[i]); + } + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } +} + +static bool dce80_validate_bandwidth( + struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + int i; + bool at_least_one_pipe = false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].stream) + at_least_one_pipe = true; + } + + if (at_least_one_pipe) { + /* TODO implement when needed but for now hardcode max value*/ + context->bw_ctx.bw.dce.dispclk_khz = 681000; + context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; + } else { + context->bw_ctx.bw.dce.dispclk_khz = 0; + context->bw_ctx.bw.dce.yclk_khz = 0; + } + + return true; +} + +static bool dce80_validate_surface_sets( + struct dc_state *context) +{ + int i; + + for (i = 0; i < context->stream_count; i++) { + if (context->stream_status[i].plane_count == 0) + continue; + + if (context->stream_status[i].plane_count > 1) + return false; + + if (context->stream_status[i].plane_states[0]->format + >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return false; + } + + return true; +} + +static enum dc_status dce80_validate_global( + struct dc *dc, + struct dc_state *context) +{ + if (!dce80_validate_surface_sets(context)) + return DC_FAIL_SURFACE_VALIDATE; + + return DC_OK; +} + +static void dce80_destroy_resource_pool(struct 
resource_pool **pool) +{ + struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); + + dce80_resource_destruct(dce110_pool); + kfree(dce110_pool); + *pool = NULL; +} + +static const struct resource_funcs dce80_res_pool_funcs = { + .destroy = dce80_destroy_resource_pool, + .link_enc_create = dce80_link_encoder_create, + .panel_cntl_create = dce80_panel_cntl_create, + .validate_bandwidth = dce80_validate_bandwidth, + .validate_plane = dce100_validate_plane, + .add_stream_to_ctx = dce100_add_stream_to_ctx, + .validate_global = dce80_validate_global, + .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link +}; + +static bool dce80_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dce110_resource_pool *pool) +{ + unsigned int i; + struct dc_context *ctx = dc->ctx; + struct dc_bios *bp; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap; + pool->base.funcs = &dce80_res_pool_funcs; + + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = res_cap.num_timing_generator; + pool->base.timing_generator_count = res_cap.num_timing_generator; + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 40; + dc->caps.i2c_speed_in_khz_hdcp = 40; + dc->caps.max_cursor_size = 128; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dual_link_dvi = true; + dc->caps.extended_aux_timeout_support = false; + dc->debug = debug_defaults; + + /************************************************* + * Create resources * + *************************************************/ + + bp = ctx->dc_bios; + + if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { + pool->base.dp_clock_source = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + + pool->base.clock_sources[0] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); + pool->base.clock_sources[1] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[2] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 3; + + } else { + pool->base.dp_clock_source = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); + + pool->base.clock_sources[0] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[1] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 2; + } + + if (pool->base.dp_clock_source == NULL) { + dm_error("DC: failed to create dp clock source!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + } + + pool->base.dmcu = dce_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + { + struct 
irq_service_init_data init_data; + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dce80_create(&init_data); + if (!pool->base.irqs) + goto res_create_fail; + } + + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.timing_generators[i] = dce80_timing_generator_create( + ctx, i, &dce80_tg_offsets[i]); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto res_create_fail; + } + + pool->base.mis[i] = dce80_mem_input_create(ctx, i); + if (pool->base.mis[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create memory input!\n"); + goto res_create_fail; + } + + pool->base.ipps[i] = dce80_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create input pixel processor!\n"); + goto res_create_fail; + } + + pool->base.transforms[i] = dce80_transform_create(ctx, i); + if (pool->base.transforms[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create transform!\n"); + goto res_create_fail; + } + + pool->base.opps[i] = dce80_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create output pixel processor!\n"); + goto res_create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dce80_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } + pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); + if (pool->base.sw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create sw i2c!!\n"); + goto res_create_fail; + } + } + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->caps.disable_dp_clk_share = true; + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto res_create_fail; + + /* Create hardware sequencer */ + dce80_hw_sequencer_construct(dc); + + return true; + +res_create_fail: + dce80_resource_destruct(pool); + return false; +} + +struct resource_pool *dce80_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc) +{ + struct dce110_resource_pool *pool = + kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dce80_construct(num_virtual_links, dc, pool)) + return &pool->base; + + kfree(pool); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static bool dce81_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dce110_resource_pool *pool) +{ + unsigned int i; + struct dc_context *ctx = dc->ctx; + struct dc_bios *bp; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_81; + pool->base.funcs = &dce80_res_pool_funcs; + + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = res_cap_81.num_timing_generator; + pool->base.timing_generator_count = res_cap_81.num_timing_generator; + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 40; + dc->caps.i2c_speed_in_khz_hdcp = 40; + dc->caps.max_cursor_size = 128; + dc->caps.min_horizontal_blanking_period 
= 80; + dc->caps.is_apu = true; + + /************************************************* + * Create resources * + *************************************************/ + + bp = ctx->dc_bios; + + if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { + pool->base.dp_clock_source = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + + pool->base.clock_sources[0] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); + pool->base.clock_sources[1] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[2] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 3; + + } else { + pool->base.dp_clock_source = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); + + pool->base.clock_sources[0] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[1] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 2; + } + + if (pool->base.dp_clock_source == NULL) { + dm_error("DC: failed to create dp clock source!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + } + + pool->base.dmcu = dce_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + { + struct irq_service_init_data init_data; + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dce80_create(&init_data); + if (!pool->base.irqs) + goto res_create_fail; + } + + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.timing_generators[i] = dce80_timing_generator_create( + ctx, i, &dce80_tg_offsets[i]); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto res_create_fail; + } + + pool->base.mis[i] = dce80_mem_input_create(ctx, i); + if (pool->base.mis[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create memory input!\n"); + goto res_create_fail; + } + + pool->base.ipps[i] = dce80_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create input pixel processor!\n"); + goto res_create_fail; + } + + pool->base.transforms[i] = dce80_transform_create(ctx, i); + if (pool->base.transforms[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create transform!\n"); + goto res_create_fail; + } + + pool->base.opps[i] = dce80_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create output pixel processor!\n"); + goto res_create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dce80_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } + pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); + if 
(pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); + if (pool->base.sw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create sw i2c!!\n"); + goto res_create_fail; + } + } + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->caps.disable_dp_clk_share = true; + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto res_create_fail; + + /* Create hardware sequencer */ + dce80_hw_sequencer_construct(dc); + + return true; + +res_create_fail: + dce80_resource_destruct(pool); + return false; +} + +struct resource_pool *dce81_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc) +{ + struct dce110_resource_pool *pool = + kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dce81_construct(num_virtual_links, dc, pool)) + return &pool->base; + + kfree(pool); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static bool dce83_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dce110_resource_pool *pool) +{ + unsigned int i; + struct dc_context *ctx = dc->ctx; + struct dc_bios *bp; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_83; + pool->base.funcs = &dce80_res_pool_funcs; + + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = res_cap_83.num_timing_generator; + pool->base.timing_generator_count = res_cap_83.num_timing_generator; + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 40; + dc->caps.i2c_speed_in_khz_hdcp = 40; + dc->caps.max_cursor_size = 128; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.is_apu = true; + dc->debug = debug_defaults; + + /************************************************* + * Create resources * + *************************************************/ + + bp = ctx->dc_bios; + + if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { + pool->base.dp_clock_source = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + + pool->base.clock_sources[0] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false); + pool->base.clock_sources[1] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); + pool->base.clk_src_count = 2; + + } else { + pool->base.dp_clock_source = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true); + + pool->base.clock_sources[0] = + dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); + pool->base.clk_src_count = 1; + } + + if (pool->base.dp_clock_source == NULL) { + dm_error("DC: failed to create dp clock source!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + } + + pool->base.dmcu = dce_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + 
&abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + { + struct irq_service_init_data init_data; + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dce80_create(&init_data); + if (!pool->base.irqs) + goto res_create_fail; + } + + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.timing_generators[i] = dce80_timing_generator_create( + ctx, i, &dce80_tg_offsets[i]); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto res_create_fail; + } + + pool->base.mis[i] = dce80_mem_input_create(ctx, i); + if (pool->base.mis[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create memory input!\n"); + goto res_create_fail; + } + + pool->base.ipps[i] = dce80_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create input pixel processor!\n"); + goto res_create_fail; + } + + pool->base.transforms[i] = dce80_transform_create(ctx, i); + if (pool->base.transforms[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create transform!\n"); + goto res_create_fail; + } + + pool->base.opps[i] = dce80_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create output pixel processor!\n"); + goto res_create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dce80_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto res_create_fail; + } + pool->base.hw_i2cs[i] = dce80_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create i2c engine!!\n"); + goto res_create_fail; + } + pool->base.sw_i2cs[i] = dce80_i2c_sw_create(ctx); + if (pool->base.sw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create sw i2c!!\n"); + goto res_create_fail; + } + } + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->caps.disable_dp_clk_share = true; + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto res_create_fail; + + /* Create hardware sequencer */ + dce80_hw_sequencer_construct(dc); + + return true; + +res_create_fail: + dce80_resource_destruct(pool); + return false; +} + +struct resource_pool *dce83_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc) +{ + struct dce110_resource_pool *pool = + kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dce83_construct(num_virtual_links, dc, pool)) + return &pool->base; + + kfree(pool); + BREAK_TO_DEBUGGER(); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h new file mode 100644 index 0000000000..eff31ab83a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h @@ -0,0 +1,47 @@ +/* +* Copyright 2012-15 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_RESOURCE_DCE80_H__ +#define __DC_RESOURCE_DCE80_H__ + +#include "core_types.h" + +struct dc; +struct resource_pool; + +struct resource_pool *dce80_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc); + +struct resource_pool *dce81_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc); + +struct resource_pool *dce83_create_resource_pool( + uint8_t num_virtual_links, + struct dc *dc); + +#endif /* __DC_RESOURCE_DCE80_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c new file mode 100644 index 0000000000..d08d109692 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -0,0 +1,1692 @@ +/* +* Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
 + * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dc.h" + +#include "dcn10/dcn10_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn10/dcn10_resource.h" +#include "dcn10/dcn10_ipp.h" +#include "dcn10/dcn10_mpc.h" + +#include "dcn10/dcn10_dwb.h" + +#include "irq/dcn10/irq_service_dcn10.h" +#include "dcn10/dcn10_dpp.h" +#include "dcn10/dcn10_optc.h" +#include "dcn10/dcn10_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_opp.h" +#include "dcn10/dcn10_link_encoder.h" +#include "dcn10/dcn10_stream_encoder.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dce112/dce112_resource.h" +#include "dcn10/dcn10_hubp.h" +#include "dcn10/dcn10_hubbub.h" +#include "dce/dce_panel_cntl.h" + +#include "soc15_hw_ip.h" +#include "vega10_ip_offset.h" + +#include "dcn/dcn_1_0_offset.h" +#include "dcn/dcn_1_0_sh_mask.h" + +#include "nbio/nbio_7_0_offset.h" + +#include "mmhub/mmhub_9_1_offset.h" +#include "mmhub/mmhub_9_1_sh_mask.h" + +#include "reg_helper.h" +#include "dce/dce_abm.h" +#include "dce/dce_dmcu.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" + +#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL + #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f + #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 + #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f + #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 + #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f + #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 + #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f + #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 + #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f + #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 + #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f + #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 + #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f + #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 +#endif + + +enum dcn10_clk_src_array_id { + DCN10_CLK_SRC_PLL0, + DCN10_CLK_SRC_PLL1, + DCN10_CLK_SRC_PLL2, + DCN10_CLK_SRC_PLL3, + DCN10_CLK_SRC_TOTAL, + DCN101_CLK_SRC_TOTAL = DCN10_CLK_SRC_PLL3 +}; + +/* begin ********************* + * macros to expand register list macros defined in HW object header file */ + +/* DCN */ +#define BASE_INNER(seg) \ + DCE_BASE__INST0_SEG ## seg + +#define BASE(seg) \ + BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## 0 ## _ ## block ## id + +/* set field/register/bitfield name */ +#define SFRB(field_name, reg_name, bitfield, post_fix)\ + .field_name = reg_name ## __ ## bitfield ## post_fix + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIF_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +/* MMHUB */ +#define MMHUB_BASE_INNER(seg) \ + MMHUB_BASE__INST0_SEG ## seg + +#define MMHUB_BASE(seg) \ + MMHUB_BASE_INNER(seg) + +#define MMHUB_SR(reg_name)\ + .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ 
 + mm ## reg_name + +/* macros to expand register list macros defined in HW object header file + * end *********************/ + + +static const struct dce_dmcu_registers dmcu_regs = { + DMCU_DCN10_REG_LIST() +}; + +static const struct dce_dmcu_shift dmcu_shift = { + DMCU_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dce_dmcu_mask dmcu_mask = { + DMCU_MASK_SH_LIST_DCN10(_MASK) +}; + +static const struct dce_abm_registers abm_regs = { + ABM_DCN10_REG_LIST(0) +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN10(_MASK) +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_DCN_REG_LIST(id)\ +} + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN10(_MASK) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define aux_regs(id)\ +[id] = {\ + AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3) +}; + +#define link_regs(id)\ +[id] = {\ + LE_DCN10_REG_LIST(id), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ +} + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0), + link_regs(1), + link_regs(2), + link_regs(3) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK) +}; + +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCN_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCN10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN10_AUX_MASK_SH_LIST(_MASK) +}; + +#define ipp_regs(id)\ +[id] = {\ + IPP_REG_LIST_DCN10(id),\ +} + +static const struct dcn10_ipp_registers ipp_regs[] = { + ipp_regs(0), + ipp_regs(1), + ipp_regs(2), + ipp_regs(3), +}; + +static const struct dcn10_ipp_shift ipp_shift = { + IPP_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dcn10_ipp_mask ipp_mask = { + IPP_MASK_SH_LIST_DCN10(_MASK), +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN10(id),\ +} + 
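The reg-list macros above all rely on the same C designated-initializer trick: a per-instance helper macro expands to one complete array element, so every hardware instance gets its own register addresses stamped out from a single template. A compilable sketch of the pattern follows; DEMO_REG_LIST, demo_entry and demo_regs are illustrative names, not DCN macros.

/* One template computes per-instance register offsets... */
#define DEMO_REG_LIST(id) \
	.ctrl   = 0x1000 + 0x100 * (id), \
	.status = 0x1004 + 0x100 * (id)

/* ...and a wrapper stamps it out as a designated array initializer. */
#define demo_entry(id) [id] = { DEMO_REG_LIST(id) }

struct demo_regs {
	unsigned int ctrl;
	unsigned int status;
};

static const struct demo_regs demo_regs[] = {
	demo_entry(0),
	demo_entry(1),	/* adding a hardware instance is a one-line change */
};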
+static const struct dcn10_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3), +}; + +static const struct dcn10_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dcn10_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN10(_MASK), +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST(id), \ + .AUX_RESET_MASK = 0 \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4), + aux_engine_regs(5) +}; + +#define tf_regs(id)\ +[id] = {\ + TF_REG_LIST_DCN10(id),\ +} + +static const struct dcn_dpp_registers tf_regs[] = { + tf_regs(0), + tf_regs(1), + tf_regs(2), + tf_regs(3), +}; + +static const struct dcn_dpp_shift tf_shift = { + TF_REG_LIST_SH_MASK_DCN10(__SHIFT), + TF_DEBUG_REG_LIST_SH_DCN10 + +}; + +static const struct dcn_dpp_mask tf_mask = { + TF_REG_LIST_SH_MASK_DCN10(_MASK), + TF_DEBUG_REG_LIST_MASK_DCN10 +}; + +static const struct dcn_mpc_registers mpc_regs = { + MPC_COMMON_REG_LIST_DCN1_0(0), + MPC_COMMON_REG_LIST_DCN1_0(1), + MPC_COMMON_REG_LIST_DCN1_0(2), + MPC_COMMON_REG_LIST_DCN1_0(3), + MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0), + MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1), + MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2), + MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3) +}; + +static const struct dcn_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\ + SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT) +}; + +static const struct dcn_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\ + SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK) +}; + +#define tg_regs(id)\ +[id] = {TG_COMMON_REG_LIST_DCN1_0(id)} + +static const struct dcn_optc_registers tg_regs[] = { + tg_regs(0), + tg_regs(1), + tg_regs(2), + tg_regs(3), +}; + +static const struct dcn_optc_shift tg_shift = { + TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) +}; + +static const struct dcn_optc_mask tg_mask = { + TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK) +}; + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +#define hubp_regs(id)\ +[id] = {\ + HUBP_REG_LIST_DCN10(id)\ +} + +static const struct dcn_mi_registers hubp_regs[] = { + hubp_regs(0), + hubp_regs(1), + hubp_regs(2), + hubp_regs(3), +}; + +static const struct dcn_mi_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dcn_mi_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN10(_MASK) +}; + +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN10(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN10(_MASK) +}; + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + default: + ASSERT(0); + return 0; + } +} + +#define clk_src_regs(index, pllid)\ +[index] = {\ + CS_COMMON_REG_LIST_DCN1_0(index, pllid),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, C), + clk_src_regs(3, D) +}; + +static const struct dce110_clk_src_shift cs_shift = { + 
CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK) +}; + +static const struct resource_caps res_cap = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 4, + .num_stream_encoder = 4, + .num_pll = 4, + .num_ddc = 4, +}; + +static const struct resource_caps rv2_res_cap = { + .num_timing_generator = 3, + .num_opp = 3, + .num_video_plane = 3, + .num_audio = 3, + .num_stream_encoder = 3, + .num_pll = 3, + .num_ddc = 4, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 250, + .fp16 = 1 + } +}; + +static const struct dc_debug_options debug_defaults_drv = { + .sanity_checks = true, + .disable_dmcu = false, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + + /* raven smu doesn't allow 0 disp clk, + * smu min disp clk limit is 50Mhz + * keep min disp clk 100Mhz to avoid smu hang + */ + .min_disp_clk_khz = 100000, + + .disable_pplib_clock_request = false, + .disable_pplib_wm_range = false, + .pplib_wm_report_mode = WM_REPORT_DEFAULT, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = true, + .disable_dcc = DCC_ENABLE, + .voltage_align_fclk = true, + .disable_stereo_support = true, + .vsr_support = true, + .performance_trace = false, + .az_endpoint_mute_only = true, + .recovery_enabled = false, /*enable this by default after testing.*/ + .max_downscale_src_width = 3840, + .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_legacy_fast_update = true, + .using_dml2 = false, +}; + +static const struct dc_debug_options debug_defaults_diags = { + .disable_dmcu = false, + .force_abm_enable = false, + .timing_trace = true, + .clock_trace = true, + .disable_stutter = true, + .disable_pplib_clock_request = true, + .disable_pplib_wm_range = true, + .underflow_assert_delay_us = 0xFFFFFFFF, +}; + +static void dcn10_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN10_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn10_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn10_dpp *dpp = + kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL); + + if (!dpp) + return NULL; + + dpp1_construct(dpp, ctx, inst, + &tf_regs[inst], &tf_shift, &tf_mask); + return &dpp->base; +} + +static struct input_pixel_processor *dcn10_ipp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn10_ipp *ipp = + kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); + + if (!ipp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn10_ipp_construct(ipp, ctx, inst, + &ipp_regs[inst], &ipp_shift, &ipp_mask); + return &ipp->base; +} + + +static struct output_pixel_processor *dcn10_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn10_opp *opp = + kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn10_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dcn10_aux_engine_create(struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + 
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) +}; + +static struct dce_i2c_hw *dcn10_i2c_hw_create(struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn1_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct mpc *dcn10_mpc_create(struct dc_context *ctx) +{ + struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc), + GFP_KERNEL); + + if (!mpc10) + return NULL; + + dcn10_mpc_construct(mpc10, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + 4); + + return &mpc10->base; +} + +static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx) +{ + struct dcn10_hubbub *dcn10_hubbub = kzalloc(sizeof(struct dcn10_hubbub), + GFP_KERNEL); + + if (!dcn10_hubbub) + return NULL; + + hubbub1_construct(&dcn10_hubbub->base, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask); + + return &dcn10_hubbub->base; +} + +static struct timing_generator *dcn10_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &tg_regs[instance]; + tgn10->tg_shift = &tg_shift; + tgn10->tg_mask = &tg_mask; + + dcn10_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn10_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn10_link_encoder *enc10 = + kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc10) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dcn10_link_encoder_construct(enc10, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc10->base; +} + +static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + +static struct clock_source *dcn10_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + 
const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dce112_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); +} + +static struct audio *create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct stream_encoder *dcn10_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1 = + kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + + if (!enc1) + return NULL; + + dcn10_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + return &enc1->base; +} + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN1_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN1_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN1_MASK_SH_LIST(_MASK) +}; + +static struct dce_hwseq *dcn10_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + hws->wa.DEGVIDCN10_253 = true; + hws->wa.false_optc_underflow = true; + hws->wa.DEGVIDCN10_254 = true; + + if ((ctx->asic_id.chip_family == FAMILY_RV) && + ASICREV_IS_RAVEN2(ctx->asic_id.hw_internal_rev)) + switch (ctx->asic_id.pci_revision_id) { + case PRID_POLLOCK_94: + case PRID_POLLOCK_95: + case PRID_POLLOCK_E9: + case PRID_POLLOCK_EA: + case PRID_POLLOCK_EB: + hws->wa.wait_hubpret_read_start_during_mpo_transition = true; + break; + default: + hws->wa.wait_hubpret_read_start_during_mpo_transition = false; + break; + } + } + return hws; +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = create_audio, + .create_stream_encoder = dcn10_stream_encoder_create, + .create_hwseq = dcn10_hwseq_create, +}; + +static void dcn10_clock_source_destroy(struct clock_source **clk_src) +{ + kfree(TO_DCE110_CLK_SRC(*clk_src)); + *clk_src = NULL; +} + +static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx) +{ + struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); + + if (!pp_smu) + return pp_smu; + + dm_pp_get_funcs(ctx, pp_smu); + return pp_smu; +} + +static void dcn10_resource_destruct(struct dcn10_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN10_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + + if (pool->base.dpps[i] != NULL) + 
dcn10_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN10_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn10_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn10_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + if (pool->base.abm != NULL) + dce_abm_destroy(&pool->base.abm); + + if (pool->base.dmcu != NULL) + dce_dmcu_destroy(&pool->base.dmcu); + + kfree(pool->base.pp_smu); +} + +static struct hubp *dcn10_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn10_hubp *hubp1 = + kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL); + + if (!hubp1) + return NULL; + + dcn10_hubp_construct(hubp1, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask); + return &hubp1->base; +} + +static void get_pixel_clock_parameters( + const struct pipe_ctx *pipe_ctx, + struct pixel_clk_params *pixel_clk_params) +{ + const struct dc_stream_state *stream = pipe_ctx->stream; + pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; + pixel_clk_params->encoder_object_id = stream->link->link_enc->id; + pixel_clk_params->signal_type = pipe_ctx->stream->signal; + pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; + /* TODO: un-hardcode*/ + pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * + LINK_RATE_REF_FREQ_IN_KHZ; + pixel_clk_params->flags.ENABLE_SS = 0; + pixel_clk_params->color_depth = + stream->timing.display_color_depth; + pixel_clk_params->flags.DISPLAY_BLANKED = 1; + pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; + + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) + pixel_clk_params->color_depth = COLOR_DEPTH_888; + + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) + pixel_clk_params->requested_pix_clk_100hz /= 2; + if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + pixel_clk_params->requested_pix_clk_100hz *= 2; + +} + +static void build_clamping_params(struct dc_stream_state *stream) +{ + stream->clamping.clamping_level = CLAMPING_FULL_RANGE; + stream->clamping.c_depth = stream->timing.display_color_depth; + stream->clamping.pixel_encoding = stream->timing.pixel_encoding; +} + +static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx) +{ + + get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); + + pipe_ctx->clock_source->funcs->get_pix_clk_dividers( + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + &pipe_ctx->pll_settings); + + pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; + + 
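+	/*
+	 * With the pixel-clock dividers chosen above, the remaining stream
+	 * state is derived from the timing: bit-depth reduction (dither/
+	 * truncation) parameters first, then full-range clamping via
+	 * build_clamping_params().
+	 */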
resource_build_bit_depth_reduction_params(pipe_ctx->stream, + &pipe_ctx->stream->bit_depth_params); + build_clamping_params(pipe_ctx->stream); +} + +static enum dc_status build_mapped_resource( + const struct dc *dc, + struct dc_state *context, + struct dc_stream_state *stream) +{ + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); + + if (!pipe_ctx) + return DC_ERROR_UNEXPECTED; + + build_pipe_hw_param(pipe_ctx); + return DC_OK; +} + +static enum dc_status dcn10_add_stream_to_ctx( + struct dc *dc, + struct dc_state *new_ctx, + struct dc_stream_state *dc_stream) +{ + enum dc_status result = DC_ERROR_UNEXPECTED; + + result = resource_map_pool_resources(dc, new_ctx, dc_stream); + + if (result == DC_OK) + result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); + + + if (result == DC_OK) + result = build_mapped_resource(dc, new_ctx, dc_stream); + + return result; +} + +static struct pipe_ctx *dcn10_acquire_free_pipe_for_layer( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *opp_head_pipe) +{ + struct resource_context *res_ctx = &new_ctx->res_ctx; + struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream); + struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe); + + if (!head_pipe) { + ASSERT(0); + return NULL; + } + + if (!idle_pipe) + return NULL; + + idle_pipe->stream = head_pipe->stream; + idle_pipe->stream_res.tg = head_pipe->stream_res.tg; + idle_pipe->stream_res.abm = head_pipe->stream_res.abm; + idle_pipe->stream_res.opp = head_pipe->stream_res.opp; + + idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; + idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; + idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; + idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; + + return idle_pipe; +} + +static bool dcn10_get_dcc_compression_cap(const struct dc *dc, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output) +{ + return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( + dc->res_pool->hubbub, + input, + output); +} + +static void dcn10_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool); + + dcn10_resource_destruct(dcn10_pool); + kfree(dcn10_pool); + *pool = NULL; +} + +static bool dcn10_validate_bandwidth( + struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool voltage_supported; + + DC_FP_START(); + voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate); + DC_FP_END(); + + return voltage_supported; +} + +static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) +{ + if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN + && caps->max_video_width != 0 + && plane_state->src_rect.width > caps->max_video_width) + return DC_FAIL_SURFACE_VALIDATE; + + return DC_OK; +} + +static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *context) +{ + int i, j; + bool video_down_scaled = false; + bool video_large = false; + bool desktop_large = false; + bool dcc_disabled = false; + bool mpo_enabled = false; + + for (i = 0; i < context->stream_count; i++) { + if (context->stream_status[i].plane_count == 0) + continue; + + if (context->stream_status[i].plane_count > 2) + return DC_FAIL_UNSUPPORTED_1; + + if (context->stream_status[i].plane_count > 
1) + mpo_enabled = true; + + for (j = 0; j < context->stream_status[i].plane_count; j++) { + struct dc_plane_state *plane = + context->stream_status[i].plane_states[j]; + + + if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + + if (plane->src_rect.width > plane->dst_rect.width || + plane->src_rect.height > plane->dst_rect.height) + video_down_scaled = true; + + if (plane->src_rect.width >= 3840) + video_large = true; + + } else { + if (plane->src_rect.width >= 3840) + desktop_large = true; + if (!plane->dcc.enable) + dcc_disabled = true; + } + } + } + + /* Disable MPO in multi-display configurations. */ + if (context->stream_count > 1 && mpo_enabled) + return DC_FAIL_UNSUPPORTED_1; + + /* + * Workaround: On DCN10 there is UMC issue that causes underflow when + * playing 4k video on 4k desktop with video downscaled and single channel + * memory + */ + if (video_large && desktop_large && video_down_scaled && dcc_disabled && + dc->dcn_soc->number_of_channels == 1) + return DC_FAIL_SURFACE_VALIDATE; + + return DC_OK; +} + +static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state) +{ + enum surface_pixel_format surf_pix_format = plane_state->format; + unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format); + + enum swizzle_mode_values swizzle = DC_SW_LINEAR; + + if (bpp == 64) + swizzle = DC_SW_64KB_D; + else + swizzle = DC_SW_64KB_S; + + plane_state->tiling_info.gfx9.swizzle = swizzle; + return DC_OK; +} + +struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_stream_state *stream) +{ + int i; + int j = -1; + struct dc_link *link = stream->link; + + for (i = 0; i < pool->stream_enc_count; i++) { + if (!res_ctx->is_stream_enc_acquired[i] && + pool->stream_enc[i]) { + /* Store first available for MST second display + * in daisy chain use case + */ + + if (pool->stream_enc[i]->id != ENGINE_ID_VIRTUAL) + j = i; + + if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id == + link->link_enc->preferred_engine) + return pool->stream_enc[i]; + } + } + + /* + * For CZ and later, we can allow DIG FE and BE to differ for all display types + */ + + if (j >= 0) + return pool->stream_enc[j]; + + return NULL; +} + +static const struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn10_get_dcc_compression_cap +}; + +static const struct resource_funcs dcn10_res_pool_funcs = { + .destroy = dcn10_destroy_resource_pool, + .link_enc_create = dcn10_link_encoder_create, + .panel_cntl_create = dcn10_panel_cntl_create, + .validate_bandwidth = dcn10_validate_bandwidth, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn10_acquire_free_pipe_for_layer, + .validate_plane = dcn10_validate_plane, + .validate_global = dcn10_validate_global, + .add_stream_to_ctx = dcn10_add_stream_to_ctx, + .patch_unknown_plane_state = dcn10_patch_unknown_plane_state, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link +}; + +static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); + /* RV1 support max 4 pipes */ + value = value & 0xf; + return value; +} + +static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks) +{ + int i; + + if (clks->num_levels == 0) + return false; + + for (i = 0; i < clks->num_levels; i++) + /* Ensure that the result is sane */ + if (clks->data[i].clocks_in_khz == 0) + return false; + + return true; +} + +static bool 
dcn10_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn10_resource_pool *pool) +{ + int i; + int j; + struct dc_context *ctx = dc->ctx; + uint32_t pipe_fuses = read_pipe_fuses(ctx); + struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; + int min_fclk_khz, min_dcfclk_khz, socclk_khz; + bool res; + + ctx->dc_bios->regs = &bios_regs; + + if (ctx->dce_version == DCN_VERSION_1_01) + pool->base.res_cap = &rv2_res_cap; + else + pool->base.res_cap = &res_cap; + pool->base.funcs = &dcn10_res_pool_funcs; + + /* + * TODO fill in from actual raven resource when we create + * more than virtual encoder + */ + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + + /* max pipe num for ASIC before checking pipe fuses */ + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + + if (dc->ctx->dce_version == DCN_VERSION_1_01) + pool->base.pipe_count = 3; + dc->caps.max_video_width = 3840; + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.max_slave_planes = 1; + dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 0; + dc->caps.is_apu = true; + dc->caps.post_blend_color_processing = false; + dc->caps.extended_aux_timeout_support = false; + + /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */ + dc->caps.force_dp_tps4_for_cp2520 = true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 1; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 1; + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.dgam_rom_caps.pq = 0; + dc->caps.color.dpp.dgam_rom_caps.hlg = 0; + dc->caps.color.dpp.post_csc = 0; + dc->caps.color.dpp.gamma_corr = 0; + dc->caps.color.dpp.dgam_rom_for_yuv = 1; + + dc->caps.color.dpp.hw_3d_lut = 0; + dc->caps.color.dpp.ogam_ram = 1; // RGAM on DCN1 + dc->caps.color.dpp.ogam_rom_caps.srgb = 1; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 1; + + /* no post-blend color operations */ + dc->caps.color.mpc.gamut_remap = 0; + dc->caps.color.mpc.num_3dluts = 0; + dc->caps.color.mpc.shared_3d_lut = 0; + dc->caps.color.mpc.ogam_ram = 0; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 0; + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + else + dc->debug = debug_defaults_diags; + + /************************************************* + * Create resources * + *************************************************/ + + pool->base.clock_sources[DCN10_CLK_SRC_PLL0] = + dcn10_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN10_CLK_SRC_PLL1] = + dcn10_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], 
false); + pool->base.clock_sources[DCN10_CLK_SRC_PLL2] = + dcn10_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + + if (dc->ctx->dce_version == DCN_VERSION_1_0) { + pool->base.clock_sources[DCN10_CLK_SRC_PLL3] = + dcn10_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + } + + pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL; + + if (dc->ctx->dce_version == DCN_VERSION_1_01) + pool->base.clk_src_count = DCN101_CLK_SRC_TOTAL; + + pool->base.dp_clock_source = + dcn10_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + /* todo: not reuse phy_pll registers */ + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto fail; + } + } + + pool->base.dmcu = dcn10_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto fail; + } + + dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1); + memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults)); + memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults)); + + DC_FP_START(); + dcn10_resource_construct_fp(dc); + DC_FP_END(); + + if (!dc->config.is_vmin_only_asic) + if (ASICREV_IS_RAVEN2(dc->ctx->asic_id.hw_internal_rev)) + switch (dc->ctx->asic_id.pci_revision_id) { + case PRID_DALI_DE: + case PRID_DALI_DF: + case PRID_DALI_E3: + case PRID_DALI_E4: + case PRID_POLLOCK_94: + case PRID_POLLOCK_95: + case PRID_POLLOCK_E9: + case PRID_POLLOCK_EA: + case PRID_POLLOCK_EB: + dc->config.is_vmin_only_asic = true; + break; + default: + break; + } + + pool->base.pp_smu = dcn10_pp_smu_create(ctx); + + /* + * Right now SMU/PPLIB and DAL all have the AZ D3 force PME notification * + * implemented. So AZ D3 should work. For issue 197007. * + */ + if (pool->base.pp_smu != NULL + && pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL) + dc->debug.az_endpoint_mute_only = false; + + + if (!dc->debug.disable_pplib_clock_request) { + /* + * TODO: This is not the proper way to obtain + * fabric_and_dram_bandwidth, should be min(fclk, memclk). 
+ */ + res = dm_pp_get_clock_levels_by_type_with_voltage( + ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); + + DC_FP_START(); + + if (res) + res = verify_clock_values(&fclks); + + if (res) + dcn_bw_update_from_pplib_fclks(dc, &fclks); + else + BREAK_TO_DEBUGGER(); + + DC_FP_END(); + + res = dm_pp_get_clock_levels_by_type_with_voltage( + ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); + + DC_FP_START(); + + if (res) + res = verify_clock_values(&dcfclks); + + if (res) + dcn_bw_update_from_pplib_dcfclks(dc, &dcfclks); + else + BREAK_TO_DEBUGGER(); + + DC_FP_END(); + } + + dcn_bw_sync_calcs_and_dml(dc); + if (!dc->debug.disable_pplib_wm_range) { + dc->res_pool = &pool->base; + DC_FP_START(); + dcn_get_soc_clks( + dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz); + DC_FP_END(); + dcn_bw_notify_pplib_of_wm_ranges( + dc, min_fclk_khz, min_dcfclk_khz, socclk_khz); + } + + { + struct irq_service_init_data init_data; + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn10_create(&init_data); + if (!pool->base.irqs) + goto fail; + } + + /* index to valid pipe resource */ + j = 0; + /* mem input -> ipp -> dpp -> opp -> TG */ + for (i = 0; i < pool->base.pipe_count; i++) { + /* if pipe is disabled, skip instance of HW pipe, + * i.e, skip ASIC register instance + */ + if ((pipe_fuses & (1 << i)) != 0) + continue; + + pool->base.hubps[j] = dcn10_hubp_create(ctx, i); + if (pool->base.hubps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create memory input!\n"); + goto fail; + } + + pool->base.ipps[j] = dcn10_ipp_create(ctx, i); + if (pool->base.ipps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create input pixel processor!\n"); + goto fail; + } + + pool->base.dpps[j] = dcn10_dpp_create(ctx, i); + if (pool->base.dpps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpp!\n"); + goto fail; + } + + pool->base.opps[j] = dcn10_opp_create(ctx, i); + if (pool->base.opps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto fail; + } + + pool->base.timing_generators[j] = dcn10_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto fail; + } + /* check next valid pipe */ + j++; + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn10_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto fail; + } + pool->base.hw_i2cs[i] = dcn10_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + goto fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* valid pipe num */ + pool->base.pipe_count = j; + pool->base.timing_generator_count = j; + + /* within dml lib, it is hard code to 4. 
If ASIC pipe is fused, + * the value may be changed + */ + dc->dml.ip.max_num_dpp = pool->base.pipe_count; + dc->dcn_ip->max_num_dpp = pool->base.pipe_count; + + pool->base.mpc = dcn10_mpc_create(ctx); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto fail; + } + + pool->base.hubbub = dcn10_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto fail; + } + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto fail; + + dcn10_hw_sequencer_construct(dc); + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + return true; + +fail: + + dcn10_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn10_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn10_resource_pool *pool = + kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn10_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + kfree(pool); + BREAK_TO_DEBUGGER(); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h new file mode 100644 index 0000000000..bf8e33cd81 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h @@ -0,0 +1,56 @@ +/* +* Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_RESOURCE_DCN10_H__ +#define __DC_RESOURCE_DCN10_H__ + +#include "core_types.h" +#include "dml/dcn10/dcn10_fpu.h" + +#define TO_DCN10_RES_POOL(pool)\ + container_of(pool, struct dcn10_resource_pool, base) + +struct dc; +struct resource_pool; +struct _vcs_dpi_display_pipe_params_st; + +extern struct _vcs_dpi_ip_params_st dcn1_0_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc; + +struct dcn10_resource_pool { + struct resource_pool base; +}; +struct resource_pool *dcn10_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_stream_state *stream); + + +#endif /* __DC_RESOURCE_DCN10_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c new file mode 100644 index 0000000000..f9c5bc624b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -0,0 +1,2793 @@ +/* +* Copyright 2016 Advanced Micro Devices, Inc. + * Copyright 2019 Raptor Engineering, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include <linux/slab.h> + +#include "dm_services.h" +#include "dc.h" + +#include "dcn20/dcn20_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn20/dcn20_resource.h" + +#include "dml/dcn20/dcn20_fpu.h" + +#include "dcn10/dcn10_hubp.h" +#include "dcn10/dcn10_ipp.h" +#include "dcn20/dcn20_hubbub.h" +#include "dcn20/dcn20_mpc.h" +#include "dcn20/dcn20_hubp.h" +#include "irq/dcn20/irq_service_dcn20.h" +#include "dcn20/dcn20_dpp.h" +#include "dcn20/dcn20_optc.h" +#include "dcn20/dcn20_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn10/dcn10_resource.h" +#include "dcn20/dcn20_opp.h" + +#include "dcn20/dcn20_dsc.h" + +#include "dcn20/dcn20_link_encoder.h" +#include "dcn20/dcn20_stream_encoder.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dml/display_mode_vba.h" +#include "dcn20/dcn20_dccg.h" +#include "dcn20/dcn20_vmid.h" +#include "dce/dce_panel_cntl.h" + +#include "navi10_ip_offset.h" + +#include "dcn/dcn_2_0_0_offset.h" +#include "dcn/dcn_2_0_0_sh_mask.h" +#include "dpcs/dpcs_2_0_0_offset.h" +#include "dpcs/dpcs_2_0_0_sh_mask.h" + +#include "nbio/nbio_2_3_offset.h" + +#include "dcn20/dcn20_dwb.h" +#include "dcn20/dcn20_mmhubbub.h" + +#include "mmhub/mmhub_2_0_0_offset.h" +#include "mmhub/mmhub_2_0_0_sh_mask.h" + +#include "reg_helper.h" +#include "dce/dce_abm.h" +#include "dce/dce_dmcu.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" +#include "vm_helper.h" +#include "link_enc_cfg.h" + +#include "amdgpu_socbb.h" + +#include "link.h" +#define DC_LOGGER_INIT(logger) + +#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL + #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f + #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 + #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f + #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 + #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f + #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 + #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f + #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 + #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f + #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 + #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f + #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 + #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f + #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 +#endif + + +enum dcn20_clk_src_array_id { + DCN20_CLK_SRC_PLL0, + DCN20_CLK_SRC_PLL1, + DCN20_CLK_SRC_PLL2, + DCN20_CLK_SRC_PLL3, + DCN20_CLK_SRC_PLL4, + DCN20_CLK_SRC_PLL5, + DCN20_CLK_SRC_TOTAL +}; + +/* begin ********************* + * macros to expand register list macro defined in HW object header file */ + +/* DCN */ +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRI2_DWB(reg_name, block, id)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name +#define SF_DWB(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name +
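+/* + * For example, with the definitions above, SRI(DP_DPHY_INTERNAL_CTRL, DP, 0) + * pastes to .DP_DPHY_INTERNAL_CTRL = BASE(mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX) + + * mmDP0_DP_DPHY_INTERNAL_CTRL, and BASE(2) resolves to DCN_BASE__INST0_SEG2, + * so each field ends up holding the IP base of that segment plus the + * per-instance register offset (0x210f for DP0). + */ +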
+#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +/* MMHUB */ +#define MMHUB_BASE_INNER(seg) \ + MMHUB_BASE__INST0_SEG ## seg + +#define MMHUB_BASE(seg) \ + MMHUB_BASE_INNER(seg) + +#define MMHUB_SR(reg_name)\ + .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \ + mmMM ## reg_name + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +#define clk_src_regs(index, pllid)\ +[index] = {\ + CS_COMMON_REG_LIST_DCN2_0(index, pllid),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, C), + clk_src_regs(3, D), + clk_src_regs(4, E), + clk_src_regs(5, F) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +static const struct dce_dmcu_registers dmcu_regs = { + DMCU_DCN10_REG_LIST() +}; + +static const struct dce_dmcu_shift dmcu_shift = { + DMCU_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dce_dmcu_mask dmcu_mask = { + DMCU_MASK_SH_LIST_DCN10(_MASK) +}; + +static const struct dce_abm_registers abm_regs = { + ABM_DCN20_REG_LIST() +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN20(_MASK) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6), +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_DCN2_REG_LIST(id)\ +} + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4), + stream_enc_regs(5), +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN20(_MASK) +}; + + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4), + aux_regs(5) +}; + +#define hpd_regs(id)\ +[id] = {\ + 
HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4), + hpd_regs(5) +}; + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN10_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN2_REG_LIST(id), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ +} + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E), + link_regs(5, F) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\ + DPCS_DCN2_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\ + DPCS_DCN2_MASK_SH_LIST(_MASK) +}; + +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCN_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +#define ipp_regs(id)\ +[id] = {\ + IPP_REG_LIST_DCN20(id),\ +} + +static const struct dcn10_ipp_registers ipp_regs[] = { + ipp_regs(0), + ipp_regs(1), + ipp_regs(2), + ipp_regs(3), + ipp_regs(4), + ipp_regs(5), +}; + +static const struct dcn10_ipp_shift ipp_shift = { + IPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn10_ipp_mask ipp_mask = { + IPP_MASK_SH_LIST_DCN20(_MASK), +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN20(id),\ +} + +static const struct dcn20_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3), + opp_regs(4), + opp_regs(5), +}; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUXN_IMPCAL = 0, \ + .AUXP_IMPCAL = 0, \ + .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4), + aux_engine_regs(5) +}; + +#define tf_regs(id)\ +[id] = {\ + TF_REG_LIST_DCN20(id),\ + TF_REG_LIST_DCN20_COMMON_APPEND(id),\ +} + +static const struct dcn2_dpp_registers tf_regs[] = { + tf_regs(0), + tf_regs(1), + tf_regs(2), + tf_regs(3), + tf_regs(4), + tf_regs(5), +}; + +static const struct dcn2_dpp_shift tf_shift = { + TF_REG_LIST_SH_MASK_DCN20(__SHIFT), + TF_DEBUG_REG_LIST_SH_DCN20 +}; + +static const struct dcn2_dpp_mask tf_mask = { + TF_REG_LIST_SH_MASK_DCN20(_MASK), + TF_DEBUG_REG_LIST_MASK_DCN20 +}; + +#define dwbc_regs_dcn2(id)\ +[id] = {\ + DWBC_COMMON_REG_LIST_DCN2_0(id),\ + } + +static const struct dcn20_dwbc_registers dwbc20_regs[] = { + dwbc_regs_dcn2(0), +}; + +static const struct dcn20_dwbc_shift dwbc20_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dcn20_dwbc_mask dwbc20_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +#define mcif_wb_regs_dcn2(id)\ +[id] = {\ + MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\ + } + +static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = { + mcif_wb_regs_dcn2(0), +}; + +static const struct dcn20_mmhubbub_shift mcif_wb20_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dcn20_mmhubbub_mask mcif_wb20_mask = { + 
MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +static const struct dcn20_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN2_0(0), + MPC_REG_LIST_DCN2_0(1), + MPC_REG_LIST_DCN2_0(2), + MPC_REG_LIST_DCN2_0(3), + MPC_REG_LIST_DCN2_0(4), + MPC_REG_LIST_DCN2_0(5), + MPC_OUT_MUX_REG_LIST_DCN2_0(0), + MPC_OUT_MUX_REG_LIST_DCN2_0(1), + MPC_OUT_MUX_REG_LIST_DCN2_0(2), + MPC_OUT_MUX_REG_LIST_DCN2_0(3), + MPC_OUT_MUX_REG_LIST_DCN2_0(4), + MPC_OUT_MUX_REG_LIST_DCN2_0(5), + MPC_DBG_REG_LIST_DCN2_0() +}; + +static const struct dcn20_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT), + MPC_DEBUG_REG_LIST_SH_DCN20 +}; + +static const struct dcn20_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK), + MPC_DEBUG_REG_LIST_MASK_DCN20 +}; + +#define tg_regs(id)\ +[id] = {TG_COMMON_REG_LIST_DCN2_0(id)} + + +static const struct dcn_optc_registers tg_regs[] = { + tg_regs(0), + tg_regs(1), + tg_regs(2), + tg_regs(3), + tg_regs(4), + tg_regs(5) +}; + +static const struct dcn_optc_shift tg_shift = { + TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dcn_optc_mask tg_mask = { + TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +#define hubp_regs(id)\ +[id] = {\ + HUBP_REG_LIST_DCN20(id)\ +} + +static const struct dcn_hubp2_registers hubp_regs[] = { + hubp_regs(0), + hubp_regs(1), + hubp_regs(2), + hubp_regs(3), + hubp_regs(4), + hubp_regs(5) +}; + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN20(_MASK) +}; + +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN20(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN20(_MASK) +}; + +#define vmid_regs(id)\ +[id] = {\ + DCN20_VMID_REG_LIST(id)\ +} + +static const struct dcn_vmid_registers vmid_regs[] = { + vmid_regs(0), + vmid_regs(1), + vmid_regs(2), + vmid_regs(3), + vmid_regs(4), + vmid_regs(5), + vmid_regs(6), + vmid_regs(7), + vmid_regs(8), + vmid_regs(9), + vmid_regs(10), + vmid_regs(11), + vmid_regs(12), + vmid_regs(13), + vmid_regs(14), + vmid_regs(15) +}; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + default: + ASSERT(0); + return 0; + } +} + +#define dsc_regsDCN20(id)\ +[id] = {\ + DSC_REG_LIST_DCN20(id)\ +} + +static const struct dcn20_dsc_registers dsc_regs[] = { + dsc_regsDCN20(0), + dsc_regsDCN20(1), + dsc_regsDCN20(2), + dsc_regsDCN20(3), + dsc_regsDCN20(4), + dsc_regsDCN20(5) +}; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static const struct dccg_registers dccg_regs = { + 
DCCG_REG_LIST_DCN2() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN2(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN2(_MASK) +}; + +static const struct resource_caps res_cap_nv10 = { + .num_timing_generator = 6, + .num_opp = 6, + .num_video_plane = 6, + .num_audio = 7, + .num_stream_encoder = 6, + .num_pll = 6, + .num_dwb = 1, + .num_ddc = 6, + .num_vmid = 16, + .num_dsc = 6, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 250, + .fp16 = 1 + }, + 16, + 16 +}; +static const struct resource_caps res_cap_nv14 = { + .num_timing_generator = 5, + .num_opp = 5, + .num_video_plane = 5, + .num_audio = 6, + .num_stream_encoder = 5, + .num_pll = 5, + .num_dwb = 1, + .num_ddc = 5, + .num_vmid = 16, + .num_dsc = 5, +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_dmcu = false, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = true, + .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 5120,/*upto 5K*/ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_legacy_fast_update = true, + .using_dml2 = false, +}; + +void dcn20_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN20_DPP(*dpp)); + *dpp = NULL; +} + +struct dpp *dcn20_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_dpp *dpp = + kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC); + + if (!dpp) + return NULL; + + if (dpp2_construct(dpp, ctx, inst, + &tf_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +struct input_pixel_processor *dcn20_ipp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn10_ipp *ipp = + kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC); + + if (!ipp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_ipp_construct(ipp, ctx, inst, + &ipp_regs[inst], &ipp_shift, &ipp_mask); + return &ipp->base; +} + + +struct output_pixel_processor *dcn20_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp = + kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +struct dce_aux *dcn20_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), + i2c_inst_regs(6), +}; + +static 
const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) +}; + +struct dce_i2c_hw *dcn20_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +struct mpc *dcn20_mpc_create(struct dc_context *ctx) +{ + struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), + GFP_ATOMIC); + + if (!mpc20) + return NULL; + + dcn20_mpc_construct(mpc20, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + 6); + + return &mpc20->base; +} + +struct hubbub *dcn20_hubbub_create(struct dc_context *ctx) +{ + int i; + struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), + GFP_ATOMIC); + + if (!hubbub) + return NULL; + + hubbub2_construct(hubbub, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask); + + for (i = 0; i < res_cap_nv10.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub->base; +} + +struct timing_generator *dcn20_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_ATOMIC); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &tg_regs[instance]; + tgn10->tg_shift = &tg_shift; + tgn10->tg_mask = &tg_mask; + + dcn20_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +struct link_encoder *dcn20_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc20) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dcn20_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc20->enc10.base; +} + +static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + +static struct clock_source *dcn20_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC); + + if (!clk_src) + return NULL; + + if (dcn20_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, 
&cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); +} + +static struct audio *dcn20_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +struct stream_encoder *dcn20_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1 = + kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + + if (!enc1) + return NULL; + + if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) { + if (eng_id >= ENGINE_ID_DIGD) + eng_id++; + } + + dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN2_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN2_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN2_MASK_SH_LIST(_MASK) +}; + +struct dce_hwseq *dcn20_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn20_create_audio, + .create_stream_encoder = dcn20_stream_encoder_create, + .create_hwseq = dcn20_hwseq_create, +}; + +static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu); + +void dcn20_clock_source_destroy(struct clock_source **clk_src) +{ + kfree(TO_DCE110_CLK_SRC(*clk_src)); + *clk_src = NULL; +} + + +struct display_stream_compressor *dcn20_dsc_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + return &dsc->base; +} + +void dcn20_dsc_destroy(struct display_stream_compressor **dsc) +{ + kfree(container_of(*dsc, struct dcn20_dsc, base)); + *dsc = NULL; +} + + +static void dcn20_resource_destruct(struct dcn20_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn20_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + 
dal_irq_service_destroy(&pool->base.irqs); + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN20_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + + if (pool->base.abm != NULL) + dce_abm_destroy(&pool->base.abm); + + if (pool->base.dmcu != NULL) + dce_dmcu_destroy(&pool->base.dmcu); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); + + if (pool->base.pp_smu != NULL) + dcn20_pp_smu_destroy(&pool->base.pp_smu); + + if (pool->base.oem_device != NULL) { + struct dc *dc = pool->base.oem_device->ctx->dc; + + dc->link_srv->destroy_ddc_service(&pool->base.oem_device); + } +} + +struct hubp *dcn20_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC); + + if (!hubp2) + return NULL; + + if (hubp2_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static void get_pixel_clock_parameters( + struct pipe_ctx *pipe_ctx, + struct pixel_clk_params *pixel_clk_params) +{ + const struct dc_stream_state *stream = pipe_ctx->stream; + struct pipe_ctx *odm_pipe; + int opp_cnt = 1; + struct dc_link *link = stream->link; + struct link_encoder *link_enc = NULL; + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dce_hwseq *hws = dc->hwseq; + + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + + pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; + + link_enc = link_enc_cfg_get_link_enc(link); + if (link_enc) + pixel_clk_params->encoder_object_id = link_enc->id; + + pixel_clk_params->signal_type = pipe_ctx->stream->signal; + pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; + /* TODO: un-hardcode*/ + /* TODO - DP2.0 HW: calculate requested_sym_clk for UHBR rates */ + pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * + LINK_RATE_REF_FREQ_IN_KHZ; + pixel_clk_params->flags.ENABLE_SS = 0; + pixel_clk_params->color_depth = + stream->timing.display_color_depth; + pixel_clk_params->flags.DISPLAY_BLANKED = 1; + 
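/* + * ODM combine shares one OTG across several OPPs, so the pixel clock + * requested from the clock source is divided by 4 or 2 below according to + * the ODM segment count, two-pixels-per-container packing, or the DIG pixel + * rate divider policy, and doubled for HW frame packing. + */ +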
pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; + + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) + pixel_clk_params->color_depth = COLOR_DEPTH_888; + + if (opp_cnt == 4) + pixel_clk_params->requested_pix_clk_100hz /= 4; + else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2) + pixel_clk_params->requested_pix_clk_100hz /= 2; + else if (hws->funcs.is_dp_dig_pixel_rate_div_policy) { + if (hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx)) + pixel_clk_params->requested_pix_clk_100hz /= 2; + } + + if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + pixel_clk_params->requested_pix_clk_100hz *= 2; + +} + +static void build_clamping_params(struct dc_stream_state *stream) +{ + stream->clamping.clamping_level = CLAMPING_FULL_RANGE; + stream->clamping.c_depth = stream->timing.display_color_depth; + stream->clamping.pixel_encoding = stream->timing.pixel_encoding; +} + +void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx) +{ + get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); + pipe_ctx->clock_source->funcs->get_pix_clk_dividers( + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + &pipe_ctx->pll_settings); +} + +static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) +{ + + dcn20_build_pipe_pix_clk_params(pipe_ctx); + + pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; + + resource_build_bit_depth_reduction_params(pipe_ctx->stream, + &pipe_ctx->stream->bit_depth_params); + build_clamping_params(pipe_ctx->stream); + + return DC_OK; +} + +enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) +{ + enum dc_status status = DC_OK; + struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); + + if (!pipe_ctx) + return DC_ERROR_UNEXPECTED; + + + status = build_pipe_hw_param(pipe_ctx); + + return status; +} + + +void dcn20_acquire_dsc(const struct dc *dc, + struct resource_context *res_ctx, + struct display_stream_compressor **dsc, + int pipe_idx) +{ + int i; + const struct resource_pool *pool = dc->res_pool; + struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc; + + ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */ + *dsc = NULL; + + /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */ + if (pool->res_cap->num_dsc == pool->res_cap->num_opp) { + *dsc = pool->dscs[pipe_idx]; + res_ctx->is_dsc_acquired[pipe_idx] = true; + return; + } + + /* Return old DSC to avoid the need for re-programming */ + if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) { + *dsc = dsc_old; + res_ctx->is_dsc_acquired[dsc_old->inst] = true; + return ; + } + + /* Find first free DSC */ + for (i = 0; i < pool->res_cap->num_dsc; i++) + if (!res_ctx->is_dsc_acquired[i]) { + *dsc = pool->dscs[i]; + res_ctx->is_dsc_acquired[i] = true; + break; + } +} + +void dcn20_release_dsc(struct resource_context *res_ctx, + const struct resource_pool *pool, + struct display_stream_compressor **dsc) +{ + int i; + + for (i = 0; i < pool->res_cap->num_dsc; i++) + if (pool->dscs[i] == *dsc) { + res_ctx->is_dsc_acquired[i] = false; + *dsc = NULL; + break; + } +} + + + +enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, + struct dc_state *dc_ctx, + struct dc_stream_state *dc_stream) +{ + enum dc_status result = DC_OK; + int i; + + /* Get a DSC if required 
and available */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->top_pipe) + continue; + + if (pipe_ctx->stream != dc_stream) + continue; + + if (pipe_ctx->stream_res.dsc) + continue; + + dcn20_acquire_dsc(dc, &dc_ctx->res_ctx, &pipe_ctx->stream_res.dsc, i); + + /* The number of DSCs can be less than the number of pipes */ + if (!pipe_ctx->stream_res.dsc) { + result = DC_NO_DSC_RESOURCE; + } + + break; + } + + return result; +} + + +static enum dc_status remove_dsc_from_stream_resource(struct dc *dc, + struct dc_state *new_ctx, + struct dc_stream_state *dc_stream) +{ + struct pipe_ctx *pipe_ctx = NULL; + int i; + + for (i = 0; i < MAX_PIPES; i++) { + if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) { + pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream_res.dsc) + dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc); + } + } + + if (!pipe_ctx) + return DC_ERROR_UNEXPECTED; + else + return DC_OK; +} + + +enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) +{ + enum dc_status result = DC_ERROR_UNEXPECTED; + + result = resource_map_pool_resources(dc, new_ctx, dc_stream); + + if (result == DC_OK) + result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream); + + /* Get a DSC if required and available */ + if (result == DC_OK && dc_stream->timing.flags.DSC) + result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream); + + if (result == DC_OK) + result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream); + + return result; +} + + +enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) +{ + enum dc_status result = DC_OK; + + result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream); + + return result; +} + +/** + * dcn20_split_stream_for_odm - Check if stream can be split for ODM + * + * @dc: DC object with resource pool info required for pipe split + * @res_ctx: Persistent state of resources + * @prev_odm_pipe: Reference to the previous ODM pipe + * @next_odm_pipe: Reference to the next ODM pipe + * + * This function takes a logically active pipe and a logically free pipe and + * halves all the scaling parameters that need to be halved while populating + * the free pipe with the required resources and configuring the next/previous + * ODM pipe pointers. + * + * Return: + * Return true if split stream for ODM is possible; otherwise, return false.
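+ * + * For example, a stream with a 3840-pixel-wide h_active split two ways leaves + * each ODM segment with an h_active of 1920, with recout and viewport widths + * recomputed so the two segments cover the left and right halves.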
+ */ +bool dcn20_split_stream_for_odm( + const struct dc *dc, + struct resource_context *res_ctx, + struct pipe_ctx *prev_odm_pipe, + struct pipe_ctx *next_odm_pipe) +{ + int pipe_idx = next_odm_pipe->pipe_idx; + const struct resource_pool *pool = dc->res_pool; + + *next_odm_pipe = *prev_odm_pipe; + + next_odm_pipe->pipe_idx = pipe_idx; + next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx]; + next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx]; + next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx]; + next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx]; + next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx]; + next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst; + next_odm_pipe->stream_res.dsc = NULL; + if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) { + next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe; + next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe; + } + if (prev_odm_pipe->top_pipe && prev_odm_pipe->top_pipe->next_odm_pipe) { + prev_odm_pipe->top_pipe->next_odm_pipe->bottom_pipe = next_odm_pipe; + next_odm_pipe->top_pipe = prev_odm_pipe->top_pipe->next_odm_pipe; + } + if (prev_odm_pipe->bottom_pipe && prev_odm_pipe->bottom_pipe->next_odm_pipe) { + prev_odm_pipe->bottom_pipe->next_odm_pipe->top_pipe = next_odm_pipe; + next_odm_pipe->bottom_pipe = prev_odm_pipe->bottom_pipe->next_odm_pipe; + } + prev_odm_pipe->next_odm_pipe = next_odm_pipe; + next_odm_pipe->prev_odm_pipe = prev_odm_pipe; + + if (prev_odm_pipe->plane_state) { + struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data; + int new_width; + + /* HACTIVE halved for odm combine */ + sd->h_active /= 2; + /* Calculate new vp and recout for left pipe */ + /* Need at least 16 pixels width per side */ + if (sd->recout.x + 16 >= sd->h_active) + return false; + new_width = sd->h_active - sd->recout.x; + sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int( + sd->ratios.horz, sd->recout.width - new_width)); + sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int( + sd->ratios.horz_c, sd->recout.width - new_width)); + sd->recout.width = new_width; + + /* Calculate new vp and recout for right pipe */ + sd = &next_odm_pipe->plane_res.scl_data; + /* HACTIVE halved for odm combine */ + sd->h_active /= 2; + /* Need at least 16 pixels width per side */ + if (new_width <= 16) + return false; + new_width = sd->recout.width + sd->recout.x - sd->h_active; + sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int( + sd->ratios.horz, sd->recout.width - new_width)); + sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int( + sd->ratios.horz_c, sd->recout.width - new_width)); + sd->recout.width = new_width; + sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int( + sd->ratios.horz, sd->h_active - sd->recout.x)); + sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int( + sd->ratios.horz_c, sd->h_active - sd->recout.x)); + sd->recout.x = 0; + } + if (!next_odm_pipe->top_pipe) + next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; + else + next_odm_pipe->stream_res.opp = next_odm_pipe->top_pipe->stream_res.opp; + if (next_odm_pipe->stream->timing.flags.DSC == 1 && !next_odm_pipe->top_pipe) { + dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx); + ASSERT(next_odm_pipe->stream_res.dsc); + if (next_odm_pipe->stream_res.dsc == NULL) + return false; + } + + return true; +} + +void dcn20_split_stream_for_mpc( + struct resource_context *res_ctx, + 
const struct resource_pool *pool, + struct pipe_ctx *primary_pipe, + struct pipe_ctx *secondary_pipe) +{ + int pipe_idx = secondary_pipe->pipe_idx; + struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe; + + *secondary_pipe = *primary_pipe; + secondary_pipe->bottom_pipe = sec_bot_pipe; + + secondary_pipe->pipe_idx = pipe_idx; + secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst; + secondary_pipe->stream_res.dsc = NULL; + if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) { + ASSERT(!secondary_pipe->bottom_pipe); + secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe; + secondary_pipe->bottom_pipe->top_pipe = secondary_pipe; + } + primary_pipe->bottom_pipe = secondary_pipe; + secondary_pipe->top_pipe = primary_pipe; + + ASSERT(primary_pipe->plane_state); +} + +unsigned int dcn20_calc_max_scaled_time( + unsigned int time_per_pixel, + enum mmhubbub_wbif_mode mode, + unsigned int urgent_watermark) +{ + unsigned int time_per_byte = 0; + unsigned int total_y_free_entry = 0x200; /* two memory piece for luma */ + unsigned int total_c_free_entry = 0x140; /* two memory piece for chroma */ + unsigned int small_free_entry, max_free_entry; + unsigned int buf_lh_capability; + unsigned int max_scaled_time; + + if (mode == PACKED_444) /* packed mode */ + time_per_byte = time_per_pixel/4; + else if (mode == PLANAR_420_8BPC) + time_per_byte = time_per_pixel; + else if (mode == PLANAR_420_10BPC) /* p010 */ + time_per_byte = time_per_pixel * 819/1024; + + if (time_per_byte == 0) + time_per_byte = 1; + + small_free_entry = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry; + max_free_entry = (mode == PACKED_444) ? 
total_y_free_entry + total_c_free_entry : small_free_entry; + buf_lh_capability = max_free_entry*time_per_byte*32/16; /* there is 4bit fraction */ + max_scaled_time = buf_lh_capability - urgent_watermark; + return max_scaled_time; +} + +void dcn20_set_mcif_arb_params( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt) +{ + enum mmhubbub_wbif_mode wbif_mode; + struct mcif_arb_params *wb_arb_params; + int i, j, dwb_pipe; + + /* Writeback MCIF_WB arbitration parameters */ + dwb_pipe = 0; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + for (j = 0; j < MAX_DWB_PIPES; j++) { + if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled == false) + continue; + + //wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params; + wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe]; + + if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) { + if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC) + wbif_mode = PLANAR_420_8BPC; + else + wbif_mode = PLANAR_420_10BPC; + } else + wbif_mode = PACKED_444; + + DC_FP_START(); + dcn20_fpu_set_wb_arb_params(wb_arb_params, context, pipes, pipe_cnt, i); + DC_FP_END(); + + wb_arb_params->slice_lines = 32; + wb_arb_params->arbitration_slice = 2; + wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel, + wbif_mode, + wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */ + + dwb_pipe++; + + if (dwb_pipe >= MAX_DWB_PIPES) + return; + } + if (dwb_pipe >= MAX_DWB_PIPES) + return; + } +} + +bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) +{ + int i; + + /* Validate DSC config, dsc count validation is already done */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dsc_config dsc_cfg; + struct pipe_ctx *odm_pipe; + int opp_cnt = 1; + + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + + /* Only need to validate top pipe */ + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC) + continue; + + dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; + dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + + if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg)) + return false; + } + return true; +} + +struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, + struct resource_context *res_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *primary_pipe) +{ + struct pipe_ctx *secondary_pipe = NULL; + + if (dc && primary_pipe) { + int j; + int preferred_pipe_idx = 0; + + /* first check the prev dc state: + * if this primary pipe has a bottom pipe in prev. 
state + * and if the bottom pipe is still available (which it should be), + * pick that pipe as secondary + * Same logic applies for ODM pipes + */ + if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) { + preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx; + if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { + secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; + secondary_pipe->pipe_idx = preferred_pipe_idx; + } + } + if (secondary_pipe == NULL && + dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) { + preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx; + if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { + secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; + secondary_pipe->pipe_idx = preferred_pipe_idx; + } + } + + /* + * if this primary pipe does not have a bottom pipe in prev. state + * start backward and find a pipe that did not used to be a bottom pipe in + * prev. dc state. This way we make sure we keep the same assignment as + * last state and will not have to reprogram every pipe + */ + if (secondary_pipe == NULL) { + for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) { + if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL + && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) { + preferred_pipe_idx = j; + + if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { + secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; + secondary_pipe->pipe_idx = preferred_pipe_idx; + break; + } + } + } + } + /* + * We should never hit this assert unless assignments are shuffled around + * if this happens we will prob. hit a vsync tdr + */ + ASSERT(secondary_pipe); + /* + * search backwards for the second pipe to keep pipe + * assignment more consistent + */ + if (secondary_pipe == NULL) { + for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) { + preferred_pipe_idx = j; + + if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { + secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; + secondary_pipe->pipe_idx = preferred_pipe_idx; + break; + } + } + } + } + + return secondary_pipe; +} + +void dcn20_merge_pipes_for_validate( + struct dc *dc, + struct dc_state *context) +{ + int i; + + /* merge previously split odm pipes since mode support needs to make the decision */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *odm_pipe = pipe->next_odm_pipe; + + if (pipe->prev_odm_pipe) + continue; + + pipe->next_odm_pipe = NULL; + while (odm_pipe) { + struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe; + + odm_pipe->plane_state = NULL; + odm_pipe->stream = NULL; + odm_pipe->top_pipe = NULL; + odm_pipe->bottom_pipe = NULL; + odm_pipe->prev_odm_pipe = NULL; + odm_pipe->next_odm_pipe = NULL; + if (odm_pipe->stream_res.dsc) + dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); + /* Clear plane_res and stream_res */ + memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res)); + memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res)); + odm_pipe = next_odm_pipe; + } + if (pipe->plane_state) + resource_build_scaling_params(pipe); + } + + /* merge previously mpc split pipes since mode support needs to make the decision */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; 
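+ /* A bottom pipe is an MPC (horizontal) split secondary only when it + * shares plane_state with the pipe above it; a bottom pipe with a + * different plane_state is a layered MPO plane and is left unmerged here. + */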
+ + if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) + continue; + + pipe->bottom_pipe = hsplit_pipe->bottom_pipe; + if (hsplit_pipe->bottom_pipe) + hsplit_pipe->bottom_pipe->top_pipe = pipe; + hsplit_pipe->plane_state = NULL; + hsplit_pipe->stream = NULL; + hsplit_pipe->top_pipe = NULL; + hsplit_pipe->bottom_pipe = NULL; + + /* Clear plane_res and stream_res */ + memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res)); + memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res)); + if (pipe->plane_state) + resource_build_scaling_params(pipe); + } +} + +int dcn20_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int vlevel, + int *split, + bool *merge) +{ + int i, pipe_idx, vlevel_split; + int plane_count = 0; + bool force_split = false; + bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID; + struct vba_vars_st *v = &context->bw_ctx.dml.vba; + int max_mpc_comb = v->maxMpcComb; + + if (context->stream_count > 1) { + if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) + avoid_split = true; + } else if (dc->debug.force_single_disp_pipe_split) + force_split = true; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /** + * Workaround for avoiding pipe-split in cases where we'd split + * planes that are too small, resulting in splits that aren't + * valid for the scaler. + */ + if (pipe->plane_state && + (pipe->plane_state->dst_rect.width <= 16 || + pipe->plane_state->dst_rect.height <= 16 || + pipe->plane_state->src_rect.width <= 16 || + pipe->plane_state->src_rect.height <= 16)) + avoid_split = true; + + /* TODO: fix dc bugs and remove this split threshold thing */ + if (pipe->stream && !pipe->prev_odm_pipe && + (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) + ++plane_count; + } + if (plane_count > dc->res_pool->pipe_count / 2) + avoid_split = true; + + /* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct dc_crtc_timing timing; + + if (!pipe->stream) + continue; + else { + timing = pipe->stream->timing; + if (timing.h_border_left + timing.h_border_right + + timing.v_border_top + timing.v_border_bottom > 0) { + avoid_split = true; + break; + } + } + } + + /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */ + if (avoid_split) { + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++) + if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 && + v->ModeSupport[vlevel][0]) + break; + /* Impossible to not split this pipe */ + if (vlevel > context->bw_ctx.dml.soc.num_states) + vlevel = vlevel_split; + else + max_mpc_comb = 0; + pipe_idx++; + } + v->maxMpcComb = max_mpc_comb; + } + + /* Split loop sets which pipe should be split based on dml outputs and dc flags */ + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + int pipe_plane = v->pipe_plane[pipe_idx]; + bool split4mpc = context->stream_count == 1 && plane_count == 1 + && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4; + + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4) + 
split[i] = 4; + else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2) + split[i] = 2; + + if ((pipe->stream->view_format == + VIEW_3D_FORMAT_SIDE_BY_SIDE || + pipe->stream->view_format == + VIEW_3D_FORMAT_TOP_AND_BOTTOM) && + (pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_TOP_AND_BOTTOM || + pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_SIDE_BY_SIDE)) + split[i] = 2; + if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { + split[i] = 2; + v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1; + } + if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) { + split[i] = 4; + v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1; + } + /*420 format workaround*/ + if (pipe->stream->timing.h_addressable > 7680 && + pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { + split[i] = 4; + } + v->ODMCombineEnabled[pipe_plane] = + v->ODMCombineEnablePerState[vlevel][pipe_plane]; + + if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) { + if (resource_get_mpc_slice_count(pipe) == 2) { + /*If need split for mpc but 2 way split already*/ + if (split[i] == 4) + split[i] = 2; /* 2 -> 4 MPC */ + else if (split[i] == 2) + split[i] = 0; /* 2 -> 2 MPC */ + else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) + merge[i] = true; /* 2 -> 1 MPC */ + } else if (resource_get_mpc_slice_count(pipe) == 4) { + /*If need split for mpc but 4 way split already*/ + if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe) + || !pipe->bottom_pipe)) { + merge[i] = true; /* 4 -> 2 MPC */ + } else if (split[i] == 0 && pipe->top_pipe && + pipe->top_pipe->plane_state == pipe->plane_state) + merge[i] = true; /* 4 -> 1 MPC */ + split[i] = 0; + } else if (resource_get_odm_slice_count(pipe) > 1) { + /* ODM -> MPC transition */ + if (pipe->prev_odm_pipe) { + split[i] = 0; + merge[i] = true; + } + } + } else { + if (resource_get_odm_slice_count(pipe) == 2) { + /*If need split for odm but 2 way split already*/ + if (split[i] == 4) + split[i] = 2; /* 2 -> 4 ODM */ + else if (split[i] == 2) + split[i] = 0; /* 2 -> 2 ODM */ + else if (pipe->prev_odm_pipe) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* exit ODM */ + } + } else if (resource_get_odm_slice_count(pipe) == 4) { + /*If need split for odm but 4 way split already*/ + if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe) + || !pipe->next_odm_pipe)) { + merge[i] = true; /* 4 -> 2 ODM */ + } else if (split[i] == 0 && pipe->prev_odm_pipe) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* exit ODM */ + } + split[i] = 0; + } else if (resource_get_mpc_slice_count(pipe) > 1) { + /* MPC -> ODM transition */ + ASSERT(0); /* NOT expected yet */ + if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { + split[i] = 0; + merge[i] = true; + } + } + } + + /* Adjust dppclk when split is forced, do not bother with dispclk */ + if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) { + DC_FP_START(); + dcn20_fpu_adjust_dppclk(v, vlevel, max_mpc_comb, pipe_idx, false); + DC_FP_END(); + } + pipe_idx++; + } + + return vlevel; +} + +bool dcn20_fast_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *pipe_split_from, + int *vlevel_out, + bool fast_validate) +{ + bool out = false; + int split[MAX_PIPES] = { 0 }; + int pipe_cnt, i, pipe_idx, vlevel; + + 
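/* + * Fast validation flow (descriptive summary of the steps below): merge any + * previously split pipes back together, repopulate the DML pipe descriptors, + * find the lowest voltage level that supports them, then apply the DML split + * decisions by splitting streams for ODM/MPC and validating DSC resources on + * the resulting topology. + */ +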
ASSERT(pipes); + if (!pipes) + return false; + + dcn20_merge_pipes_for_validate(dc, context); + + DC_FP_START(); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + DC_FP_END(); + + *pipe_cnt_out = pipe_cnt; + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + + if (vlevel > context->bw_ctx.dml.soc.num_states) + goto validate_fail; + + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL); + + /* initialize pipe_split_from to invalid idx */ + for (i = 0; i < MAX_PIPES; i++) + pipe_split_from[i] = -1; + + for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; + + if (!pipe->stream || pipe_split_from[i] >= 0) + continue; + + pipe_idx++; + + if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { + hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); + ASSERT(hsplit_pipe); + if (!dcn20_split_stream_for_odm( + dc, &context->res_ctx, + pipe, hsplit_pipe)) + goto validate_fail; + pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; + dcn20_build_mapped_resource(dc, context, pipe->stream); + } + + if (!pipe->plane_state) + continue; + /* Skip 2nd half of already split pipe */ + if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state) + continue; + + /* We do not support mpo + odm at the moment */ + if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state + && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) + goto validate_fail; + + if (split[i] == 2) { + if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) { + /* pipe not split previously needs split */ + hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); + ASSERT(hsplit_pipe); + if (!hsplit_pipe) { + DC_FP_START(); + dcn20_fpu_adjust_dppclk(&context->bw_ctx.dml.vba, vlevel, context->bw_ctx.dml.vba.maxMpcComb, pipe_idx, true); + DC_FP_END(); + continue; + } + if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { + if (!dcn20_split_stream_for_odm( + dc, &context->res_ctx, + pipe, hsplit_pipe)) + goto validate_fail; + dcn20_build_mapped_resource(dc, context, pipe->stream); + } else { + dcn20_split_stream_for_mpc( + &context->res_ctx, dc->res_pool, + pipe, hsplit_pipe); + resource_build_scaling_params(pipe); + resource_build_scaling_params(hsplit_pipe); + } + pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; + } + } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { + /* merge should already have been done */ + ASSERT(0); + } + } + /* Actual dsc count per stream dsc validation */ + if (!dcn20_validate_dsc(dc, context)) { + context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = + DML_FAIL_DSC_VALIDATION_FAILURE; + goto validate_fail; + } + + *vlevel_out = vlevel; + + out = true; + goto validate_out; + +validate_fail: + out = false; + +validate_out: + return out; +} + +bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, + bool fast_validate) +{ + bool voltage_supported; + display_e2e_pipe_params_st *pipes; + + pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + if (!pipes) + return false; + + DC_FP_START(); + voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes); + DC_FP_END(); + + kfree(pipes); +
return voltage_supported; +} + +struct pipe_ctx *dcn20_acquire_free_pipe_for_layer( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *opp_head) +{ + struct resource_context *res_ctx = &new_ctx->res_ctx; + struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(res_ctx, opp_head->stream); + struct pipe_ctx *sec_dpp_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, otg_master); + + ASSERT(otg_master); + + if (!sec_dpp_pipe) + return NULL; + + sec_dpp_pipe->stream = opp_head->stream; + sec_dpp_pipe->stream_res.tg = opp_head->stream_res.tg; + sec_dpp_pipe->stream_res.opp = opp_head->stream_res.opp; + + sec_dpp_pipe->plane_res.hubp = pool->hubps[sec_dpp_pipe->pipe_idx]; + sec_dpp_pipe->plane_res.ipp = pool->ipps[sec_dpp_pipe->pipe_idx]; + sec_dpp_pipe->plane_res.dpp = pool->dpps[sec_dpp_pipe->pipe_idx]; + sec_dpp_pipe->plane_res.mpcc_inst = pool->dpps[sec_dpp_pipe->pipe_idx]->inst; + + return sec_dpp_pipe; +} + +bool dcn20_get_dcc_compression_cap(const struct dc *dc, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output) +{ + return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( + dc->res_pool->hubbub, + input, + output); +} + +static void dcn20_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool); + + dcn20_resource_destruct(dcn20_pool); + kfree(dcn20_pool); + *pool = NULL; +} + + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + + +enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state) +{ + enum surface_pixel_format surf_pix_format = plane_state->format; + unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format); + + plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_S; + if (bpp == 64) + plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_D; + + return DC_OK; +} + +void dcn20_release_pipe(struct dc_state *context, + struct pipe_ctx *pipe, + const struct resource_pool *pool) +{ + if (resource_is_pipe_type(pipe, OPP_HEAD) && pipe->stream_res.dsc) + dcn20_release_dsc(&context->res_ctx, pool, &pipe->stream_res.dsc); + memset(pipe, 0, sizeof(*pipe)); +} + +static const struct resource_funcs dcn20_res_pool_funcs = { + .destroy = dcn20_destroy_resource_pool, + .link_enc_create = dcn20_link_encoder_create, + .panel_cntl_create = dcn20_panel_cntl_create, + .validate_bandwidth = dcn20_validate_bandwidth, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .add_stream_to_ctx = dcn20_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .set_mcif_arb_params = dcn20_set_mcif_arb_params, + .populate_dml_pipes = dcn20_populate_dml_pipes_from_context, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link +}; + +bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc), + GFP_KERNEL); + + if (!dwbc20) { + dm_error("DC: failed to create dwbc20!\n"); + return false; + } + dcn20_dwbc_construct(dwbc20, ctx, + 
&dwbc20_regs[i], + &dwbc20_shift, + &dwbc20_mask, + i); + pool->dwbc[i] = &dwbc20->base; + } + return true; +} + +bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + ASSERT(pipe_count > 0); + + for (i = 0; i < pipe_count; i++) { + struct dcn20_mmhubbub *mcif_wb20 = kzalloc(sizeof(struct dcn20_mmhubbub), + GFP_KERNEL); + + if (!mcif_wb20) { + dm_error("DC: failed to create mcif_wb20!\n"); + return false; + } + + dcn20_mmhubbub_construct(mcif_wb20, ctx, + &mcif_wb20_regs[i], + &mcif_wb20_shift, + &mcif_wb20_mask, + i); + + pool->mcif_wb[i] = &mcif_wb20->base; + } + return true; +} + +static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) +{ + struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC); + + if (!pp_smu) + return pp_smu; + + dm_pp_get_funcs(ctx, pp_smu); + + if (pp_smu->ctx.ver != PP_SMU_VER_NV) + pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs)); + + return pp_smu; +} + +static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) +{ + if (pp_smu && *pp_smu) { + kfree(*pp_smu); + *pp_smu = NULL; + } +} + +static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( + uint32_t hw_internal_rev) +{ + if (ASICREV_IS_NAVI14_M(hw_internal_rev)) + return &dcn2_0_nv14_soc; + + if (ASICREV_IS_NAVI12_P(hw_internal_rev)) + return &dcn2_0_nv12_soc; + + return &dcn2_0_soc; +} + +static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params( + uint32_t hw_internal_rev) +{ + /* NV14 */ + if (ASICREV_IS_NAVI14_M(hw_internal_rev)) + return &dcn2_0_nv14_ip; + + /* NV12 and NV10 */ + return &dcn2_0_ip; +} + +static enum dml_project get_dml_project_version(uint32_t hw_internal_rev) +{ + return DML_PROJECT_NAVI10v2; +} + +static bool init_soc_bounding_box(struct dc *dc, + struct dcn20_resource_pool *pool) +{ + struct _vcs_dpi_soc_bounding_box_st *loaded_bb = + get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev); + struct _vcs_dpi_ip_params_st *loaded_ip = + get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev); + + DC_LOGGER_INIT(dc->ctx->logger); + + if (pool->base.pp_smu) { + struct pp_smu_nv_clock_table max_clocks = {0}; + unsigned int uclk_states[8] = {0}; + unsigned int num_states = 0; + enum pp_smu_status status; + bool clock_limits_available = false; + bool uclk_states_available = false; + + if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) { + status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) + (&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states); + + uclk_states_available = (status == PP_SMU_RESULT_OK); + } + + if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) { + status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) + (&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks); + /* SMU cannot set DCF clock to anything equal to or higher than SOC clock + */ + if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz) + max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000; + clock_limits_available = (status == PP_SMU_RESULT_OK); + } + + if (clock_limits_available && uclk_states_available && num_states) { + DC_FP_START(); + dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); + DC_FP_END(); + } else if (clock_limits_available) { + DC_FP_START(); + dcn20_cap_soc_clocks(loaded_bb, max_clocks); + DC_FP_END(); + } + } + + loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; + loaded_ip->max_num_dpp = pool->base.pipe_count; + DC_FP_START(); + dcn20_patch_bounding_box(dc, 
loaded_bb); + DC_FP_END(); + return true; +} + +static bool dcn20_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn20_resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + struct ddc_service_init_data ddc_init_data = {0}; + struct _vcs_dpi_soc_bounding_box_st *loaded_bb = + get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev); + struct _vcs_dpi_ip_params_st *loaded_ip = + get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev); + enum dml_project dml_project_version = + get_dml_project_version(ctx->asic_id.hw_internal_rev); + + ctx->dc_bios->regs = &bios_regs; + pool->base.funcs = &dcn20_res_pool_funcs; + + if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) { + pool->base.res_cap = &res_cap_nv14; + pool->base.pipe_count = 5; + pool->base.mpcc_count = 5; + } else { + pool->base.res_cap = &res_cap_nv10; + pool->base.pipe_count = 6; + pool->base.mpcc_count = 6; + } + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + + dc->caps.max_slave_planes = 1; + dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 1; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 1; + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.dgam_rom_caps.pq = 0; + dc->caps.color.dpp.dgam_rom_caps.hlg = 0; + dc->caps.color.dpp.post_csc = 0; + dc->caps.color.dpp.gamma_corr = 0; + dc->caps.color.dpp.dgam_rom_for_yuv = 1; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN2, only MPC ROM + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 0; + dc->caps.color.mpc.num_3dluts = 0; + dc->caps.color.mpc.shared_3d_lut = 0; + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + dc->caps.dp_hdmi21_pcon_support = true; + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + //dcn2.0x + dc->work_arounds.dedcn20_305_wa = true; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + pool->base.clock_sources[DCN20_CLK_SRC_PLL0] = + dcn20_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + 
pool->base.clock_sources[DCN20_CLK_SRC_PLL1] = + dcn20_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL2] = + dcn20_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL3] = + dcn20_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL4] = + dcn20_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL5] = + dcn20_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL5, + &clk_src_regs[5], false); + pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL; + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn20_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + pool->base.dmcu = dcn20_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + pool->base.pp_smu = dcn20_pp_smu_create(ctx); + + + if (!init_soc_bounding_box(dc, pool)) { + dm_error("DC: failed to initialize soc bounding box!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + dml_init_instance(&dc->dml, loaded_bb, loaded_ip, dml_project_version); + + if (!dc->debug.disable_pplib_wm_range) { + struct pp_smu_wm_range_sets ranges = {0}; + int i = 0; + + ranges.num_reader_wm_sets = 0; + + if (loaded_bb->num_states == 1) { + ranges.reader_wm_sets[0].wm_inst = i; + ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + + ranges.num_reader_wm_sets = 1; + } else if (loaded_bb->num_states > 1) { + for (i = 0; i < 4 && i < loaded_bb->num_states; i++) { + ranges.reader_wm_sets[i].wm_inst = i; + ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + DC_FP_START(); + dcn20_fpu_set_wm_ranges(i, &ranges, loaded_bb); + DC_FP_END(); + + ranges.num_reader_wm_sets = i + 1; + } + + ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + } + + ranges.num_writer_wm_sets = 1; + + ranges.writer_wm_sets[0].wm_inst = 0; + ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + 
ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + + /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ + if (pool->base.pp_smu->nv_funcs.set_wm_ranges) + pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges); + } + + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn20_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* mem input -> ipp -> dpp -> opp -> TG */ + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.hubps[i] = dcn20_hubp_create(ctx, i); + if (pool->base.hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create memory input!\n"); + goto create_fail; + } + + pool->base.ipps[i] = dcn20_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create input pixel processor!\n"); + goto create_fail; + } + + pool->base.dpps[i] = dcn20_dpp_create(ctx, i); + if (pool->base.dpps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + } + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn20_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + pool->base.opps[i] = dcn20_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.timing_generators[i] = dcn20_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + + pool->base.timing_generator_count = i; + + pool->base.mpc = dcn20_mpc_create(ctx); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + pool->base.hubbub = dcn20_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn20_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + if (!dcn20_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + if (!dcn20_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + dcn20_hw_sequencer_construct(dc); + + // IF NV12, set PG function pointer to NULL. 
It's not that + // PG isn't supported for NV12, it's that we don't want to + // program the registers because that will cause more power + // to be consumed. We could have created dcn20_init_hw to get + // the same effect by checking ASIC rev, but there was a + // request at some point to not check ASIC rev on hw sequencer. + if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) { + dc->hwseq->funcs.enable_power_gating_plane = NULL; + dc->debug.disable_dpp_power_gate = true; + dc->debug.disable_hubp_power_gate = true; + } + + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { + ddc_init_data.ctx = dc->ctx; + ddc_init_data.link = NULL; + ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + ddc_init_data.id.enum_id = 0; + ddc_init_data.id.type = OBJECT_TYPE_GENERIC; + pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); + } else { + pool->base.oem_device = NULL; + } + + return true; + +create_fail: + + dcn20_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn20_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn20_resource_pool *pool = + kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC); + + if (!pool) + return NULL; + + if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h new file mode 100644 index 0000000000..4cee3fa11a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h @@ -0,0 +1,171 @@ +/* +* Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_RESOURCE_DCN20_H__ +#define __DC_RESOURCE_DCN20_H__ + +#include "core_types.h" +#include "dml/dcn20/dcn20_fpu.h" + +#define TO_DCN20_RES_POOL(pool)\ + container_of(pool, struct dcn20_resource_pool, base) + +struct dc; +struct resource_pool; +struct _vcs_dpi_display_pipe_params_st; + +extern struct _vcs_dpi_ip_params_st dcn2_0_ip; +extern struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc; +extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc; +extern struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc; + +struct dcn20_resource_pool { + struct resource_pool base; +}; +struct resource_pool *dcn20_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +struct link_encoder *dcn20_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data); + +unsigned int dcn20_calc_max_scaled_time( + unsigned int time_per_pixel, + enum mmhubbub_wbif_mode mode, + unsigned int urgent_watermark); + +struct pipe_ctx *dcn20_acquire_free_pipe_for_layer( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *opp_head_pipe); +void dcn20_release_pipe(struct dc_state *context, + struct pipe_ctx *pipe, + const struct resource_pool *pool); +struct stream_encoder *dcn20_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx); + +struct dce_hwseq *dcn20_hwseq_create( + struct dc_context *ctx); + +bool dcn20_get_dcc_compression_cap(const struct dc *dc, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output); + +void dcn20_dpp_destroy(struct dpp **dpp); + +struct dpp *dcn20_dpp_create( + struct dc_context *ctx, + uint32_t inst); + +struct input_pixel_processor *dcn20_ipp_create( + struct dc_context *ctx, uint32_t inst); + +struct output_pixel_processor *dcn20_opp_create( + struct dc_context *ctx, uint32_t inst); + +struct dce_aux *dcn20_aux_engine_create( + struct dc_context *ctx, uint32_t inst); + +struct dce_i2c_hw *dcn20_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst); + +void dcn20_clock_source_destroy(struct clock_source **clk_src); + +struct display_stream_compressor *dcn20_dsc_create( + struct dc_context *ctx, uint32_t inst); +void dcn20_dsc_destroy(struct display_stream_compressor **dsc); + +struct hubp *dcn20_hubp_create( + struct dc_context *ctx, + uint32_t inst); +struct timing_generator *dcn20_timing_generator_create( + struct dc_context *ctx, + uint32_t instance); +struct mpc *dcn20_mpc_create(struct dc_context *ctx); +struct hubbub *dcn20_hubbub_create(struct dc_context *ctx); + +bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool); +bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool); + +void dcn20_set_mcif_arb_params( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt); +bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +void dcn20_merge_pipes_for_validate( + struct dc *dc, + struct dc_state *context); +int dcn20_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int vlevel, + int *split, + bool *merge); +void dcn20_release_dsc(struct resource_context *res_ctx, + const struct resource_pool *pool, + struct display_stream_compressor **dsc); +bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx); +void dcn20_split_stream_for_mpc( + struct resource_context 
*res_ctx, + const struct resource_pool *pool, + struct pipe_ctx *primary_pipe, + struct pipe_ctx *secondary_pipe); +bool dcn20_split_stream_for_odm( + const struct dc *dc, + struct resource_context *res_ctx, + struct pipe_ctx *prev_odm_pipe, + struct pipe_ctx *next_odm_pipe); +void dcn20_acquire_dsc(const struct dc *dc, + struct resource_context *res_ctx, + struct display_stream_compressor **dsc, + int pipe_idx); +struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, + struct resource_context *res_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *primary_pipe); +bool dcn20_fast_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *pipe_split_from, + int *vlevel_out, + bool fast_validate); + +enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); +enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); +enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream); +enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); +enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state); +void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx); + +#endif /* __DC_RESOURCE_DCN20_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c new file mode 100644 index 0000000000..914b234d7f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -0,0 +1,1308 @@ +/* +* Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dc.h" + +#include "dcn201/dcn201_init.h" +#include "dml/dcn20/dcn20_fpu.h" +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn201_resource.h" + +#include "dcn20/dcn20_resource.h" + +#include "dcn10/dcn10_hubp.h" +#include "dcn10/dcn10_ipp.h" +#include "dcn201/dcn201_mpc.h" +#include "dcn201/dcn201_hubp.h" +#include "irq/dcn201/irq_service_dcn201.h" +#include "dcn201/dcn201_dpp.h" +#include "dcn201/dcn201_hubbub.h" +#include "dcn201/dcn201_dccg.h" +#include "dcn201/dcn201_optc.h" +#include "dcn201/dcn201_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn201/dcn201_opp.h" +#include "dcn201/dcn201_link_encoder.h" +#include "dcn20/dcn20_stream_encoder.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" +#include "dcn201/dcn201_hubbub.h" +#include "dcn10/dcn10_resource.h" + +#include "cyan_skillfish_ip_offset.h" + +#include "dcn/dcn_2_0_3_offset.h" +#include "dcn/dcn_2_0_3_sh_mask.h" +#include "dpcs/dpcs_2_0_3_offset.h" +#include "dpcs/dpcs_2_0_3_sh_mask.h" + +#include "mmhub/mmhub_2_0_0_offset.h" +#include "mmhub/mmhub_2_0_0_sh_mask.h" +#include "nbio/nbio_7_4_offset.h" + +#include "reg_helper.h" + +#define MIN_DISP_CLK_KHZ 100000 +#define MIN_DPP_CLK_KHZ 100000 + +static struct _vcs_dpi_ip_params_st dcn201_ip = { + .gpuvm_enable = 0, + .hostvm_enable = 0, + .gpuvm_max_page_table_levels = 4, + .hostvm_max_page_table_levels = 4, + .hostvm_cached_page_table_levels = 0, + .pte_group_size_bytes = 2048, + .rob_buffer_size_kbytes = 168, + .det_buffer_size_kbytes = 164, + .dpte_buffer_size_in_pte_reqs_luma = 84, + .pde_proc_buffer_size_64k_reqs = 48, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_chunk_size_kbytes = 2, + .meta_chunk_size_kbytes = 2, + .writeback_chunk_size_kbytes = 2, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, + .line_buffer_fixed_bpp = 0, + .dcc_supported = true, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 12, + .writeback_max_vscl_taps = 12, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 9600, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 2, + .max_num_dpp = 4, + .max_num_wb = 0, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 8, + .max_vscl_ratio = 8, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.10, + .min_vblank_lines = 30, + .dppclk_delay_subtotal = 77, + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 8, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 87, + .dcfclk_cstate_latency = 10, + .max_inter_dcn_tile_repeaters = 8, + .number_of_cursors = 1, +}; + +static struct _vcs_dpi_soc_bounding_box_st dcn201_soc = { + .clock_limits = { + { + .state = 0, + .dscclk_mhz = 400.0, + .dcfclk_mhz = 1000.0, + 
.fabricclk_mhz = 200.0, + .dispclk_mhz = 300.0, + .dppclk_mhz = 300.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1254.0, + .dram_speed_mts = 2000.0, + }, + { + .state = 1, + .dscclk_mhz = 400.0, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 250.0, + .dispclk_mhz = 1200.0, + .dppclk_mhz = 1200.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1254.0, + .dram_speed_mts = 3600.0, + }, + { + .state = 2, + .dscclk_mhz = 400.0, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 750.0, + .dispclk_mhz = 1200.0, + .dppclk_mhz = 1200.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1254.0, + .dram_speed_mts = 6800.0, + }, + { + .state = 3, + .dscclk_mhz = 400.0, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 250.0, + .dispclk_mhz = 1200.0, + .dppclk_mhz = 1200.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1254.0, + .dram_speed_mts = 14000.0, + }, + { + .state = 4, + .dscclk_mhz = 400.0, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 750.0, + .dispclk_mhz = 1200.0, + .dppclk_mhz = 1200.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1254.0, + .dram_speed_mts = 14000.0, + } + }, + .num_states = 4, + .sr_exit_time_us = 9.0, + .sr_enter_plus_exit_time_us = 11.0, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 256, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 256, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 256, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 80.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 80.0, + .max_avg_sdp_bw_use_normal_percent = 80.0, + .max_avg_dram_bw_use_normal_percent = 69.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 80.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 2, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.3, + .downspread_percent = 0.3, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 128, + .urgent_out_of_order_return_per_channel_bytes = 256, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 16, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 250.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3000, + .use_urgent_burst_bw = 0, +}; + +enum dcn20_clk_src_array_id { + DCN20_CLK_SRC_PLL0, + DCN20_CLK_SRC_PLL1, + DCN20_CLK_SRC_TOTAL_DCN201 +}; + +/* begin ********************* + * macros to expand register list macros defined in HW object header file */ + +/* DCN */ + +#undef BASE_INNER +#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRI_IX(reg_name, block, id)\ + .reg_name = ix ## block ## id ## _ ## reg_name + +#define DCCG_SRII(reg_name, block, id)\ + .block
## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +/* MMHUB */ +#define MMHUB_BASE_INNER(seg) \ + MMHUB_BASE__INST0_SEG ## seg + +#define MMHUB_BASE(seg) \ + MMHUB_BASE_INNER(seg) + +#define MMHUB_SR(reg_name)\ + .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \ + mmMM ## reg_name + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +#define clk_src_regs(index, pllid)\ +[index] = {\ + CS_COMMON_REG_LIST_DCN201(index, pllid),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_DCN2_REG_LIST(id)\ +} + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1) +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN20(_MASK) +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), +}; + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN_COMMON_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid) \ +} + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B), +}; + +#define LINK_ENCODER_MASK_SH_LIST_DCN201(mask_sh)\ + LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh) + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN201(_MASK) +}; + +#define ipp_regs(id)\ +[id] = {\ + IPP_REG_LIST_DCN201(id),\ +} + +static const struct dcn10_ipp_registers ipp_regs[] = { + ipp_regs(0), + ipp_regs(1), + ipp_regs(2), + ipp_regs(3), +}; + 
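/* + * Example expansion of the SRI() register list macro defined above, using a + * hypothetical register FOO in block OTG, instance 0 (FOO is illustrative, + * not a real DCN201 register): SRI(FOO, OTG, 0) expands to + * .FOO = BASE(mmOTG0_FOO_BASE_IDX) + mmOTG0_FOO + * i.e. the per-segment base address plus that instance's register offset. + */ +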
+static const struct dcn10_ipp_shift ipp_shift = { + IPP_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn10_ipp_mask ipp_mask = { + IPP_MASK_SH_LIST_DCN201(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN201(id),\ +} + +static const struct dcn201_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), +}; + +static const struct dcn201_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn201_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN201(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUX_RESET_MASK = 0 \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1) +}; + +#define tf_regs(id)\ +[id] = {\ + TF_REG_LIST_DCN201(id),\ +} + +static const struct dcn201_dpp_registers tf_regs[] = { + tf_regs(0), + tf_regs(1), + tf_regs(2), + tf_regs(3), +}; + +static const struct dcn201_dpp_shift tf_shift = { + TF_REG_LIST_SH_MASK_DCN201(__SHIFT) +}; + +static const struct dcn201_dpp_mask tf_mask = { + TF_REG_LIST_SH_MASK_DCN201(_MASK) +}; + +static const struct dcn201_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN201(0), + MPC_REG_LIST_DCN201(1), + MPC_REG_LIST_DCN201(2), + MPC_REG_LIST_DCN201(3), + MPC_REG_LIST_DCN201(4), + MPC_OUT_MUX_REG_LIST_DCN201(0), + MPC_OUT_MUX_REG_LIST_DCN201(1), +}; + +static const struct dcn201_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn201_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN201(_MASK) +}; + +#define tg_regs_dcn201(id)\ +[id] = {TG_COMMON_REG_LIST_DCN201(id)} + +static const struct dcn_optc_registers tg_regs[] = { + tg_regs_dcn201(0), + tg_regs_dcn201(1) +}; + +static const struct dcn_optc_shift tg_shift = { + TG_COMMON_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn_optc_mask tg_mask = { + TG_COMMON_MASK_SH_LIST_DCN201(_MASK) +}; + +#define hubp_regsDCN201(id)\ +[id] = {\ + HUBP_REG_LIST_DCN201(id)\ +} + +static const struct dcn201_hubp_registers hubp_regs[] = { + hubp_regsDCN201(0), + hubp_regsDCN201(1), + hubp_regsDCN201(2), + hubp_regsDCN201(3) +}; + +static const struct dcn201_hubp_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn201_hubp_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN201(_MASK) +}; + +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN201(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN201(_MASK) +}; + + +static const struct dccg_registers dccg_regs = { + DCCG_COMMON_REG_LIST_DCN_BASE() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(_MASK) +}; + +static const struct resource_caps res_cap_dnc201 = { + .num_timing_generator = 2, + .num_opp = 2, + .num_video_plane = 4, + .num_audio = 2, + .num_stream_encoder = 2, + .num_pll = 2, + .num_ddc = 2, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = true, + .p010 = false, + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 250, + .fp16 = 250 + }, + 64, + 64 +}; + +static const struct 
dc_debug_options debug_defaults_drv = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = true, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .az_endpoint_mute_only = true, + .max_downscale_src_width = 3840, + .disable_pplib_wm_range = true, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_tri_buf = false, + .enable_legacy_fast_update = true, + .using_dml2 = false, +}; + +static void dcn201_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN201_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn201_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn201_dpp *dpp = + kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC); + + if (!dpp) + return NULL; + + if (dpp201_construct(dpp, ctx, inst, + &tf_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + kfree(dpp); + return NULL; +} + +static struct input_pixel_processor *dcn201_ipp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn10_ipp *ipp = + kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC); + + if (!ipp) { + return NULL; + } + + dcn20_ipp_construct(ipp, ctx, inst, + &ipp_regs[inst], &ipp_shift, &ipp_mask); + return &ipp->base; +} + + +static struct output_pixel_processor *dcn201_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn201_opp *opp = + kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC); + + if (!opp) { + return NULL; + } + + dcn201_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) +}; + +static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} + +static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc) +{ + struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), + GFP_ATOMIC); + + if (!mpc201) + return NULL; + + dcn201_mpc_construct(mpc201, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + num_mpcc); + + return &mpc201->base; +} + +static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx) +{ + struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), + GFP_ATOMIC); + + if (!hubbub) + return NULL; + + hubbub201_construct(hubbub, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask); + + return &hubbub->base; +} + +static struct timing_generator 
*dcn201_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_ATOMIC); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &tg_regs[instance]; + tgn10->tg_shift = &tg_shift; + tgn10->tg_mask = &tg_mask; + + dcn201_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn201_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC); + struct dcn10_link_encoder *enc10 = &enc20->enc10; + + if (!enc20) + return NULL; + + dcn201_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc10->base; +} + +static struct clock_source *dcn201_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC); + + if (!clk_src) + return NULL; + + if (dce112_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + kfree(clk_src); + return NULL; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), + + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); +} + +static struct audio *dcn201_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct stream_encoder *dcn201_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1 = + kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC); + + if (!enc1) + return NULL; + + dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN201_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN201_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN201_MASK_SH_LIST(_MASK) +}; + +static struct dce_hwseq *dcn201_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn201_create_audio, + .create_stream_encoder = dcn201_stream_encoder_create, + .create_hwseq = dcn201_hwseq_create, +}; + +static void 
dcn201_clock_source_destroy(struct clock_source **clk_src) +{ + kfree(TO_DCE110_CLK_SRC(*clk_src)); + *clk_src = NULL; +} + +static void dcn201_resource_destruct(struct dcn201_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + + if (pool->base.mpc != NULL) { + kfree(TO_DCN201_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn201_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN10_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn201_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn201_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); +} + +static struct hubp *dcn201_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn201_hubp *hubp201 = + kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC); + + if (!hubp201) + return NULL; + + if (dcn201_hubp_construct(hubp201, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp201->base; + + kfree(hubp201); + return NULL; +} + +static struct pipe_ctx *dcn201_acquire_free_pipe_for_layer( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *opp_head_pipe) +{ + struct resource_context *res_ctx = &new_ctx->res_ctx; + struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream); + struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe); + + if (!head_pipe) + ASSERT(0); + + if (!idle_pipe) + return NULL; + + idle_pipe->stream = head_pipe->stream; + idle_pipe->stream_res.tg = head_pipe->stream_res.tg; + idle_pipe->stream_res.opp = head_pipe->stream_res.opp; + + idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; + idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; + idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; + idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; + + return idle_pipe; +} + +static bool dcn201_get_dcc_compression_cap(const struct dc *dc, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output) +{ + return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( + 
dc->res_pool->hubbub, + input, + output); +} + +static void dcn201_populate_dml_writeback_from_context(struct dc *dc, + struct resource_context *res_ctx, + display_e2e_pipe_params_st *pipes) +{ + DC_FP_START(); + dcn201_populate_dml_writeback_from_context_fpu(dc, res_ctx, pipes); + DC_FP_END(); +} + +static void dcn201_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn201_resource_pool *dcn201_pool = TO_DCN201_RES_POOL(*pool); + + dcn201_resource_destruct(dcn201_pool); + kfree(dcn201_pool); + *pool = NULL; +} + +static void dcn201_link_init(struct dc_link *link) +{ + if (link->ctx->dc_bios->integrated_info) + link->dp_ss_off = !link->ctx->dc_bios->integrated_info->dp_ss_control; +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn201_get_dcc_compression_cap, +}; + +static struct resource_funcs dcn201_res_pool_funcs = { + .link_init = dcn201_link_init, + .destroy = dcn201_destroy_resource_pool, + .link_enc_create = dcn201_link_encoder_create, + .panel_cntl_create = NULL, + .validate_bandwidth = dcn20_validate_bandwidth, + .populate_dml_pipes = dcn20_populate_dml_pipes_from_context, + .add_stream_to_ctx = dcn20_add_stream_to_ctx, + .add_dsc_to_stream_resource = NULL, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn201_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .populate_dml_writeback_from_context = dcn201_populate_dml_writeback_from_context, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .set_mcif_arb_params = dcn20_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link +}; + +static bool dcn201_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn201_resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_dnc201; + pool->base.funcs = &dcn201_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + + pool->base.pipe_count = 4; + pool->base.mpcc_count = 5; + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + + dc->caps.max_slave_planes = 1; + dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 1; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 1; + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.dgam_rom_caps.pq = 0; + dc->caps.color.dpp.dgam_rom_caps.hlg = 0; + dc->caps.color.dpp.post_csc = 0; + dc->caps.color.dpp.gamma_corr = 0; + dc->caps.color.dpp.dgam_rom_for_yuv = 1; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN2 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + 
dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 0; + dc->caps.color.mpc.num_3dluts = 0; + dc->caps.color.mpc.shared_3d_lut = 0; + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + dc->debug = debug_defaults_drv; + + /*a0 only, remove later*/ + dc->work_arounds.no_connect_phy_config = true; + dc->work_arounds.dedcn20_305_wa = true; + /************************************************* + * Create resources * + *************************************************/ + + pool->base.clock_sources[DCN20_CLK_SRC_PLL0] = + dcn201_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL1] = + dcn201_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + + pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN201; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn201_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + goto create_fail; + } + } + + pool->base.dccg = dccg201_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + goto create_fail; + } + + dcn201_ip.max_num_otg = pool->base.res_cap->num_timing_generator; + dcn201_ip.max_num_dpp = pool->base.pipe_count; + dml_init_instance(&dc->dml, &dcn201_soc, &dcn201_ip, DML_PROJECT_DCN201); + { + struct irq_service_init_data init_data; + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn201_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + } + + /* mem input -> ipp -> dpp -> opp -> TG */ + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.hubps[i] = dcn201_hubp_create(ctx, i); + if (pool->base.hubps[i] == NULL) { + dm_error( + "DC: failed to create memory input!\n"); + goto create_fail; + } + + pool->base.ipps[i] = dcn201_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + dm_error( + "DC: failed to create input pixel processor!\n"); + goto create_fail; + } + + pool->base.dpps[i] = dcn201_dpp_create(ctx, i); + if (pool->base.dpps[i] == NULL) { + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + pool->base.opps[i] = dcn201_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn201_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn201_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + dm_error( + "DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.timing_generators[i] = dcn201_timing_generator_create( + ctx, i); + if 
(pool->base.timing_generators[i] == NULL) { + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + + pool->base.timing_generator_count = i; + + pool->base.mpc = dcn201_mpc_create(ctx, pool->base.mpcc_count); + if (pool->base.mpc == NULL) { + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + pool->base.hubbub = dcn201_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + dcn201_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + return true; + +create_fail: + + dcn201_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn201_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn201_resource_pool *pool = + kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC); + + if (!pool) + return NULL; + + if (dcn201_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h new file mode 100644 index 0000000000..e0467d17d4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h @@ -0,0 +1,50 @@ +/* +* Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_RESOURCE_DCN201_H__ +#define __DC_RESOURCE_DCN201_H__ + +#include "core_types.h" + +#define RRDPCS_PHY_DP_TX_PSTATE_POWER_UP 0x00000000 +#define RRDPCS_PHY_DP_TX_PSTATE_HOLD 0x00000001 +#define RRDPCS_PHY_DP_TX_PSTATE_HOLD_OFF 0x00000002 +#define RRDPCS_PHY_DP_TX_PSTATE_POWER_DOWN 0x00000003 + +#define TO_DCN201_RES_POOL(pool)\ + container_of(pool, struct dcn201_resource_pool, base) + +struct dc; +struct resource_pool; +struct _vcs_dpi_display_pipe_params_st; + +struct dcn201_resource_pool { + struct resource_pool base; +}; +struct resource_pool *dcn201_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +#endif /* __DC_RESOURCE_DCN201_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c new file mode 100644 index 0000000000..65d337731f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -0,0 +1,1744 @@ +/* +* Copyright 2018 Advanced Micro Devices, Inc. + * Copyright 2019 Raptor Engineering, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include <linux/slab.h> + +#include "dm_services.h" +#include "dc.h" + +#include "dcn21/dcn21_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn20/dcn20_resource.h" +#include "dcn21/dcn21_resource.h" + +#include "dml/dcn20/dcn20_fpu.h" + +#include "clk_mgr.h" +#include "dcn10/dcn10_hubp.h" +#include "dcn10/dcn10_ipp.h" +#include "dcn20/dcn20_hubbub.h" +#include "dcn20/dcn20_mpc.h" +#include "dcn20/dcn20_hubp.h" +#include "dcn21/dcn21_hubp.h" +#include "irq/dcn21/irq_service_dcn21.h" +#include "dcn20/dcn20_dpp.h" +#include "dcn20/dcn20_optc.h" +#include "dcn21/dcn21_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn20/dcn20_opp.h" +#include "dcn20/dcn20_dsc.h" +#include "dcn21/dcn21_link_encoder.h" +#include "dcn20/dcn20_stream_encoder.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "virtual/virtual_stream_encoder.h" +#include "dml/display_mode_vba.h" +#include "dcn20/dcn20_dccg.h" +#include "dcn21/dcn21_dccg.h" +#include "dcn21/dcn21_hubbub.h" +#include "dcn10/dcn10_resource.h" +#include "dce/dce_panel_cntl.h" + +#include "dcn20/dcn20_dwb.h" +#include "dcn20/dcn20_mmhubbub.h" +#include "dpcs/dpcs_2_1_0_offset.h" +#include "dpcs/dpcs_2_1_0_sh_mask.h" + +#include "renoir_ip_offset.h" +#include "dcn/dcn_2_1_0_offset.h" +#include "dcn/dcn_2_1_0_sh_mask.h" + +#include "nbio/nbio_7_0_offset.h" + +#include "mmhub/mmhub_2_0_0_offset.h" +#include "mmhub/mmhub_2_0_0_sh_mask.h" + +#include "reg_helper.h" +#include "dce/dce_abm.h" +#include "dce/dce_dmcu.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" +#include "dcn21_resource.h" +#include "vm_helper.h" +#include "dcn20/dcn20_vmid.h" +#include "dce/dmub_psr.h" +#include "dce/dmub_abm.h" + +/* begin ********************* + * macros to expand register list macros defined in HW object header file */ + +/* DCN */ +#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIF0_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +/* MMHUB */ +#define MMHUB_BASE_INNER(seg) \ + MMHUB_BASE__INST0_SEG ## seg + +#define MMHUB_BASE(seg) \ + MMHUB_BASE_INNER(seg) + +#define MMHUB_SR(reg_name)\ + .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \ + mmMM ## reg_name + +#define clk_src_regs(index, pllid)\ +[index] = {\ + CS_COMMON_REG_LIST_DCN2_1(index, pllid),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + 
clk_src_regs(2, C), + clk_src_regs(3, D), + clk_src_regs(4, E), +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +static const struct dce_dmcu_registers dmcu_regs = { + DMCU_DCN20_REG_LIST() +}; + +static const struct dce_dmcu_shift dmcu_shift = { + DMCU_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dce_dmcu_mask dmcu_mask = { + DMCU_MASK_SH_LIST_DCN10(_MASK) +}; + +static const struct dce_abm_registers abm_regs = { + ABM_DCN20_REG_LIST() +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN20(_MASK) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +static const struct dccg_registers dccg_regs = { + DCCG_COMMON_REG_LIST_DCN_BASE() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN2_1(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN2_1(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN20(id),\ +} + +static const struct dcn20_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3), + opp_regs(4), + opp_regs(5), +}; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +#define tg_regs(id)\ +[id] = {TG_COMMON_REG_LIST_DCN2_0(id)} + +static const struct dcn_optc_registers tg_regs[] = { + tg_regs(0), + tg_regs(1), + tg_regs(2), + tg_regs(3) +}; + +static const struct dcn_optc_shift tg_shift = { + TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dcn_optc_mask tg_mask = { + TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +static const struct dcn20_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN2_0(0), + MPC_REG_LIST_DCN2_0(1), + MPC_REG_LIST_DCN2_0(2), + MPC_REG_LIST_DCN2_0(3), + MPC_REG_LIST_DCN2_0(4), + MPC_REG_LIST_DCN2_0(5), + MPC_OUT_MUX_REG_LIST_DCN2_0(0), + MPC_OUT_MUX_REG_LIST_DCN2_0(1), + MPC_OUT_MUX_REG_LIST_DCN2_0(2), + MPC_OUT_MUX_REG_LIST_DCN2_0(3), + MPC_DBG_REG_LIST_DCN2_0() +}; + +static const struct dcn20_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT), + MPC_DEBUG_REG_LIST_SH_DCN20 +}; + +static const struct dcn20_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK), + MPC_DEBUG_REG_LIST_MASK_DCN20 +}; + +#define hubp_regs(id)\ +[id] = {\ + HUBP_REG_LIST_DCN21(id)\ +} + +static const struct dcn_hubp2_registers hubp_regs[] = { + hubp_regs(0), + hubp_regs(1), + hubp_regs(2), + hubp_regs(3) +}; + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN21(__SHIFT) +}; + 
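(Aside: the hubp_shift/hubp_mask pair straddling this point shows the convention used throughout these resource files — one MASK_SH_LIST macro is expanded twice, once with __SHIFT and once with _MASK, so every register field gets a matching bit offset and bit mask. Below is a minimal standalone sketch of how such a pair is consumed; set_reg_field() is an illustrative name, not a DC helper — the real reg_helper REG_SET/REG_UPDATE macros perform the equivalent read-modify-write.)

#include <stdint.h>

/* Illustrative only: mirrors the read-modify-write that REG_UPDATE expands to. */
static inline uint32_t set_reg_field(uint32_t reg_value, uint8_t field_shift,
				     uint32_t field_mask, uint32_t field_value)
{
	/* clear the field, then OR the new value in at its bit offset */
	return (reg_value & ~field_mask) |
	       ((field_value << field_shift) & field_mask);
}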
+static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN21(_MASK) +}; + +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN21() +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN21(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN21(_MASK) +}; + + +#define vmid_regs(id)\ +[id] = {\ + DCN20_VMID_REG_LIST(id)\ +} + +static const struct dcn_vmid_registers vmid_regs[] = { + vmid_regs(0), + vmid_regs(1), + vmid_regs(2), + vmid_regs(3), + vmid_regs(4), + vmid_regs(5), + vmid_regs(6), + vmid_regs(7), + vmid_regs(8), + vmid_regs(9), + vmid_regs(10), + vmid_regs(11), + vmid_regs(12), + vmid_regs(13), + vmid_regs(14), + vmid_regs(15) +}; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +#define dsc_regsDCN20(id)\ +[id] = {\ + DSC_REG_LIST_DCN20(id)\ +} + +static const struct dcn20_dsc_registers dsc_regs[] = { + dsc_regsDCN20(0), + dsc_regsDCN20(1), + dsc_regsDCN20(2), + dsc_regsDCN20(3), + dsc_regsDCN20(4), + dsc_regsDCN20(5) +}; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +#define ipp_regs(id)\ +[id] = {\ + IPP_REG_LIST_DCN20(id),\ +} + +static const struct dcn10_ipp_registers ipp_regs[] = { + ipp_regs(0), + ipp_regs(1), + ipp_regs(2), + ipp_regs(3), +}; + +static const struct dcn10_ipp_shift ipp_shift = { + IPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn10_ipp_mask ipp_mask = { + IPP_MASK_SH_LIST_DCN20(_MASK), +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN20(id),\ +} + + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUXN_IMPCAL = 0, \ + .AUXP_IMPCAL = 0, \ + .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4), +}; + +#define tf_regs(id)\ +[id] = {\ + TF_REG_LIST_DCN20(id),\ + TF_REG_LIST_DCN20_COMMON_APPEND(id),\ +} + +static const struct dcn2_dpp_registers tf_regs[] = { + tf_regs(0), + tf_regs(1), + tf_regs(2), + tf_regs(3), +}; + +static const struct dcn2_dpp_shift tf_shift = { + TF_REG_LIST_SH_MASK_DCN20(__SHIFT), + TF_DEBUG_REG_LIST_SH_DCN20 +}; + +static const struct dcn2_dpp_mask tf_mask = { + TF_REG_LIST_SH_MASK_DCN20(_MASK), + TF_DEBUG_REG_LIST_MASK_DCN20 +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_DCN2_REG_LIST(id)\ +} + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4), +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN20(_MASK) +}; + +static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu); + +static struct input_pixel_processor *dcn21_ipp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn10_ipp *ipp = + kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); + + if 
(!ipp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_ipp_construct(ipp, ctx, inst, + &ipp_regs[inst], &ipp_shift, &ipp_mask); + return &ipp->base; +} + +static struct dpp *dcn21_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_dpp *dpp = + kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL); + + if (!dpp) + return NULL; + + if (dpp2_construct(dpp, ctx, inst, + &tf_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +static struct dce_aux *dcn21_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} + +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) +}; + +static struct dce_i2c_hw *dcn21_i2c_hw_create(struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} + +static const struct resource_caps res_cap_rn = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 4, // 4 audio endpoints. 
4 audio streams + .num_stream_encoder = 5, + .num_pll = 5, // maybe 3 because the last two are used for USB-C + .num_dwb = 1, + .num_ddc = 5, + .num_vmid = 16, + .num_dsc = 3, +}; + +#ifdef DIAGS_BUILD +static const struct resource_caps res_cap_rn_FPGA_4pipe = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 7, + .num_stream_encoder = 4, + .num_pll = 4, + .num_dwb = 1, + .num_ddc = 4, + .num_dsc = 0, +}; + +static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = { + .num_timing_generator = 2, + .num_opp = 2, + .num_video_plane = 2, + .num_audio = 7, + .num_stream_encoder = 2, + .num_pll = 4, + .num_dwb = 1, + .num_ddc = 4, + .num_dsc = 2, +}; +#endif + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 250, + .fp16 = 250 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_dmcu = false, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = true, + .min_disp_clk_khz = 100000, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 4096, + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = true, + .disable_48mhz_pwrdwn = false, + .usbc_combo_phy_reset_wa = true, + .dmub_command_table = true, + .use_max_lb = true, + .enable_legacy_fast_update = true, + .using_dml2 = false, +}; + +static const struct dc_panel_config panel_config_defaults = { + .psr = { + .disable_psr = false, + .disallow_psrsu = false, + .disallow_replay = false, + }, + .ilr = { + .optimize_edp_link_rate = true, + }, +}; + +enum dcn20_clk_src_array_id { + DCN20_CLK_SRC_PLL0, + DCN20_CLK_SRC_PLL1, + DCN20_CLK_SRC_PLL2, + DCN20_CLK_SRC_PLL3, + DCN20_CLK_SRC_PLL4, + DCN20_CLK_SRC_TOTAL_DCN21 +}; + +static void dcn21_resource_destruct(struct dcn21_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn20_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) + dal_irq_service_destroy(&pool->base.irqs); + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + 
kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN20_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + if (pool->base.abm != NULL) { + if (pool->base.abm->ctx->dc->config.disable_dmcu) + dmub_abm_destroy(&pool->base.abm); + else + dce_abm_destroy(&pool->base.abm); + } + + if (pool->base.dmcu != NULL) + dce_dmcu_destroy(&pool->base.dmcu); + + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); + + if (pool->base.pp_smu != NULL) + dcn21_pp_smu_destroy(&pool->base.pp_smu); +} + +bool dcn21_fast_validate_bw(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *pipe_split_from, + int *vlevel_out, + bool fast_validate) +{ + bool out = false; + int split[MAX_PIPES] = { 0 }; + int pipe_cnt, i, pipe_idx, vlevel; + + ASSERT(pipes); + if (!pipes) + return false; + + dcn20_merge_pipes_for_validate(dc, context); + + DC_FP_START(); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + DC_FP_END(); + + *pipe_cnt_out = pipe_cnt; + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + /* + * DML favors voltage over p-state, but we're more interested in + * supporting p-state over voltage. We can't support p-state in + * prefetch mode > 0 so try capping the prefetch mode to start. + */ + context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = + dm_allow_self_refresh_and_mclk_switch; + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + + if (vlevel > context->bw_ctx.dml.soc.num_states) { + /* + * If mode is unsupported or there's still no p-state support then + * fall back to favoring voltage. + * + * We don't actually support prefetch mode 2, so require that we + * at least support prefetch mode 1. 
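+ * (Mode 0 allows both self refresh and mclk switch during prefetch, + * mode 1 allows self refresh only, and mode 2 allows neither.)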
+ */ + context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = + dm_allow_self_refresh; + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + if (vlevel > context->bw_ctx.dml.soc.num_states) + goto validate_fail; + } + + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL); + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + if (!pipe->stream) + continue; + + /* We only support full screen mpo with ODM */ + if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled + && pipe->plane_state && mpo_pipe + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, + sizeof(struct rect)) != 0) { + ASSERT(mpo_pipe->plane_state != pipe->plane_state); + goto validate_fail; + } + pipe_idx++; + } + + /* initialize pipe_split_from to invalid idx */ + for (i = 0; i < MAX_PIPES; i++) + pipe_split_from[i] = -1; + + for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; + + if (!pipe->stream || pipe_split_from[i] >= 0) + continue; + + pipe_idx++; + + if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { + hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); + ASSERT(hsplit_pipe); + if (!dcn20_split_stream_for_odm( + dc, &context->res_ctx, + pipe, hsplit_pipe)) + goto validate_fail; + pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; + dcn20_build_mapped_resource(dc, context, pipe->stream); + } + + if (!pipe->plane_state) + continue; + /* Skip 2nd half of already split pipe */ + if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state) + continue; + + if (split[i] == 2) { + if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) { + /* pipe not split previously needs split */ + hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); + ASSERT(hsplit_pipe); + if (!hsplit_pipe) { + DC_FP_START(); + dcn20_fpu_adjust_dppclk(&context->bw_ctx.dml.vba, vlevel, context->bw_ctx.dml.vba.maxMpcComb, pipe_idx, true); + DC_FP_END(); + continue; + } + if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { + if (!dcn20_split_stream_for_odm( + dc, &context->res_ctx, + pipe, hsplit_pipe)) + goto validate_fail; + dcn20_build_mapped_resource(dc, context, pipe->stream); + } else { + dcn20_split_stream_for_mpc( + &context->res_ctx, dc->res_pool, + pipe, hsplit_pipe); + resource_build_scaling_params(pipe); + resource_build_scaling_params(hsplit_pipe); + } + pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; + } + } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { + /* merge should already have been done */ + ASSERT(0); + } + } + /* Validate actual DSC count per stream */ + if (!dcn20_validate_dsc(dc, context)) { + context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = + DML_FAIL_DSC_VALIDATION_FAILURE; + goto validate_fail; + } + + *vlevel_out = vlevel; + + out = true; + goto validate_out; + +validate_fail: + out = false; + +validate_out: + return out; +} + +/* + * Some of the functions further below use the FPU, so we need to wrap this + * with DC_FP_START()/DC_FP_END(). 
Use the same approach as for + * dcn20_validate_bandwidth in dcn20_resource.c. + */ +static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, + bool fast_validate) +{ + bool voltage_supported; + display_e2e_pipe_params_st *pipes; + + pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + if (!pipes) + return false; + + DC_FP_START(); + voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes); + DC_FP_END(); + + kfree(pipes); + return voltage_supported; +} + +static void dcn21_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn21_resource_pool *dcn21_pool = TO_DCN21_RES_POOL(*pool); + + dcn21_resource_destruct(dcn21_pool); + kfree(dcn21_pool); + *pool = NULL; +} + +static struct clock_source *dcn21_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn20_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static struct hubp *dcn21_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn21_hubp *hubp21 = + kzalloc(sizeof(struct dcn21_hubp), GFP_KERNEL); + + if (!hubp21) + return NULL; + + if (hubp21_construct(hubp21, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp21->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp21); + return NULL; +} + +static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), + GFP_KERNEL); + + if (!hubbub) + return NULL; + + hubbub21_construct(hubbub, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask); + + for (i = 0; i < res_cap_rn.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + hubbub->num_vmid = res_cap_rn.num_vmid; + + return &hubbub->base; +} + +static struct output_pixel_processor *dcn21_opp_create(struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_opp *opp = + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct timing_generator *dcn21_timing_generator_create(struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &tg_regs[instance]; + tgn10->tg_shift = &tg_shift; + tgn10->tg_mask = &tg_mask; + + dcn20_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static struct mpc *dcn21_mpc_create(struct dc_context *ctx) +{ + struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), + GFP_KERNEL); + + if (!mpc20) + return NULL; + + dcn20_mpc_construct(mpc20, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + 6); + + return &mpc20->base; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); + +} 
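(Aside on dcn21_validate_bandwidth() above: as its preceding comment says, FPU-using DML code must sit inside a DC_FP_START()/DC_FP_END() bracket, so the scratch-pipe allocation and the kfree stay outside it and only the math runs inside. A minimal standalone sketch of that shape, with the two macros stubbed out — in the kernel they ultimately resolve to kernel_fpu_begin()/kernel_fpu_end() — and a hypothetical validate_bw_fpu() standing in for the real FPU helper:)

#include <stdbool.h>
#include <stdlib.h>

/* Stubs so the sketch builds standalone; the real macros claim/release the FPU. */
#define DC_FP_START() do { } while (0)
#define DC_FP_END()   do { } while (0)

/* Hypothetical stand-in for the FPU-using DML validation. */
static bool validate_bw_fpu(void *pipes) { return pipes != NULL; }

static bool validate_bandwidth_sketch(size_t pipe_count, size_t pipe_size)
{
	void *pipes = calloc(pipe_count, pipe_size); /* scratch DML pipe array */
	bool supported;

	if (!pipes)
		return false;

	DC_FP_START();		/* FPU math is only legal inside this bracket */
	supported = validate_bw_fpu(pipes);
	DC_FP_END();

	free(pipes);
	return supported;
}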
+ + +static struct display_stream_compressor *dcn21_dsc_create(struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + return &dsc->base; +} + +static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx) +{ + struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); + + if (!pp_smu) + return pp_smu; + + dm_pp_get_funcs(ctx, pp_smu); + + if (pp_smu->ctx.ver != PP_SMU_VER_RN) + pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs)); + + + return pp_smu; +} + +static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu) +{ + if (pp_smu && *pp_smu) { + kfree(*pp_smu); + *pp_smu = NULL; + } +} + +static struct audio *dcn21_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +static struct stream_encoder *dcn21_stream_encoder_create(enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1 = + kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + + if (!enc1) + return NULL; + + dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN21_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN21_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN21_MASK_SH_LIST(_MASK) +}; + +static struct dce_hwseq *dcn21_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + hws->wa.DEGVIDCN21 = true; + hws->wa.disallow_self_refresh_during_multi_plane_transition = true; + } + return hws; +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn21_create_audio, + .create_stream_encoder = dcn21_stream_encoder_create, + .create_hwseq = dcn21_hwseq_create, +}; + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN2_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN21_REG_LIST(id), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ +} + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E), +}; + +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCN_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct 
dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\ + DPCS_DCN21_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\ + DPCS_DCN21_MASK_SH_LIST(_MASK) +}; + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + default: + ASSERT(0); + return 0; + } +} + +static struct link_encoder *dcn21_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn21_link_encoder *enc21 = + kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc21) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dcn21_link_encoder_construct(enc21, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc21->enc10.base; +} + +static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + +static void dcn21_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ + *panel_config = panel_config_defaults; +} + +#define CTX ctx + +#define REG(reg_name) \ + (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + +static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = REG_READ(CC_DC_PIPE_DIS); + /* RV1 support max 4 pipes */ + value = value & 0xf; + return value; +} + +static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state) +{ + if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) { + plane_state->dcc.enable = 1; + /* align to our worst case block width */ + plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024; + } + + return dcn20_patch_unknown_plane_state(plane_state); +} + +static const struct resource_funcs dcn21_res_pool_funcs = { + .destroy = dcn21_destroy_resource_pool, + .link_enc_create = dcn21_link_encoder_create, + .panel_cntl_create = dcn21_panel_cntl_create, + .validate_bandwidth = dcn21_validate_bandwidth, + .populate_dml_pipes = dcn21_populate_dml_pipes_from_context, + .add_stream_to_ctx = dcn20_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .populate_dml_writeback_from_context = 
dcn20_populate_dml_writeback_from_context, + .patch_unknown_plane_state = dcn21_patch_unknown_plane_state, + .set_mcif_arb_params = dcn20_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .update_bw_bounding_box = dcn21_update_bw_bounding_box, + .get_panel_config_defaults = dcn21_get_panel_config_defaults, +}; + +static bool dcn21_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn21_resource_pool *pool) +{ + int i, j; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + uint32_t pipe_fuses = read_pipe_fuses(ctx); + uint32_t num_pipes; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_rn; +#ifdef DIAGS_BUILD + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) + //pool->base.res_cap = &res_cap_nv10_FPGA_2pipe_dsc; + pool->base.res_cap = &res_cap_rn_FPGA_4pipe; +#endif + + pool->base.funcs = &dcn21_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + + /* max pipe num for ASIC before checking pipe fuses */ + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + + dc->caps.max_slave_planes = 1; + dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 1; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + dc->caps.is_apu = true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 1; + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.dgam_rom_caps.pq = 0; + dc->caps.color.dpp.dgam_rom_caps.hlg = 0; + dc->caps.color.dpp.post_csc = 0; + dc->caps.color.dpp.gamma_corr = 0; + dc->caps.color.dpp.dgam_rom_for_yuv = 1; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN2 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 0; + dc->caps.color.mpc.num_3dluts = 0; + dc->caps.color.mpc.shared_3d_lut = 0; + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + dc->caps.dp_hdmi21_pcon_support = true; + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + pool->base.clock_sources[DCN20_CLK_SRC_PLL0] 
= + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL1] = + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL2] = + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL3] = + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL4] = + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + + pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + pool->base.dccg = dccg21_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + if (!dc->config.disable_dmcu) { + pool->base.dmcu = dcn21_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + dc->debug.dmub_command_table = false; + } + + if (dc->config.disable_dmcu) { + pool->base.psr = dmub_psr_create(ctx); + + if (pool->base.psr == NULL) { + dm_error("DC: failed to create psr obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + if (dc->config.disable_dmcu) + pool->base.abm = dmub_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + else + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + + pool->base.pp_smu = dcn21_pp_smu_create(ctx); + + num_pipes = dcn2_1_ip.max_num_dpp; + + for (i = 0; i < dcn2_1_ip.max_num_dpp; i++) + if (pipe_fuses & 1 << i) + num_pipes--; + dcn2_1_ip.max_num_dpp = num_pipes; + dcn2_1_ip.max_num_otg = num_pipes; + + dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); + + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn21_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + j = 0; + /* mem input -> ipp -> dpp -> opp -> TG */ + for (i = 0; i < pool->base.pipe_count; i++) { + /* if pipe is disabled, skip instance of HW pipe, + * i.e, skip ASIC register instance + */ + if ((pipe_fuses & (1 << i)) != 0) + continue; + + pool->base.hubps[j] = dcn21_hubp_create(ctx, i); + if (pool->base.hubps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create memory input!\n"); + goto create_fail; + } + + pool->base.ipps[j] = dcn21_ipp_create(ctx, i); + if (pool->base.ipps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create input pixel processor!\n"); + goto create_fail; + } + + pool->base.dpps[j] = dcn21_dpp_create(ctx, i); + if (pool->base.dpps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + + pool->base.opps[j] = dcn21_opp_create(ctx, i); + if (pool->base.opps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + + 
pool->base.timing_generators[j] = dcn21_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + j++; + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn21_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn21_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + pool->base.timing_generator_count = j; + pool->base.pipe_count = j; + pool->base.mpcc_count = j; + + pool->base.mpc = dcn21_mpc_create(ctx); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + pool->base.hubbub = dcn21_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn21_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + if (!dcn20_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + if (!dcn20_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + dcn21_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + return true; + +create_fail: + + dcn21_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn21_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn21_resource_pool *pool = + kzalloc(sizeof(struct dcn21_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn21_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h new file mode 100644 index 0000000000..f7ecc002c2 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h @@ -0,0 +1,56 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DCN21_RESOURCE_H_ +#define _DCN21_RESOURCE_H_ + +#include "core_types.h" + +#define TO_DCN21_RES_POOL(pool)\ + container_of(pool, struct dcn21_resource_pool, base) + +struct dc; +struct resource_pool; +struct _vcs_dpi_display_pipe_params_st; + +extern struct _vcs_dpi_ip_params_st dcn2_1_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc; + +struct dcn21_resource_pool { + struct resource_pool base; +}; +struct resource_pool *dcn21_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); +bool dcn21_fast_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *pipe_split_from, + int *vlevel_out, + bool fast_validate); + +#endif /* _DCN21_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c new file mode 100644 index 0000000000..37a64186f3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -0,0 +1,2613 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "dcn30/dcn30_init.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn20/dcn20_resource.h"
+
+#include "dcn30_resource.h"
+
+#include "dcn10/dcn10_ipp.h"
+#include "dcn30/dcn30_hubbub.h"
+#include "dcn30/dcn30_mpc.h"
+#include "dcn30/dcn30_hubp.h"
+#include "irq/dcn30/irq_service_dcn30.h"
+#include "dcn30/dcn30_dpp.h"
+#include "dcn30/dcn30_optc.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dce110/dce110_hwseq.h"
+#include "dcn30/dcn30_opp.h"
+#include "dcn20/dcn20_dsc.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dcn30/dcn30_afmt.h"
+#include "dcn30/dcn30_dio_stream_encoder.h"
+#include "dcn30/dcn30_dio_link_encoder.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "clk_mgr.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dml/display_mode_vba.h"
+#include "dcn30/dcn30_dccg.h"
+#include "dcn10/dcn10_resource.h"
+#include "link.h"
+#include "dce/dce_panel_cntl.h"
+
+#include "dcn30/dcn30_dwb.h"
+#include "dcn30/dcn30_mmhubbub.h"
+
+#include "sienna_cichlid_ip_offset.h"
+#include "dcn/dcn_3_0_0_offset.h"
+#include "dcn/dcn_3_0_0_sh_mask.h"
+
+#include "nbio/nbio_7_4_offset.h"
+
+#include "dpcs/dpcs_3_0_0_offset.h"
+#include "dpcs/dpcs_3_0_0_sh_mask.h"
+
+#include "mmhub/mmhub_2_0_0_offset.h"
+#include "mmhub/mmhub_2_0_0_sh_mask.h"
+
+#include "reg_helper.h"
+#include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+
+#include "dml/dcn30/dcn30_fpu.h"
+#include "dml/dcn30/display_mode_vba_30.h"
+#include "vm_helper.h"
+#include "dcn20/dcn20_vmid.h"
+#include "amdgpu_socbb.h"
+#include "dc_dmub_srv.h"
+
+#define DC_LOGGER \
+		dc->ctx->logger
+#define DC_LOGGER_INIT(logger)
+
+enum dcn30_clk_src_array_id {
+	DCN30_CLK_SRC_PLL0,
+	DCN30_CLK_SRC_PLL1,
+	DCN30_CLK_SRC_PLL2,
+	DCN30_CLK_SRC_PLL3,
+	DCN30_CLK_SRC_PLL4,
+	DCN30_CLK_SRC_PLL5,
+	DCN30_CLK_SRC_TOTAL
+};
+
+/* begin *********************
+ * macros to expand register list macro defined in HW object header file
+ */
+
+/* DCN */
+#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+					mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+					mm ## block ## id ## _ ## reg_name
+
+#define SRI2(reg_name, block, id)\
+	.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+					mm ## reg_name
+
+#define SRIR(var_name, reg_name, block, id)\
+	.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+					mm ## block ## id ## _ ## reg_name
+
+#define SRII(reg_name, block, id)\
+	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+					mm ## block ## id ## _ ## reg_name
+
+#define SRII_MPC_RMU(reg_name, block, id)\
+	.RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+					mm ## block ## id ## _ ## reg_name
+
+#define SRII_DWB(reg_name, temp_name, block, id)\
+	.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+					mm ## block ## id ## _ ## temp_name
+
+#define SF_DWB2(reg_name, block, id, field_name, post_fix) \
+	.field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DCCG_SRII(reg_name, block, id)\
+	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+					mm ## block ## id ## _ ## reg_name
+
+#define VUPDATE_SRII(reg_name, block, id)\
+	.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+					mm ## reg_name ## _ ## block ## id
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) \
+	NBIO_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+	NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+		.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
+					mm ## reg_name
+
+/* MMHUB */
+#define MMHUB_BASE_INNER(seg) \
+	MMHUB_BASE__INST0_SEG ## seg
+
+#define MMHUB_BASE(seg) \
+	MMHUB_BASE_INNER(seg)
+
+#define MMHUB_SR(reg_name)\
+		.reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
+					mmMM ## reg_name
+
+/* CLOCK */
+#define CLK_BASE_INNER(seg) \
+	CLK_BASE__INST0_SEG ## seg
+
+#define CLK_BASE(seg) \
+	CLK_BASE_INNER(seg)
+
+#define CLK_SRI(reg_name, block, inst)\
+	.reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
+					mm ## block ## _ ## inst ## _ ## reg_name
+
+
+static const struct bios_registers bios_regs = {
+		NBIO_SR(BIOS_SCRATCH_3),
+		NBIO_SR(BIOS_SCRATCH_6)
+};
+
+#define clk_src_regs(index, pllid)\
+[index] = {\
+	CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+	clk_src_regs(0, A),
+	clk_src_regs(1, B),
+	clk_src_regs(2, C),
+	clk_src_regs(3, D),
+	clk_src_regs(4, E),
+	clk_src_regs(5, F)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+		CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+		CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+};
+
+#define abm_regs(id)\
+[id] = {\
+		ABM_DCN30_REG_LIST(id)\
+}
+
+static const struct dce_abm_registers abm_regs[] = {
+		abm_regs(0),
+		abm_regs(1),
+		abm_regs(2),
+		abm_regs(3),
+		abm_regs(4),
+		abm_regs(5),
+};
+
+static const struct dce_abm_shift abm_shift = {
+		ABM_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+		ABM_MASK_SH_LIST_DCN30(_MASK)
+};
+
+
+
+#define audio_regs(id)\
+[id] = {\
+		AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+	audio_regs(0),
+	audio_regs(1),
+	audio_regs(2),
+	audio_regs(3),
+	audio_regs(4),
+	audio_regs(5),
+	audio_regs(6)
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+		AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+		DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+		DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define vpg_regs(id)\
+[id] = {\
+	VPG_DCN3_REG_LIST(id)\
+}
+
+static const struct dcn30_vpg_registers vpg_regs[] = {
+	vpg_regs(0),
+	vpg_regs(1),
+	vpg_regs(2),
+	vpg_regs(3),
+	vpg_regs(4),
+	vpg_regs(5),
+	vpg_regs(6),
+};
+
+static const struct dcn30_vpg_shift vpg_shift = {
+	DCN3_VPG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn30_vpg_mask vpg_mask = {
+	DCN3_VPG_MASK_SH_LIST(_MASK)
+};
+
+#define afmt_regs(id)\
+[id] = {\
+	AFMT_DCN3_REG_LIST(id)\
+}
+
+static const struct dcn30_afmt_registers afmt_regs[] = {
+	afmt_regs(0),
+	afmt_regs(1),
+	afmt_regs(2),
+	afmt_regs(3),
+	afmt_regs(4),
+	afmt_regs(5),
+	afmt_regs(6),
+};
+
+static const struct dcn30_afmt_shift afmt_shift = {
+	DCN3_AFMT_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn30_afmt_mask afmt_mask = {
+	DCN3_AFMT_MASK_SH_LIST(_MASK)
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+	SE_DCN3_REG_LIST(id)\
+}
+
+static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
+	stream_enc_regs(0),
+	stream_enc_regs(1),
+	stream_enc_regs(2),
+	stream_enc_regs(3),
+	stream_enc_regs(4),
+	stream_enc_regs(5)
+};
+
+static const struct dcn10_stream_encoder_shift se_shift = {
+		SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn10_stream_encoder_mask se_mask = {
+		SE_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+
+#define aux_regs(id)\
+[id] = {\
+	DCN2_AUX_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
+		aux_regs(0),
+		aux_regs(1),
+		aux_regs(2),
+		aux_regs(3),
+		aux_regs(4),
+		aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+	HPD_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
+		hpd_regs(0),
+		hpd_regs(1),
+		hpd_regs(2),
+		hpd_regs(3),
+		hpd_regs(4),
+		hpd_regs(5)
+};
+
+#define link_regs(id, phyid)\
+[id] = {\
+	LE_DCN3_REG_LIST(id), \
+	UNIPHY_DCN2_REG_LIST(phyid), \
+	DPCS_DCN2_REG_LIST(id), \
+	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dce110_aux_registers_shift aux_shift = {
+	DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+	DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+static const struct dcn10_link_enc_registers link_enc_regs[] = {
+	link_regs(0, A),
+	link_regs(1, B),
+	link_regs(2, C),
+	link_regs(3, D),
+	link_regs(4, E),
+	link_regs(5, F)
+};
+
+static const struct dcn10_link_enc_shift le_shift = {
+	LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT),\
+	DPCS_DCN2_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+	LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK),\
+	DPCS_DCN2_MASK_SH_LIST(_MASK)
+};
+
+
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+	{ DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+	DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+	DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
+#define dpp_regs(id)\
+[id] = {\
+	DPP_REG_LIST_DCN30(id),\
+}
+
+static const struct dcn3_dpp_registers dpp_regs[] = {
+	dpp_regs(0),
+	dpp_regs(1),
+	dpp_regs(2),
+	dpp_regs(3),
+	dpp_regs(4),
+	dpp_regs(5),
+};
+
+static const struct dcn3_dpp_shift tf_shift = {
+		DPP_REG_LIST_SH_MASK_DCN30(__SHIFT)
+};
+
+static const struct dcn3_dpp_mask tf_mask = {
+		DPP_REG_LIST_SH_MASK_DCN30(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+	OPP_REG_LIST_DCN30(id),\
+}
+
+static const struct dcn20_opp_registers opp_regs[] = {
+	opp_regs(0),
+	opp_regs(1),
+	opp_regs(2),
+	opp_regs(3),
+	opp_regs(4),
+	opp_regs(5)
+};
+
+static const struct dcn20_opp_shift opp_shift = {
+	OPP_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn20_opp_mask opp_mask = {
+	OPP_MASK_SH_LIST_DCN20(_MASK)
+};
+
+#define aux_engine_regs(id)\
+[id] = {\
+	AUX_COMMON_REG_LIST0(id), \
+	.AUXN_IMPCAL = 0, \
+	.AUXP_IMPCAL = 0, \
+	.AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+		aux_engine_regs(0),
+		aux_engine_regs(1),
+		aux_engine_regs(2),
+		aux_engine_regs(3),
+		aux_engine_regs(4),
+		aux_engine_regs(5)
+};
+
+#define dwbc_regs_dcn3(id)\
+[id] = {\
+	DWBC_COMMON_REG_LIST_DCN30(id),\
+}
+
+static const struct dcn30_dwbc_registers dwbc30_regs[] = {
+	dwbc_regs_dcn3(0),
+};
+
+static const struct dcn30_dwbc_shift dwbc30_shift = {
+	DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn30_dwbc_mask dwbc30_mask = {
+	DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+#define mcif_wb_regs_dcn3(id)\
+[id] = {\
+	MCIF_WB_COMMON_REG_LIST_DCN30(id),\
+}
+
+static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
+	mcif_wb_regs_dcn3(0)
+};
+
+static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
+	MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
+	MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+#define dsc_regsDCN20(id)\
+[id] = {\
+	DSC_REG_LIST_DCN20(id)\
+}
+
+static const struct dcn20_dsc_registers dsc_regs[] = {
+	dsc_regsDCN20(0),
+	dsc_regsDCN20(1),
+	dsc_regsDCN20(2),
+	dsc_regsDCN20(3),
+	dsc_regsDCN20(4),
+	dsc_regsDCN20(5)
+};
+
+static const struct dcn20_dsc_shift dsc_shift = {
+	DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
+};
+
+static const struct dcn20_dsc_mask dsc_mask = {
+	DSC_REG_LIST_SH_MASK_DCN20(_MASK)
+};
+
+static const struct dcn30_mpc_registers mpc_regs = {
+		MPC_REG_LIST_DCN3_0(0),
+		MPC_REG_LIST_DCN3_0(1),
+		MPC_REG_LIST_DCN3_0(2),
+		MPC_REG_LIST_DCN3_0(3),
+		MPC_REG_LIST_DCN3_0(4),
+		MPC_REG_LIST_DCN3_0(5),
+		MPC_OUT_MUX_REG_LIST_DCN3_0(0),
+		MPC_OUT_MUX_REG_LIST_DCN3_0(1),
+		MPC_OUT_MUX_REG_LIST_DCN3_0(2),
+		MPC_OUT_MUX_REG_LIST_DCN3_0(3),
+		MPC_OUT_MUX_REG_LIST_DCN3_0(4),
+		MPC_OUT_MUX_REG_LIST_DCN3_0(5),
+		MPC_RMU_GLOBAL_REG_LIST_DCN3AG,
+		MPC_RMU_REG_LIST_DCN3AG(0),
+		MPC_RMU_REG_LIST_DCN3AG(1),
+		MPC_RMU_REG_LIST_DCN3AG(2),
+		MPC_DWB_MUX_REG_LIST_DCN3_0(0),
+};
+
+static const struct dcn30_mpc_shift mpc_shift = {
+	MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn30_mpc_mask mpc_mask = {
+	MPC_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+#define optc_regs(id)\
+[id] = {OPTC_COMMON_REG_LIST_DCN3_0(id)}
+
+
+static const struct dcn_optc_registers optc_regs[] = {
+	optc_regs(0),
+	optc_regs(1),
+	optc_regs(2),
+	optc_regs(3),
+	optc_regs(4),
+	optc_regs(5)
+};
+
+static const struct dcn_optc_shift optc_shift = {
+	OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn_optc_mask optc_mask = {
+	OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+#define hubp_regs(id)\
+[id] = {\
+	HUBP_REG_LIST_DCN30(id)\
+}
+
+static const struct dcn_hubp2_registers hubp_regs[] = {
+		hubp_regs(0),
+		hubp_regs(1),
+		hubp_regs(2),
+		hubp_regs(3),
+		hubp_regs(4),
+		hubp_regs(5)
+};
+
+static const struct dcn_hubp2_shift hubp_shift = {
+		HUBP_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn_hubp2_mask hubp_mask = {
+		HUBP_MASK_SH_LIST_DCN30(_MASK)
+};
+
+static const struct dcn_hubbub_registers hubbub_reg = {
+		HUBBUB_REG_LIST_DCN30(0)
+};
+
+static const struct dcn_hubbub_shift hubbub_shift = {
+		HUBBUB_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dcn_hubbub_mask hubbub_mask = {
+		HUBBUB_MASK_SH_LIST_DCN30(_MASK)
+};
+
+static const struct dccg_registers dccg_regs = {
+		DCCG_REG_LIST_DCN30()
+};
+
+static const struct dccg_shift dccg_shift = {
+		DCCG_MASK_SH_LIST_DCN3(__SHIFT)
+};
+
+static const struct dccg_mask dccg_mask = {
+		DCCG_MASK_SH_LIST_DCN3(_MASK)
+};
+
+static const struct dce_hwseq_registers hwseq_reg = {
+		HWSEQ_DCN30_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+		HWSEQ_DCN30_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+		HWSEQ_DCN30_MASK_SH_LIST(_MASK)
+};
+#define vmid_regs(id)\
+[id] = {\
+		DCN20_VMID_REG_LIST(id)\
+}
+
+static const struct dcn_vmid_registers vmid_regs[] = {
+	vmid_regs(0),
+	vmid_regs(1),
+	vmid_regs(2),
+	vmid_regs(3),
+	vmid_regs(4),
+	vmid_regs(5),
+	vmid_regs(6),
+	vmid_regs(7),
+	vmid_regs(8),
+	vmid_regs(9),
+	vmid_regs(10),
+	vmid_regs(11),
+	vmid_regs(12),
+	vmid_regs(13),
+	vmid_regs(14),
+	vmid_regs(15)
+};
+
+static const struct dcn20_vmid_shift vmid_shifts = {
+		DCN20_VMID_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn20_vmid_mask vmid_masks = {
+		DCN20_VMID_MASK_SH_LIST(_MASK)
+};
+
+static const struct resource_caps res_cap_dcn3 = {
+	.num_timing_generator = 6,
+	.num_opp = 6,
+	.num_video_plane = 6,
+	.num_audio = 6,
+	.num_stream_encoder = 6,
+	.num_pll = 6,
+	.num_dwb = 1,
+	.num_ddc = 6,
+	.num_vmid = 16,
+	.num_mpc_3dlut = 3,
+	.num_dsc = 6,
+};
+
+static const struct dc_plane_cap plane_cap = {
+	.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+	.per_pixel_alpha = true,
+
+	.pixel_format_support = {
+			.argb8888 = true,
+			.nv12 = true,
+			.fp16 = true,
+			.p010 = true,
+			.ayuv = false,
+	},
+
+	.max_upscale_factor = {
+			.argb8888 = 16000,
+			.nv12 = 16000,
+			.fp16 = 16000
+	},
+
+	/* 6:1 downscaling ratio: 1000/6 = 166.666 */
+	.max_downscale_factor = {
+			.argb8888 = 167,
+			.nv12 = 167,
+			.fp16 = 167
+	},
+	16,
+	16
+};
+
+static const struct dc_debug_options debug_defaults_drv = {
+	.disable_dmcu = true, //No DMCU on DCN30
+	.force_abm_enable = false,
+	.timing_trace = false,
+	.clock_trace = true,
+	.disable_pplib_clock_request = true,
+	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
+	.force_single_disp_pipe_split = false,
+	.disable_dcc = DCC_ENABLE,
+	.vsr_support = true,
+	.performance_trace = false,
+	.max_downscale_src_width = 7680,/*up to 8K*/
+	.disable_pplib_wm_range = false,
+	.scl_reset_length10 = true,
+	.sanity_checks = false,
+	.underflow_assert_delay_us = 0xFFFFFFFF,
+	.dwb_fi_phase = -1, // -1 = disable,
+	.dmub_command_table = true,
+	.use_max_lb = true,
+	.exit_idle_opt_for_cursor_updates = true,
+	.enable_legacy_fast_update = false,
+	.using_dml2 = false,
+};
+
+static const struct dc_panel_config panel_config_defaults = {
+	.psr = {
+		.disable_psr = false,
+		.disallow_psrsu = false,
+		.disallow_replay = false,
+	},
+};
+
+static void dcn30_dpp_destroy(struct dpp **dpp)
+{
+	kfree(TO_DCN20_DPP(*dpp));
+	*dpp = NULL;
+}
+
+static struct dpp *dcn30_dpp_create(
+	struct dc_context *ctx,
+	uint32_t inst)
+{
+	struct dcn3_dpp *dpp =
+		kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);
+
+	if (!dpp)
+		return NULL;
+
+	if (dpp3_construct(dpp, ctx, inst,
+			&dpp_regs[inst], &tf_shift, &tf_mask))
+		return &dpp->base;
+
+	BREAK_TO_DEBUGGER();
+	kfree(dpp);
+	return NULL;
+}
+
+static struct output_pixel_processor *dcn30_opp_create(
+	struct dc_context *ctx, uint32_t inst)
+{
+	struct dcn20_opp *opp =
+		kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
+
+	if (!opp) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	dcn20_opp_construct(opp, ctx, inst,
+			&opp_regs[inst], &opp_shift, &opp_mask);
+	return &opp->base;
+}
+
+static struct dce_aux *dcn30_aux_engine_create(
+	struct dc_context *ctx,
+	uint32_t inst)
+{
+	struct aux_engine_dce110 *aux_engine =
+		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+	if (!aux_engine)
+		return NULL;
+
+	dce110_aux_engine_construct(aux_engine, ctx, inst,
+				    SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+				    &aux_engine_regs[inst],
+					&aux_mask,
+					&aux_shift,
+					ctx->dc->caps.extended_aux_timeout_support);
+
+	return &aux_engine->base;
+}
+
+#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
+
+static const struct dce_i2c_registers i2c_hw_regs[] = {
+		i2c_inst_regs(1),
+		i2c_inst_regs(2),
+		i2c_inst_regs(3),
+		i2c_inst_regs(4),
+		i2c_inst_regs(5),
+		i2c_inst_regs(6),
+};
+
+static const struct dce_i2c_shift i2c_shifts = {
+		I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
+};
+
+static const struct dce_i2c_mask i2c_masks = {
+		I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
+};
+
+static struct dce_i2c_hw *dcn30_i2c_hw_create(
+	struct dc_context *ctx,
+	uint32_t inst)
+{
+	struct dce_i2c_hw *dce_i2c_hw =
+		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+
+	if (!dce_i2c_hw)
+		return NULL;
+
+	dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
+			&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
+	return dce_i2c_hw;
+}
+
+static struct mpc *dcn30_mpc_create(
+		struct dc_context *ctx,
+		int num_mpcc,
+		int num_rmu)
+{
+	struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc),
+					  GFP_KERNEL);
+
+	if (!mpc30)
+		return NULL;
+
+	dcn30_mpc_construct(mpc30, ctx,
+			&mpc_regs,
+			&mpc_shift,
+			&mpc_mask,
+			num_mpcc,
+			num_rmu);
+
+	return &mpc30->base;
+}
+
+static struct hubbub *dcn30_hubbub_create(struct dc_context *ctx)
+{
+	int i;
+
+	struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub),
+					  GFP_KERNEL);
+
+	if (!hubbub3)
+		return NULL;
+
+	hubbub3_construct(hubbub3, ctx,
+			&hubbub_reg,
+			&hubbub_shift,
+			&hubbub_mask);
+
+
+	for (i = 0; i < res_cap_dcn3.num_vmid; i++) {
+		struct dcn20_vmid *vmid = &hubbub3->vmid[i];
+
+		vmid->ctx = ctx;
+
+		vmid->regs = &vmid_regs[i];
+		vmid->shifts = &vmid_shifts;
+		vmid->masks = &vmid_masks;
+	}
+
+	return &hubbub3->base;
+}
+
+static struct timing_generator *dcn30_timing_generator_create(
+		struct dc_context *ctx,
+		uint32_t instance)
+{
+	struct optc *tgn10 =
+		kzalloc(sizeof(struct optc), GFP_KERNEL);
+
+	if (!tgn10)
+		return NULL;
+
+	tgn10->base.inst = instance;
+	tgn10->base.ctx = ctx;
+
+	tgn10->tg_regs = &optc_regs[instance];
+	tgn10->tg_shift = &optc_shift;
+	tgn10->tg_mask = &optc_mask;
+
+	dcn30_timing_generator_init(tgn10);
+
+	return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+		.max_hdmi_deep_color = COLOR_DEPTH_121212,
+		.max_hdmi_pixel_clock = 600000,
+		.hdmi_ycbcr420_supported = true,
+		.dp_ycbcr420_supported = true,
+		.fec_supported = true,
+		.flags.bits.IS_HBR2_CAPABLE = true,
+		.flags.bits.IS_HBR3_CAPABLE = true,
+		.flags.bits.IS_TPS3_CAPABLE = true,
+		.flags.bits.IS_TPS4_CAPABLE = true
+};
+
+static struct link_encoder *dcn30_link_encoder_create(
+	struct dc_context *ctx,
+	const struct encoder_init_data *enc_init_data)
+{
+	struct dcn20_link_encoder *enc20 =
+		kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+
+	if (!enc20)
+		return NULL;
+
+	dcn30_link_encoder_construct(enc20,
+			enc_init_data,
+			&link_enc_feature,
+			&link_enc_regs[enc_init_data->transmitter],
+			&link_enc_aux_regs[enc_init_data->channel - 1],
+			&link_enc_hpd_regs[enc_init_data->hpd_source],
+			&le_shift,
+			&le_mask);
+
+	return &enc20->enc10.base;
+}
+
+static struct panel_cntl *dcn30_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+	struct dce_panel_cntl *panel_cntl =
+		kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+	if (!panel_cntl)
+		return NULL;
+
+	dce_panel_cntl_construct(panel_cntl,
+			init_data,
+			&panel_cntl_regs[init_data->inst],
+			&panel_cntl_shift,
+			&panel_cntl_mask);
+
+	return &panel_cntl->base;
+}
+
+static void read_dce_straps(
+	struct dc_context *ctx,
+	struct resource_straps *straps)
+{
+	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
+		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+
+}
+
+static struct audio *dcn30_create_audio(
+		struct dc_context *ctx, unsigned int inst)
+{
+	return dce_audio_create(ctx, inst,
+			&audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct vpg *dcn30_vpg_create(
+	struct dc_context *ctx,
+	uint32_t inst)
+{
+	struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL);
+
+	if (!vpg3)
+		return NULL;
+
+	vpg3_construct(vpg3, ctx, inst,
+			&vpg_regs[inst],
+			&vpg_shift,
+			&vpg_mask);
+
+	return &vpg3->base;
+}
+
+static struct afmt *dcn30_afmt_create(
+	struct dc_context *ctx,
+	uint32_t inst)
+{
+	struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL);
+
+	if (!afmt3)
+		return NULL;
+
+	afmt3_construct(afmt3, ctx, inst,
+			&afmt_regs[inst],
+			&afmt_shift,
+			&afmt_mask);
+
+	return &afmt3->base;
+}
+
+static struct stream_encoder *dcn30_stream_encoder_create(enum engine_id eng_id,
+						   struct dc_context *ctx)
+{
+	struct dcn10_stream_encoder *enc1;
+	struct vpg *vpg;
+	struct afmt *afmt;
+	int vpg_inst;
+	int afmt_inst;
+
+	/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
+	if (eng_id <= ENGINE_ID_DIGF) {
+		vpg_inst = eng_id;
+		afmt_inst = eng_id;
+	} else
+		return NULL;
+
+	enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
+	vpg = dcn30_vpg_create(ctx, vpg_inst);
+	afmt = dcn30_afmt_create(ctx, afmt_inst);
+
+	if (!enc1 || !vpg || !afmt) {
+		kfree(enc1);
+		kfree(vpg);
+		kfree(afmt);
+		return NULL;
+	}
+
+	dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
+					eng_id, vpg, afmt,
+					&stream_enc_regs[eng_id],
+					&se_shift, &se_mask);
+
+	return &enc1->base;
+}
+
+static struct dce_hwseq *dcn30_hwseq_create(struct dc_context *ctx)
+{
+	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+	if (hws) {
+		hws->ctx = ctx;
+		hws->regs = &hwseq_reg;
+		hws->shifts = &hwseq_shift;
+		hws->masks = &hwseq_mask;
+	}
+	return hws;
+}
+static const struct resource_create_funcs res_create_funcs = {
+	.read_dce_straps = read_dce_straps,
+	.create_audio = dcn30_create_audio,
+	.create_stream_encoder = dcn30_stream_encoder_create,
+	.create_hwseq = dcn30_hwseq_create,
+};
+
+static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
+{
+	unsigned int i;
+
+	for (i = 0; i < pool->base.stream_enc_count; i++) {
+		if (pool->base.stream_enc[i] != NULL) {
+			if (pool->base.stream_enc[i]->vpg != NULL) {
+				kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
+				pool->base.stream_enc[i]->vpg = NULL;
+			}
+			if (pool->base.stream_enc[i]->afmt != NULL) {
+				kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
+				pool->base.stream_enc[i]->afmt = NULL;
+			}
+			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+			pool->base.stream_enc[i] = NULL;
+		}
+	}
+
+	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
+		if (pool->base.dscs[i] != NULL)
+			dcn20_dsc_destroy(&pool->base.dscs[i]);
+	}
+
+	if (pool->base.mpc != NULL) {
+		kfree(TO_DCN20_MPC(pool->base.mpc));
+		pool->base.mpc = NULL;
+	}
+	if (pool->base.hubbub != NULL) {
+		kfree(pool->base.hubbub);
+		pool->base.hubbub = NULL;
+	}
+	for (i = 0; i < pool->base.pipe_count; i++) {
+		if (pool->base.dpps[i] != NULL)
+			dcn30_dpp_destroy(&pool->base.dpps[i]);
+
+		if (pool->base.ipps[i] != NULL)
+			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+		if (pool->base.hubps[i] != NULL) {
+			kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
+			pool->base.hubps[i] = NULL;
+		}
+
+		if (pool->base.irqs != NULL) {
+			dal_irq_service_destroy(&pool->base.irqs);
+		}
+	}
+
+	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+		if (pool->base.engines[i] != NULL)
+			dce110_engine_destroy(&pool->base.engines[i]);
+		if (pool->base.hw_i2cs[i] != NULL) {
+			kfree(pool->base.hw_i2cs[i]);
+			pool->base.hw_i2cs[i] = NULL;
+ } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { + if (pool->base.mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->base.mpc_lut[i]); + pool->base.mpc_lut[i] = NULL; + } + if (pool->base.mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->base.mpc_shaper[i]); + pool->base.mpc_shaper[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.multiple_abms[i] != NULL) + dce_abm_destroy(&pool->base.multiple_abms[i]); + } + + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); + + if (pool->base.oem_device != NULL) { + struct dc *dc = pool->base.oem_device->ctx->dc; + + dc->link_srv->destroy_ddc_service(&pool->base.oem_device); + } +} + +static struct hubp *dcn30_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + + if (hubp3_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static bool dcn30_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), + GFP_KERNEL); + + if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return false; + } + + dcn30_dwbc_construct(dwbc30, ctx, + &dwbc30_regs[i], + &dwbc30_shift, + &dwbc30_mask, + i); + + pool->dwbc[i] = &dwbc30->base; + } + return true; +} + +static bool dcn30_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), + GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + + dcn30_mmhubbub_construct(mcif_wb30, ctx, + &mcif_wb30_regs[i], + &mcif_wb30_shift, + &mcif_wb30_mask, + i); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +static struct display_stream_compressor *dcn30_dsc_create( + struct dc_context *ctx, 
uint32_t inst) +{ + struct dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + return &dsc->base; +} + +enum dc_status dcn30_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) +{ + + return dcn20_add_stream_to_ctx(dc, new_ctx, dc_stream); +} + +static void dcn30_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn30_resource_pool *dcn30_pool = TO_DCN30_RES_POOL(*pool); + + dcn30_resource_destruct(dcn30_pool); + kfree(dcn30_pool); + *pool = NULL; +} + +static struct clock_source *dcn30_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn3_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +int dcn30_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int i, pipe_cnt; + struct resource_context *res_ctx = &context->res_ctx; + + DC_FP_START(); + dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + DC_FP_END(); + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + if (!res_ctx->pipe_ctx[i].stream) + continue; + + pipes[pipe_cnt++].pipe.scale_ratio_depth.lb_depth = + dm_lb_16; + } + + return pipe_cnt; +} + +void dcn30_populate_dml_writeback_from_context( + struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) +{ + DC_FP_START(); + dcn30_fpu_populate_dml_writeback_from_context(dc, res_ctx, pipes); + DC_FP_END(); +} + +unsigned int dcn30_calc_max_scaled_time( + unsigned int time_per_pixel, + enum mmhubbub_wbif_mode mode, + unsigned int urgent_watermark) +{ + unsigned int time_per_byte = 0; + unsigned int total_free_entry = 0xb40; + unsigned int buf_lh_capability; + unsigned int max_scaled_time; + + if (mode == PACKED_444) /* packed mode 32 bpp */ + time_per_byte = time_per_pixel/4; + else if (mode == PACKED_444_FP16) /* packed mode 64 bpp */ + time_per_byte = time_per_pixel/8; + + if (time_per_byte == 0) + time_per_byte = 1; + + buf_lh_capability = (total_free_entry*time_per_byte*32) >> 6; /* time_per_byte is in u6.6*/ + max_scaled_time = buf_lh_capability - urgent_watermark; + return max_scaled_time; +} + +void dcn30_set_mcif_arb_params( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt) +{ + enum mmhubbub_wbif_mode wbif_mode; + struct display_mode_lib *dml = &context->bw_ctx.dml; + struct mcif_arb_params *wb_arb_params; + int i, j, dwb_pipe; + + /* Writeback MCIF_WB arbitration parameters */ + dwb_pipe = 0; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + for (j = 0; j < MAX_DWB_PIPES; j++) { + struct dc_writeback_info *writeback_info = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j]; + + if (writeback_info->wb_enabled == false) + continue; + + //wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params; + wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe]; + + if 
(writeback_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB || + writeback_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA) + wbif_mode = PACKED_444_FP16; + else + wbif_mode = PACKED_444; + + DC_FP_START(); + dcn30_fpu_set_mcif_arb_params(wb_arb_params, dml, pipes, pipe_cnt, j); + DC_FP_END(); + wb_arb_params->time_per_pixel = (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* time_per_pixel should be in u6.6 format */ + wb_arb_params->slice_lines = 32; + wb_arb_params->arbitration_slice = 2; /* irrelevant since there is no YUV output */ + wb_arb_params->max_scaled_time = dcn30_calc_max_scaled_time(wb_arb_params->time_per_pixel, + wbif_mode, + wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */ + + dwb_pipe++; + + if (dwb_pipe >= MAX_DWB_PIPES) + return; + } + if (dwb_pipe >= MAX_DWB_PIPES) + return; + } + +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +bool dcn30_acquire_post_bldn_3dlut( + struct resource_context *res_ctx, + const struct resource_pool *pool, + int mpcc_id, + struct dc_3dlut **lut, + struct dc_transfer_func **shaper) +{ + int i; + bool ret = false; + union dc_3dlut_state *state; + + ASSERT(*lut == NULL && *shaper == NULL); + *lut = NULL; + *shaper = NULL; + + for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { + if (!res_ctx->is_mpc_3dlut_acquired[i]) { + *lut = pool->mpc_lut[i]; + *shaper = pool->mpc_shaper[i]; + state = &pool->mpc_lut[i]->state; + res_ctx->is_mpc_3dlut_acquired[i] = true; + state->bits.rmu_idx_valid = 1; + state->bits.rmu_mux_num = i; + if (state->bits.rmu_mux_num == 0) + state->bits.mpc_rmu0_mux = mpcc_id; + else if (state->bits.rmu_mux_num == 1) + state->bits.mpc_rmu1_mux = mpcc_id; + else if (state->bits.rmu_mux_num == 2) + state->bits.mpc_rmu2_mux = mpcc_id; + ret = true; + break; + } + } + return ret; +} + +bool dcn30_release_post_bldn_3dlut( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_3dlut **lut, + struct dc_transfer_func **shaper) +{ + int i; + bool ret = false; + + for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { + if (pool->mpc_lut[i] == *lut && pool->mpc_shaper[i] == *shaper) { + res_ctx->is_mpc_3dlut_acquired[i] = false; + pool->mpc_lut[i]->state.raw = 0; + *lut = NULL; + *shaper = NULL; + ret = true; + break; + } + } + return ret; +} + +static bool is_soc_bounding_box_valid(struct dc *dc) +{ + uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; + + if (ASICREV_IS_SIENNA_CICHLID_P(hw_internal_rev)) + return true; + + return false; +} + +static bool init_soc_bounding_box(struct dc *dc, + struct dcn30_resource_pool *pool) +{ + struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_0_soc; + struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_0_ip; + + DC_LOGGER_INIT(dc->ctx->logger); + + if (!is_soc_bounding_box_valid(dc)) { + DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); + return false; + } + + loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; + loaded_ip->max_num_dpp = pool->base.pipe_count; + loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; + dcn20_patch_bounding_box(dc, loaded_bb); + DC_FP_START(); + patch_dcn30_soc_bounding_box(dc, &dcn3_0_soc); + DC_FP_END(); + + return true; +} + +static bool dcn30_split_stream_for_mpc_or_odm( + const struct dc *dc, + struct resource_context *res_ctx, + struct pipe_ctx *pri_pipe, + struct pipe_ctx *sec_pipe, + bool odm) +{ + int pipe_idx = 
sec_pipe->pipe_idx; + const struct resource_pool *pool = dc->res_pool; + + *sec_pipe = *pri_pipe; + + sec_pipe->pipe_idx = pipe_idx; + sec_pipe->plane_res.mi = pool->mis[pipe_idx]; + sec_pipe->plane_res.hubp = pool->hubps[pipe_idx]; + sec_pipe->plane_res.ipp = pool->ipps[pipe_idx]; + sec_pipe->plane_res.xfm = pool->transforms[pipe_idx]; + sec_pipe->plane_res.dpp = pool->dpps[pipe_idx]; + sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst; + sec_pipe->stream_res.dsc = NULL; + if (odm) { + if (pri_pipe->next_odm_pipe) { + ASSERT(pri_pipe->next_odm_pipe != sec_pipe); + sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe; + sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe; + } + if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) { + pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe; + sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe; + } + if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) { + pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe; + sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe; + } + pri_pipe->next_odm_pipe = sec_pipe; + sec_pipe->prev_odm_pipe = pri_pipe; + + if (!sec_pipe->top_pipe) + sec_pipe->stream_res.opp = pool->opps[pipe_idx]; + else + sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp; + if (sec_pipe->stream->timing.flags.DSC == 1) { + dcn20_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx); + ASSERT(sec_pipe->stream_res.dsc); + if (sec_pipe->stream_res.dsc == NULL) + return false; + } + } else { + if (pri_pipe->bottom_pipe) { + ASSERT(pri_pipe->bottom_pipe != sec_pipe); + sec_pipe->bottom_pipe = pri_pipe->bottom_pipe; + sec_pipe->bottom_pipe->top_pipe = sec_pipe; + } + pri_pipe->bottom_pipe = sec_pipe; + sec_pipe->top_pipe = pri_pipe; + + ASSERT(pri_pipe->plane_state); + } + + return true; +} + +static struct pipe_ctx *dcn30_find_split_pipe( + struct dc *dc, + struct dc_state *context, + int old_index) +{ + struct pipe_ctx *pipe = NULL; + int i; + + if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) { + pipe = &context->res_ctx.pipe_ctx[old_index]; + pipe->pipe_idx = old_index; + } + + if (!pipe) + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL + && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { + if (context->res_ctx.pipe_ctx[i].stream == NULL) { + pipe = &context->res_ctx.pipe_ctx[i]; + pipe->pipe_idx = i; + break; + } + } + } + + /* + * May need to fix pipes getting tossed from 1 opp to another on flip + * Add for debugging transient underflow during topology updates: + * ASSERT(pipe); + */ + if (!pipe) + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (context->res_ctx.pipe_ctx[i].stream == NULL) { + pipe = &context->res_ctx.pipe_ctx[i]; + pipe->pipe_idx = i; + break; + } + } + + return pipe; +} + +noinline bool dcn30_internal_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *vlevel_out, + bool fast_validate, + bool allow_self_refresh_only) +{ + bool out = false; + bool repopulate_pipes = false; + int split[MAX_PIPES] = { 0 }; + bool merge[MAX_PIPES] = { false }; + bool newly_split[MAX_PIPES] = { false }; + int pipe_cnt, i, pipe_idx, vlevel; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + ASSERT(pipes); + if (!pipes) + return false; + + context->bw_ctx.dml.vba.maxMpcComb = 0; + context->bw_ctx.dml.vba.VoltageLevel = 0; + context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = 
dm_dram_clock_change_vactive; + dc->res_pool->funcs->update_soc_for_wm_a(dc, context); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + + dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); + + if (!fast_validate || !allow_self_refresh_only) { + /* + * DML favors voltage over p-state, but we're more interested in + * supporting p-state over voltage. We can't support p-state in + * prefetch mode > 0 so try capping the prefetch mode to start. + */ + context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = + dm_allow_self_refresh_and_mclk_switch; + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + /* This may adjust vlevel and maxMpcComb */ + if (vlevel < context->bw_ctx.dml.soc.num_states) + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); + } + if (allow_self_refresh_only && + (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) { + /* + * If mode is unsupported or there's still no p-state support + * then fall back to favoring voltage. + * + * We don't actually support prefetch mode 2, so require that we + * at least support prefetch mode 1. + */ + context->bw_ctx.dml.validate_max_state = fast_validate; + context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = + dm_allow_self_refresh; + + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + if (vlevel < context->bw_ctx.dml.soc.num_states) { + memset(split, 0, sizeof(split)); + memset(merge, 0, sizeof(merge)); + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); + } + context->bw_ctx.dml.validate_max_state = false; + } + + dml_log_mode_support_params(&context->bw_ctx.dml); + + if (vlevel == context->bw_ctx.dml.soc.num_states) + goto validate_fail; + + if (!dc->config.enable_windowed_mpo_odm) { + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; + + if (!pipe->stream) + continue; + + /* We only support full screen mpo with ODM */ + if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled + && pipe->plane_state && mpo_pipe + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, + sizeof(struct rect)) != 0) { + ASSERT(mpo_pipe->plane_state != pipe->plane_state); + goto validate_fail; + } + pipe_idx++; + } + } + + /* merge pipes if necessary */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /*skip pipes that don't need merging*/ + if (!merge[i]) + continue; + + /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */ + if (pipe->prev_odm_pipe) { + /*split off odm pipe*/ + pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe; + if (pipe->next_odm_pipe) + pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe; + + pipe->bottom_pipe = NULL; + pipe->next_odm_pipe = NULL; + pipe->plane_state = NULL; + pipe->stream = NULL; + pipe->top_pipe = NULL; + pipe->prev_odm_pipe = NULL; + if (pipe->stream_res.dsc) + dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc); + memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); + memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); + repopulate_pipes = true; + } else if 
(pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { + struct pipe_ctx *top_pipe = pipe->top_pipe; + struct pipe_ctx *bottom_pipe = pipe->bottom_pipe; + + top_pipe->bottom_pipe = bottom_pipe; + if (bottom_pipe) + bottom_pipe->top_pipe = top_pipe; + + pipe->top_pipe = NULL; + pipe->bottom_pipe = NULL; + pipe->plane_state = NULL; + pipe->stream = NULL; + memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); + memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); + repopulate_pipes = true; + } else + ASSERT(0); /* Should never try to merge master pipe */ + + } + + for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *hsplit_pipe = NULL; + bool odm; + int old_index = -1; + + if (!pipe->stream || newly_split[i]) + continue; + + pipe_idx++; + odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled; + + if (!pipe->plane_state && !odm) + continue; + + if (split[i]) { + if (odm) { + if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; + else if (old_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->pipe_idx; + } else { + if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && + old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx; + else if (old_pipe->bottom_pipe && + old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->pipe_idx; + } + hsplit_pipe = dcn30_find_split_pipe(dc, context, old_index); + ASSERT(hsplit_pipe); + if (!hsplit_pipe) + goto validate_fail; + + if (!dcn30_split_stream_for_mpc_or_odm( + dc, &context->res_ctx, + pipe, hsplit_pipe, odm)) + goto validate_fail; + + newly_split[hsplit_pipe->pipe_idx] = true; + repopulate_pipes = true; + } + if (split[i] == 4) { + struct pipe_ctx *pipe_4to1; + + if (odm && old_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->pipe_idx; + else if (!odm && old_pipe->bottom_pipe && + old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->pipe_idx; + else + old_index = -1; + pipe_4to1 = dcn30_find_split_pipe(dc, context, old_index); + ASSERT(pipe_4to1); + if (!pipe_4to1) + goto validate_fail; + if (!dcn30_split_stream_for_mpc_or_odm( + dc, &context->res_ctx, + pipe, pipe_4to1, odm)) + goto validate_fail; + newly_split[pipe_4to1->pipe_idx] = true; + + if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe + && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; + else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && + old_pipe->bottom_pipe->bottom_pipe->bottom_pipe && + old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx; + else + old_index = -1; + pipe_4to1 = dcn30_find_split_pipe(dc, context, old_index); + ASSERT(pipe_4to1); + if (!pipe_4to1) + goto validate_fail; + if (!dcn30_split_stream_for_mpc_or_odm( + dc, &context->res_ctx, + hsplit_pipe, pipe_4to1, odm)) + goto validate_fail; + newly_split[pipe_4to1->pipe_idx] = true; + } + if (odm) + dcn20_build_mapped_resource(dc, context, pipe->stream); + } + + for 
(i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state) { + if (!resource_build_scaling_params(pipe)) + goto validate_fail; + } + } + + /* Actual dsc count per stream dsc validation*/ + if (!dcn20_validate_dsc(dc, context)) { + vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; + goto validate_fail; + } + + if (repopulate_pipes) + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + context->bw_ctx.dml.vba.VoltageLevel = vlevel; + *vlevel_out = vlevel; + *pipe_cnt_out = pipe_cnt; + + out = true; + goto validate_out; + +validate_fail: + out = false; + +validate_out: + return out; +} + +static int get_refresh_rate(struct dc_state *context) +{ + int refresh_rate = 0; + int h_v_total = 0; + struct dc_crtc_timing *timing = NULL; + + if (context == NULL || context->streams[0] == NULL) + return 0; + + /* check if refresh rate at least 120hz */ + timing = &context->streams[0]->timing; + if (timing == NULL) + return 0; + + h_v_total = timing->h_total * timing->v_total; + if (h_v_total == 0) + return 0; + + refresh_rate = ((timing->pix_clk_100hz * 100) / (h_v_total)) + 1; + return refresh_rate; +} + +#define MAX_STRETCHED_V_BLANK 500 // in micro-seconds +/* + * Scaling factor for v_blank stretch calculations considering timing in + * micro-seconds and pixel clock in 100hz. + * Note: the parenthesis are necessary to ensure the correct order of + * operation where V_SCALE is used. + */ +#define V_SCALE (10000 / MAX_STRETCHED_V_BLANK) + +static int get_frame_rate_at_max_stretch_100hz(struct dc_state *context) +{ + struct dc_crtc_timing *timing = NULL; + uint32_t sec_per_100_lines; + uint32_t max_v_blank; + uint32_t curr_v_blank; + uint32_t v_stretch_max; + uint32_t stretched_frame_pix_cnt; + uint32_t scaled_stretched_frame_pix_cnt; + uint32_t scaled_refresh_rate; + + if (context == NULL || context->streams[0] == NULL) + return 0; + + /* check if refresh rate at least 120hz */ + timing = &context->streams[0]->timing; + if (timing == NULL) + return 0; + + sec_per_100_lines = timing->pix_clk_100hz / timing->h_total + 1; + max_v_blank = sec_per_100_lines / V_SCALE + 1; + curr_v_blank = timing->v_total - timing->v_addressable; + v_stretch_max = (max_v_blank > curr_v_blank) ? 
(max_v_blank - curr_v_blank) : (0); + stretched_frame_pix_cnt = (v_stretch_max + timing->v_total) * timing->h_total; + scaled_stretched_frame_pix_cnt = stretched_frame_pix_cnt / 10000; + scaled_refresh_rate = (timing->pix_clk_100hz) / scaled_stretched_frame_pix_cnt + 1; + + return scaled_refresh_rate; +} + +static bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(struct dc_state *context) +{ + int refresh_rate_max_stretch_100hz; + int min_refresh_100hz; + + if (context == NULL || context->streams[0] == NULL) + return false; + + refresh_rate_max_stretch_100hz = get_frame_rate_at_max_stretch_100hz(context); + min_refresh_100hz = context->streams[0]->timing.min_refresh_in_uhz / 10000; + + if (refresh_rate_max_stretch_100hz < min_refresh_100hz) + return false; + + return true; +} + +bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context) +{ + int refresh_rate = 0; + const int minimum_refreshrate_supported = 120; + + if (context == NULL || context->streams[0] == NULL) + return false; + + if (context->streams[0]->sink->edid_caps.panel_patch.disable_fams) + return false; + + if (dc->debug.disable_fams) + return false; + + if (!dc->caps.dmub_caps.mclk_sw) + return false; + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down) + return false; + + /* more then 1 monitor connected */ + if (context->stream_count != 1) + return false; + + refresh_rate = get_refresh_rate(context); + if (refresh_rate < minimum_refreshrate_supported) + return false; + + if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(context)) + return false; + + if (!context->streams[0]->allow_freesync) + return false; + + if (context->streams[0]->vrr_active_variable && dc->debug.disable_fams_gaming) + return false; + + context->streams[0]->fpo_in_use = true; + + return true; +} + +/* + * set up FPO watermarks, pstate, dram latency + */ +void dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context) +{ + ASSERT(dc != NULL && context != NULL); + if (dc == NULL || context == NULL) + return; + + /* Set wm_a.pstate so high natural MCLK switches are impossible: 4 seconds */ + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U; +} + +void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context) +{ + DC_FP_START(); + dcn30_fpu_update_soc_for_wm_a(dc, context); + DC_FP_END(); +} + +void dcn30_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + DC_FP_START(); + dcn30_fpu_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + DC_FP_END(); +} + +bool dcn30_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + BW_VAL_TRACE_SETUP(); + + int vlevel = 0; + int pipe_cnt = 0; + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + DC_FP_START(); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); + DC_FP_END(); + + if (pipe_cnt == 0) + goto validate_out; + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + goto validate_out; + } + + DC_FP_START(); + if (dc->res_pool->funcs->calculate_wm_and_dlg) + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, 
pipe_cnt, vlevel); + DC_FP_END(); + + BW_VAL_TRACE_END_WATERMARKS(); + + goto validate_out; + +validate_fail: + DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", + dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + + BW_VAL_TRACE_SKIP(fail); + out = false; + +validate_out: + kfree(pipes); + + BW_VAL_TRACE_FINISH(); + + return out; +} + +void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + unsigned int i, j; + unsigned int num_states = 0; + + unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; + unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; + + unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200}; + unsigned int num_dcfclk_sta_targets = 4; + unsigned int num_uclk_states; + + struct dc_bounding_box_max_clk dcn30_bb_max_clk; + + memset(&dcn30_bb_max_clk, 0, sizeof(dcn30_bb_max_clk)); + + if (dc->ctx->dc_bios->vram_info.num_chans) + dcn3_0_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + + DC_FP_START(); + dcn30_fpu_update_dram_channel_width_bytes(dc); + DC_FP_END(); + + if (bw_params->clk_table.entries[0].memclk_mhz) { + + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { + if (bw_params->clk_table.entries[i].dcfclk_mhz > dcn30_bb_max_clk.max_dcfclk_mhz) + dcn30_bb_max_clk.max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > dcn30_bb_max_clk.max_dispclk_mhz) + dcn30_bb_max_clk.max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > dcn30_bb_max_clk.max_dppclk_mhz) + dcn30_bb_max_clk.max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > dcn30_bb_max_clk.max_phyclk_mhz) + dcn30_bb_max_clk.max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + } + + DC_FP_START(); + dcn30_fpu_update_max_clk(&dcn30_bb_max_clk); + DC_FP_END(); + + if (dcn30_bb_max_clk.max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + // If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array + dcfclk_sta_targets[num_dcfclk_sta_targets] = dcn30_bb_max_clk.max_dcfclk_mhz; + num_dcfclk_sta_targets++; + } else if (dcn30_bb_max_clk.max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + // If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates + for (i = 0; i < num_dcfclk_sta_targets; i++) { + if (dcfclk_sta_targets[i] > dcn30_bb_max_clk.max_dcfclk_mhz) { + dcfclk_sta_targets[i] = dcn30_bb_max_clk.max_dcfclk_mhz; + break; + } + } + // Update size of array since we "removed" duplicates + num_dcfclk_sta_targets = i + 1; + } + + num_uclk_states = bw_params->clk_table.num_entries; + + // Calculate optimal dcfclk for each uclk + for (i = 0; i < num_uclk_states; i++) { + DC_FP_START(); + dcn30_fpu_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16, + &optimal_dcfclk_for_uclk[i], NULL); + DC_FP_END(); + if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) { + optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; + } + } + + // Calculate optimal uclk for each dcfclk sta target + for (i = 0; i < num_dcfclk_sta_targets; i++) { + for (j = 0; j < num_uclk_states; j++) { + if (dcfclk_sta_targets[i] < 
optimal_dcfclk_for_uclk[j]) { + optimal_uclk_for_dcfclk_sta_targets[i] = + bw_params->clk_table.entries[j].memclk_mhz * 16; + break; + } + } + } + + i = 0; + j = 0; + // create the final dcfclk and uclk table + while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } else { + if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } else { + j = num_uclk_states; + } + } + } + + while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } + + while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && + optimal_dcfclk_for_uclk[j] <= dcn30_bb_max_clk.max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } + + dcn3_0_soc.num_states = num_states; + DC_FP_START(); + dcn30_fpu_update_bw_bounding_box(dc, bw_params, &dcn30_bb_max_clk, dcfclk_mhz, dram_speed_mts); + DC_FP_END(); + } +} + +static void dcn30_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ + *panel_config = panel_config_defaults; +} + +static const struct resource_funcs dcn30_res_pool_funcs = { + .destroy = dcn30_destroy_resource_pool, + .link_enc_create = dcn30_link_encoder_create, + .panel_cntl_create = dcn30_panel_cntl_create, + .validate_bandwidth = dcn30_validate_bandwidth, + .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, + .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn30_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn30_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .get_panel_config_defaults = dcn30_get_panel_config_defaults, +}; + +#define CTX ctx + +#define REG(reg_name) \ + (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + +static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = REG_READ(CC_DC_PIPE_DIS); + /* Support for max 6 pipes */ + value = value & 0x3f; + return value; +} + +static bool dcn30_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn30_resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + struct ddc_service_init_data ddc_init_data = {0}; + uint32_t pipe_fuses = read_pipe_fuses(ctx); + uint32_t num_pipes = 0; + + if (!(pipe_fuses == 0 || pipe_fuses == 0x3e)) { + 
BREAK_TO_DEBUGGER(); + dm_error("DC: Unexpected fuse recipe for navi2x!\n"); + /* fall back to single pipe */ + pipe_fuses = 0x3e; + } + + DC_FP_START(); + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_dcn3; + + pool->base.funcs = &dcn30_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 600; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + dc->caps.mall_size_per_mem_channel = 8; + /* total size = mall per channel * num channels * 1024 * 1024 */ + dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; + dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; + + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; + dc->caps.color.dpp.dgam_rom_caps.pq = 1; + dc->caps.color.dpp.dgam_rom_caps.hlg = 1; + dc->caps.color.dpp.post_csc = 1; + dc->caps.color.dpp.gamma_corr = 1; + dc->caps.color.dpp.dgam_rom_for_yuv = 0; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN3 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //3 + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + dc->caps.dp_hdmi21_pcon_support = true; + dc->caps.max_v_total = (1 << 15) - 1; + + /* read VBIOS LTTPR caps */ + { + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, + &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + } + + if
(dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock Sources for Pixel Clock*/ + pool->base.clock_sources[DCN30_CLK_SRC_PLL0] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN30_CLK_SRC_PLL1] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN30_CLK_SRC_PLL2] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN30_CLK_SRC_PLL3] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN30_CLK_SRC_PLL4] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + pool->base.clock_sources[DCN30_CLK_SRC_PLL5] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL5, + &clk_src_regs[5], false); + + pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* DCCG */ + pool->base.dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* PP Lib and SMU interfaces */ + init_soc_bounding_box(dc, pool); + + num_pipes = dcn3_0_ip.max_num_dpp; + + for (i = 0; i < dcn3_0_ip.max_num_dpp; i++) + if (pipe_fuses & 1 << i) + num_pipes--; + + dcn3_0_ip.max_num_dpp = num_pipes; + dcn3_0_ip.max_num_otg = num_pipes; + + dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30); + + /* IRQ */ + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn30_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* HUBBUB */ + pool->base.hubbub = dcn30_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.hubps[i] = dcn30_hubp_create(ctx, i); + if (pool->base.hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->base.dpps[i] = dcn30_dpp_create(ctx, i); + if (pool->base.dpps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + pool->base.opps[i] = dcn30_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.timing_generators[i] = dcn30_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: 
failed to create tg!\n"); + goto create_fail; + } + } + pool->base.timing_generator_count = i; + /* PSR */ + pool->base.psr = dmub_psr_create(ctx); + + if (pool->base.psr == NULL) { + dm_error("DC: failed to create PSR obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* ABM */ + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.multiple_abms[i] = dmub_abm_create(ctx, + &abm_regs[i], + &abm_shift, + &abm_mask); + if (pool->base.multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + /* MPC and DSC */ + pool->base.mpc = dcn30_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn30_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn30_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn30_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn30_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create aux engine!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn30_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hw i2c!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* Audio, Stream Encoders including DIG and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn30_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { + ddc_init_data.ctx = dc->ctx; + ddc_init_data.link = NULL; + ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + ddc_init_data.id.enum_id = 0; + ddc_init_data.id.type = OBJECT_TYPE_GENERIC; + pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); + } else { + pool->base.oem_device = NULL; + } + + DC_FP_END(); + + return true; + +create_fail: + + DC_FP_END(); + dcn30_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn30_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn30_resource_pool *pool = + kzalloc(sizeof(struct dcn30_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn30_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h new file mode 100644 index 0000000000..8e6b8b7368 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h @@ -0,0 +1,108 @@ +/* + * Copyright 2020 Advanced
Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DCN30_RESOURCE_H_ +#define _DCN30_RESOURCE_H_ + +#include "core_types.h" + +#define TO_DCN30_RES_POOL(pool)\ + container_of(pool, struct dcn30_resource_pool, base) + +struct dc; +struct resource_pool; +struct _vcs_dpi_display_pipe_params_st; + +extern struct _vcs_dpi_ip_params_st dcn3_0_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc; + +struct dcn30_resource_pool { + struct resource_pool base; +}; +struct resource_pool *dcn30_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +void dcn30_set_mcif_arb_params( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt); + +unsigned int dcn30_calc_max_scaled_time( + unsigned int time_per_pixel, + enum mmhubbub_wbif_mode mode, + unsigned int urgent_watermark); + +bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, + bool fast_validate); +bool dcn30_internal_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *vlevel_out, + bool fast_validate, + bool allow_self_refresh_only); +void dcn30_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel); +void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context); +void dcn30_populate_dml_writeback_from_context( + struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); + +int dcn30_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate); + +bool dcn30_acquire_post_bldn_3dlut( + struct resource_context *res_ctx, + const struct resource_pool *pool, + int mpcc_id, + struct dc_3dlut **lut, + struct dc_transfer_func **shaper); + +bool dcn30_release_post_bldn_3dlut( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_3dlut **lut, + struct dc_transfer_func **shaper); + +enum dc_status dcn30_add_stream_to_ctx( + struct dc *dc, + struct dc_state *new_ctx, + struct dc_stream_state *dc_stream); + +void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); + +bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context); +void dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context); 
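+/* Note (inferred from the name and the vblank-stretch helpers above): + * returns an index into the dummy p-state latency table used when + * validating fw-based mclk switching. */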
+int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, int pipe_cnt, int vlevel); + +#endif /* _DCN30_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c new file mode 100644 index 0000000000..7538b548c5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -0,0 +1,1728 @@ +/* + * Copyright 2019-2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "dm_services.h" +#include "dc.h" + +#include "dcn301/dcn301_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn30/dcn30_resource.h" +#include "dcn301_resource.h" + +#include "dcn20/dcn20_resource.h" + +#include "dcn10/dcn10_ipp.h" +#include "dcn301/dcn301_hubbub.h" +#include "dcn30/dcn30_mpc.h" +#include "dcn30/dcn30_hubp.h" +#include "irq/dcn30/irq_service_dcn30.h" +#include "dcn30/dcn30_dpp.h" +#include "dcn301/dcn301_optc.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn30/dcn30_opp.h" +#include "dcn20/dcn20_dsc.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn30/dcn30_afmt.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "clk_mgr.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dml/display_mode_vba.h" +#include "dcn301/dcn301_dccg.h" +#include "dcn10/dcn10_resource.h" +#include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn301/dcn301_dio_link_encoder.h" +#include "dcn301/dcn301_panel_cntl.h" + +#include "vangogh_ip_offset.h" + +#include "dcn30/dcn30_dwb.h" +#include "dcn30/dcn30_mmhubbub.h" + +#include "dcn/dcn_3_0_1_offset.h" +#include "dcn/dcn_3_0_1_sh_mask.h" + +#include "nbio/nbio_7_2_0_offset.h" + +#include "dpcs/dpcs_3_0_0_offset.h" +#include "dpcs/dpcs_3_0_0_sh_mask.h" + +#include "reg_helper.h" +#include "dce/dmub_abm.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" + +#include "dml/dcn30/dcn30_fpu.h" + +#include "dml/dcn30/display_mode_vba_30.h" +#include "dml/dcn301/dcn301_fpu.h" +#include "vm_helper.h" +#include "dcn20/dcn20_vmid.h" +#include "amdgpu_socbb.h" + +#define TO_DCN301_RES_POOL(pool)\ + container_of(pool, struct dcn301_resource_pool, base) + +#define DC_LOGGER \ + dc->ctx->logger +#define 
DC_LOGGER_INIT(logger) + +enum dcn301_clk_src_array_id { + DCN301_CLK_SRC_PLL0, + DCN301_CLK_SRC_PLL1, + DCN301_CLK_SRC_PLL2, + DCN301_CLK_SRC_PLL3, + DCN301_CLK_SRC_TOTAL +}; + +/* begin ********************* + * macros to expand register list macros defined in HW object header files + */ + +/* DCN */ +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRII2(reg_name_pre, reg_name_post, id)\ + .reg_name_pre ## _ ## reg_name_post[id] = BASE(mm ## reg_name_pre \ + ## id ## _ ## reg_name_post ## _BASE_IDX) + \ + mm ## reg_name_pre ## id ## _ ## reg_name_post + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRII_DWB(reg_name, temp_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## temp_name + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX0_ ## reg_name + +/* MMHUB */ +#define MMHUB_BASE_INNER(seg) \ + MMHUB_BASE__INST0_SEG ## seg + +#define MMHUB_BASE(seg) \ + MMHUB_BASE_INNER(seg) + +#define MMHUB_SR(reg_name)\ + .reg_name = MMHUB_BASE(regMM ## reg_name ## _BASE_IDX) + \ + regMM ## reg_name + +/* CLOCK */ +#define CLK_BASE_INNER(seg) \ + CLK_BASE__INST0_SEG ## seg + +#define CLK_BASE(seg) \ + CLK_BASE_INNER(seg) + +#define CLK_SRI(reg_name, block, inst)\ + .reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## _ ## inst ## _ ## reg_name + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +#define clk_src_regs(index, pllid)\ +[index] = {\ + CS_COMMON_REG_LIST_DCN3_01(index, pllid),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, C), + clk_src_regs(3, D) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +#define abm_regs(id)\ +[id] = {\ + ABM_DCN301_REG_LIST(id)\ +} + +static const struct dce_abm_registers abm_regs[] = { + abm_regs(0), + abm_regs(1), +
abm_regs(2), + abm_regs(3), +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN30(_MASK) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6) +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define vpg_regs(id)\ +[id] = {\ + VPG_DCN3_REG_LIST(id)\ +} + +static const struct dcn30_vpg_registers vpg_regs[] = { + vpg_regs(0), + vpg_regs(1), + vpg_regs(2), + vpg_regs(3), +}; + +static const struct dcn30_vpg_shift vpg_shift = { + DCN3_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn30_vpg_mask vpg_mask = { + DCN3_VPG_MASK_SH_LIST(_MASK) +}; + +#define afmt_regs(id)\ +[id] = {\ + AFMT_DCN3_REG_LIST(id)\ +} + +static const struct dcn30_afmt_registers afmt_regs[] = { + afmt_regs(0), + afmt_regs(1), + afmt_regs(2), + afmt_regs(3), +}; + +static const struct dcn30_afmt_shift afmt_shift = { + DCN3_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn30_afmt_mask afmt_mask = { + DCN3_AFMT_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_DCN3_REG_LIST(id)\ +} + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), +}; + + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN301_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN2_REG_LIST(id), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ +} + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B), + link_regs(2, C), + link_regs(3, D), +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN301(__SHIFT),\ + DPCS_DCN2_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN301(_MASK),\ + DPCS_DCN2_MASK_SH_LIST(_MASK) +}; + +#define panel_cntl_regs(id)\ +[id] = {\ + DCN301_PANEL_CNTL_REG_LIST(id),\ +} + +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + panel_cntl_regs(0), + panel_cntl_regs(1), +}; + +static const struct dcn301_panel_cntl_shift 
panel_cntl_shift = { + DCN301_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn301_panel_cntl_mask panel_cntl_mask = { + DCN301_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +#define dpp_regs(id)\ +[id] = {\ + DPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn3_dpp_registers dpp_regs[] = { + dpp_regs(0), + dpp_regs(1), + dpp_regs(2), + dpp_regs(3), +}; + +static const struct dcn3_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) +}; + +static const struct dcn3_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN30(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn20_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3), +}; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUXN_IMPCAL = 0, \ + .AUXP_IMPCAL = 0, \ + .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), +}; + +#define dwbc_regs_dcn3(id)\ +[id] = {\ + DWBC_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_dwbc_registers dwbc30_regs[] = { + dwbc_regs_dcn3(0), +}; + +static const struct dcn30_dwbc_shift dwbc30_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_dwbc_mask dwbc30_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define mcif_wb_regs_dcn3(id)\ +[id] = {\ + MCIF_WB_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { + mcif_wb_regs_dcn3(0) +}; + +static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define dsc_regsDCN20(id)\ +[id] = {\ + DSC_REG_LIST_DCN20(id)\ +} + +static const struct dcn20_dsc_registers dsc_regs[] = { + dsc_regsDCN20(0), + dsc_regsDCN20(1), + dsc_regsDCN20(2), +}; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static const struct dcn30_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN3_0(0), + MPC_REG_LIST_DCN3_0(1), + MPC_REG_LIST_DCN3_0(2), + MPC_REG_LIST_DCN3_0(3), + MPC_OUT_MUX_REG_LIST_DCN3_0(0), + MPC_OUT_MUX_REG_LIST_DCN3_0(1), + MPC_OUT_MUX_REG_LIST_DCN3_0(2), + MPC_OUT_MUX_REG_LIST_DCN3_0(3), + MPC_RMU_GLOBAL_REG_LIST_DCN3AG, + MPC_RMU_REG_LIST_DCN3AG(0), + MPC_RMU_REG_LIST_DCN3AG(1), + MPC_DWB_MUX_REG_LIST_DCN3_0(0), +}; + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define optc_regs(id)\ +[id] = {OPTC_COMMON_REG_LIST_DCN3_0(id)} + + +static const struct dcn_optc_registers optc_regs[] = { + optc_regs(0), + optc_regs(1), + optc_regs(2), + optc_regs(3), +}; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define hubp_regs(id)\ +[id] = {\ + HUBP_REG_LIST_DCN30(id)\ +} + +static const struct dcn_hubp2_registers hubp_regs[] = { + hubp_regs(0), + 
hubp_regs(1), + hubp_regs(2), + hubp_regs(3), +}; + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN30(_MASK) +}; + +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN301(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN301(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN301(_MASK) +}; + +static const struct dccg_registers dccg_regs = { + DCCG_REG_LIST_DCN301() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN301(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN301(_MASK) +}; + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN301_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN301_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN301_MASK_SH_LIST(_MASK) +}; +#define vmid_regs(id)\ +[id] = {\ + DCN20_VMID_REG_LIST(id)\ +} + +static const struct dcn_vmid_registers vmid_regs[] = { + vmid_regs(0), + vmid_regs(1), + vmid_regs(2), + vmid_regs(3), + vmid_regs(4), + vmid_regs(5), + vmid_regs(6), + vmid_regs(7), + vmid_regs(8), + vmid_regs(9), + vmid_regs(10), + vmid_regs(11), + vmid_regs(12), + vmid_regs(13), + vmid_regs(14), + vmid_regs(15) +}; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static struct resource_caps res_cap_dcn301 = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 4, + .num_stream_encoder = 4, + .num_pll = 4, + .num_dwb = 1, + .num_ddc = 4, + .num_vmid = 16, + .num_mpc_3dlut = 2, + .num_dsc = 3, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true, + .ayuv = false, + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + + /* 6:1 downscaling ratio: 1000/6 = 166.666 */ + .max_downscale_factor = { + .argb8888 = 167, + .nv12 = 167, + .fp16 = 167 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_dpp_power_gate = false, + .disable_hubp_power_gate = false, + .disable_clock_gate = true, + .disable_pplib_clock_request = true, + .disable_pplib_wm_range = true, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 7680,/* up to 8K */ + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable + .dmub_command_table = true, + .use_max_lb = false, + .exit_idle_opt_for_cursor_updates = true, + .using_dml2 = false, +}; + +static void dcn301_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN20_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn301_dpp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn3_dpp *dpp = + kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + + if (!dpp) + return NULL; + + if (dpp3_construct(dpp, ctx, inst, + &dpp_regs[inst], &tf_shift, &tf_mask)) + return
&dpp->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} +static struct output_pixel_processor *dcn301_opp_create(struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_opp *opp = + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dcn301_aux_engine_create(struct dc_context *ctx, uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) +}; + +static struct dce_i2c_hw *dcn301_i2c_hw_create(struct dc_context *ctx, uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct mpc *dcn301_mpc_create( + struct dc_context *ctx, + int num_mpcc, + int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), + GFP_KERNEL); + + if (!mpc30) + return NULL; + + dcn30_mpc_construct(mpc30, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + num_mpcc, + num_rmu); + + return &mpc30->base; +} + +static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), + GFP_KERNEL); + + if (!hubbub3) + return NULL; + + hubbub301_construct(hubbub3, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask); + + + for (i = 0; i < res_cap_dcn301.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + hubbub3->num_vmid = res_cap_dcn301.num_vmid; + + return &hubbub3->base; +} + +static struct timing_generator *dcn301_timing_generator_create( + struct dc_context *ctx, uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn301_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn301_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + 
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + + dcn301_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc20->enc10.base; +} + +static struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dcn301_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dcn301_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dcn301_panel_cntl_construct(panel_cntl, + init_data, + &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, + &panel_cntl_mask); + + return &panel_cntl->base; +} + + +#define CTX ctx + +#define REG(reg_name) \ + (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + +static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = REG_READ(CC_DC_PIPE_DIS); + /* Support for max 4 pipes */ + value = value & 0xf; + return value; +} + + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); + +} + +static struct audio *dcn301_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct vpg *dcn301_vpg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); + + if (!vpg3) + return NULL; + + vpg3_construct(vpg3, ctx, inst, + &vpg_regs[inst], + &vpg_shift, + &vpg_mask); + + return &vpg3->base; +} + +static struct afmt *dcn301_afmt_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); + + if (!afmt3) + return NULL; + + afmt3_construct(afmt3, ctx, inst, + &afmt_regs[inst], + &afmt_shift, + &afmt_mask); + + return &afmt3->base; +} + +static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGF) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn301_vpg_create(ctx, vpg_inst); + afmt = dcn301_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + + dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, + eng_id, vpg, afmt, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static struct dce_hwseq *dcn301_hwseq_create(struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn301_create_audio, + .create_stream_encoder = dcn301_stream_encoder_create, + .create_hwseq = dcn301_hwseq_create, +}; + +static void dcn301_destruct(struct
dcn301_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + if (pool->base.stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); + pool->base.stream_enc[i]->vpg = NULL; + } + if (pool->base.stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); + pool->base.stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn301_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { + if (pool->base.mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->base.mpc_lut[i]); + pool->base.mpc_lut[i] = NULL; + } + if (pool->base.mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->base.mpc_shaper[i]); + pool->base.mpc_shaper[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.multiple_abms[i] != NULL) + dce_abm_destroy(&pool->base.multiple_abms[i]); + } + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); +} + +static struct hubp *dcn301_hubp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), 
GFP_KERNEL); + + if (!hubp2) + return NULL; + + if (hubp3_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), + GFP_KERNEL); + + if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return false; + } + + dcn30_dwbc_construct(dwbc30, ctx, + &dwbc30_regs[i], + &dwbc30_shift, + &dwbc30_mask, + i); + + pool->dwbc[i] = &dwbc30->base; + } + return true; +} + +static bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), + GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + + dcn30_mmhubbub_construct(mcif_wb30, ctx, + &mcif_wb30_regs[i], + &mcif_wb30_shift, + &mcif_wb30_mask, + i); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +static struct display_stream_compressor *dcn301_dsc_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + return &dsc->base; +} + + +static void dcn301_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn301_resource_pool *dcn301_pool = TO_DCN301_RES_POOL(*pool); + + dcn301_destruct(dcn301_pool); + kfree(dcn301_pool); + *pool = NULL; +} + +static struct clock_source *dcn301_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn301_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + + +static bool is_soc_bounding_box_valid(struct dc *dc) +{ + uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; + + if (ASICREV_IS_VANGOGH(hw_internal_rev)) + return true; + + return false; +} + +static bool init_soc_bounding_box(struct dc *dc, + struct dcn301_resource_pool *pool) +{ + struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_01_soc; + struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_01_ip; + + DC_LOGGER_INIT(dc->ctx->logger); + + if (!is_soc_bounding_box_valid(dc)) { + DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); + return false; + } + + loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; + loaded_ip->max_num_dpp = pool->base.pipe_count; + DC_FP_START(); + dcn20_patch_bounding_box(dc, loaded_bb); + DC_FP_END(); + + if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { + struct bp_soc_bb_info bb_info = {0}; + + if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { + DC_FP_START(); + dcn301_fpu_init_soc_bounding_box(bb_info); + DC_FP_END(); + } + } + + return true; +} + + 
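+/* set_wm_ranges() below builds the pp_smu watermark range sets handed to + * PP Lib/SMU: one unconstrained reader set when the bounding box has a + * single state, otherwise up to four reader sets filled in via + * dcn301_fpu_set_wm_ranges(), plus a single unconstrained writer set. */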
+static void set_wm_ranges( + struct pp_smu_funcs *pp_smu, + struct _vcs_dpi_soc_bounding_box_st *loaded_bb) +{ + struct pp_smu_wm_range_sets ranges = {0}; + int i; + + ranges.num_reader_wm_sets = 0; + + if (loaded_bb->num_states == 1) { + ranges.reader_wm_sets[0].wm_inst = 0; + ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + + ranges.num_reader_wm_sets = 1; + } else if (loaded_bb->num_states > 1) { + for (i = 0; i < 4 && i < loaded_bb->num_states; i++) { + ranges.reader_wm_sets[i].wm_inst = i; + ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + DC_FP_START(); + dcn301_fpu_set_wm_ranges(i, &ranges, loaded_bb); + DC_FP_END(); + ranges.num_reader_wm_sets = i + 1; + } + + ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + } + + ranges.num_writer_wm_sets = 1; + + ranges.writer_wm_sets[0].wm_inst = 0; + ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + + /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ + pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges); +} + +static void dcn301_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + DC_FP_START(); + dcn301_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel); + DC_FP_END(); +} + +static struct resource_funcs dcn301_res_pool_funcs = { + .destroy = dcn301_destroy_resource_pool, + .link_enc_create = dcn301_link_encoder_create, + .panel_cntl_create = dcn301_panel_cntl_create, + .validate_bandwidth = dcn30_validate_bandwidth, + .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, + .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn30_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn301_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state +}; + +static bool dcn301_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn301_resource_pool *pool) +{ + int i, j; + struct dc_context *ctx = dc->ctx; + struct 
irq_service_init_data init_data; + uint32_t pipe_fuses = read_pipe_fuses(ctx); + uint32_t num_pipes = 0; + + DC_LOGGER_INIT(dc->ctx->logger); + + ctx->dc_bios->regs = &bios_regs; + + if (dc->ctx->asic_id.chip_id == DEVICE_ID_VGH_1435) + res_cap_dcn301.num_pll = 2; + pool->base.res_cap = &res_cap_dcn301; + + pool->base.funcs = &dcn301_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 600; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a enabled by default*/ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; + dc->caps.is_apu = true; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; + dc->caps.color.dpp.dgam_rom_caps.pq = 1; + dc->caps.color.dpp.dgam_rom_caps.hlg = 1; + dc->caps.color.dpp.post_csc = 1; + dc->caps.color.dpp.gamma_corr = 1; + dc->caps.color.dpp.dgam_rom_for_yuv = 0; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN301 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + dc->caps.dp_hdmi21_pcon_support = true; + + /* read VBIOS LTTPR caps */ + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * +
*************************************************/ + + /* Clock Sources for Pixel Clock*/ + pool->base.clock_sources[DCN301_CLK_SRC_PLL0] = + dcn301_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN301_CLK_SRC_PLL1] = + dcn301_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN301_CLK_SRC_PLL2] = + dcn301_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN301_CLK_SRC_PLL3] = + dcn301_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + + pool->base.clk_src_count = DCN301_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn301_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* DCCG */ + pool->base.dccg = dccg301_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + init_soc_bounding_box(dc, pool); + + if (!dc->debug.disable_pplib_wm_range && pool->base.pp_smu->nv_funcs.set_wm_ranges) + set_wm_ranges(pool->base.pp_smu, &dcn3_01_soc); + + num_pipes = dcn3_01_ip.max_num_dpp; + + for (i = 0; i < dcn3_01_ip.max_num_dpp; i++) + if (pipe_fuses & 1 << i) + num_pipes--; + dcn3_01_ip.max_num_dpp = num_pipes; + dcn3_01_ip.max_num_otg = num_pipes; + + + dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); + + /* IRQ */ + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn30_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* HUBBUB */ + pool->base.hubbub = dcn301_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + j = 0; + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->base.pipe_count; i++) { + + /* if pipe is disabled, skip instance of HW pipe, + * i.e., skip ASIC register instance + */ + if ((pipe_fuses & (1 << i)) != 0) { + DC_LOG_DEBUG("%s: fusing pipe %d\n", __func__, i); + continue; + } + + pool->base.hubps[j] = dcn301_hubp_create(ctx, i); + if (pool->base.hubps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->base.dpps[j] = dcn301_dpp_create(ctx, i); + if (pool->base.dpps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + + pool->base.opps[j] = dcn301_opp_create(ctx, i); + if (pool->base.opps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + + pool->base.timing_generators[j] = dcn301_timing_generator_create(ctx, i); + if (pool->base.timing_generators[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + j++; + } + pool->base.timing_generator_count = j; + pool->base.pipe_count = j; + pool->base.mpcc_count = j; + + /* ABM (or ABMs for NV2x) */ + /* TODO: */ + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.multiple_abms[i] = dmub_abm_create(ctx, + &abm_regs[i], +
&abm_shift, + &abm_mask); + if (pool->base.multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* MPC and DSC */ + pool->base.mpc = dcn301_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn301_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn301_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn301_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn301_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create aux engine!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn301_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hw i2c!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn301_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + return true; + +create_fail: + + dcn301_destruct(pool); + + return false; +} + +struct resource_pool *dcn301_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn301_resource_pool *pool = + kzalloc(sizeof(struct dcn301_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn301_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h new file mode 100644 index 0000000000..ae8672680c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h @@ -0,0 +1,45 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DCN301_RESOURCE_H_ +#define _DCN301_RESOURCE_H_ + +#include "core_types.h" + +struct dc; +struct resource_pool; +struct _vcs_dpi_display_pipe_params_st; + +extern struct _vcs_dpi_ip_params_st dcn3_01_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc; + +struct dcn301_resource_pool { + struct resource_pool base; +}; +struct resource_pool *dcn301_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +#endif /* _DCN301_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c new file mode 100644 index 0000000000..5791b5cc28 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -0,0 +1,1518 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dcn302/dcn302_init.h" +#include "dcn302_resource.h" +#include "dcn302/dcn302_dccg.h" +#include "irq/dcn302/irq_service_dcn302.h" + +#include "dcn30/dcn30_dio_link_encoder.h" +#include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn30/dcn30_dwb.h" +#include "dcn30/dcn30_dpp.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn30/dcn30_hubp.h" +#include "dcn30/dcn30_mmhubbub.h" +#include "dcn30/dcn30_mpc.h" +#include "dcn30/dcn30_opp.h" +#include "dcn30/dcn30_optc.h" +#include "dcn30/dcn30_resource.h" + +#include "dcn20/dcn20_dsc.h" +#include "dcn20/dcn20_resource.h" + +#include "dml/dcn30/dcn30_fpu.h" + +#include "dcn10/dcn10_resource.h" + +#include "link.h" +#include "dce/dce_abm.h" +#include "dce/dce_audio.h" +#include "dce/dce_aux.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_hwseq.h" +#include "dce/dce_i2c_hw.h" +#include "dce/dce_panel_cntl.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "clk_mgr.h" + +#include "hw_sequencer_private.h" +#include "reg_helper.h" +#include "resource.h" +#include "vm_helper.h" + +#include "dml/dcn302/dcn302_fpu.h" + +#include "dimgrey_cavefish_ip_offset.h" +#include "dcn/dcn_3_0_2_offset.h" +#include "dcn/dcn_3_0_2_sh_mask.h" +#include "dpcs/dpcs_3_0_0_offset.h" +#include "dpcs/dpcs_3_0_0_sh_mask.h" +#include "nbio/nbio_7_4_offset.h" +#include "amdgpu_socbb.h" + +#define DC_LOGGER \ + dc->ctx->logger +#define DC_LOGGER_INIT(logger) + +static const struct dc_debug_options debug_defaults_drv = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = true, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 7680,/*upto 8K*/ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable, + .dmub_command_table = true, + .use_max_lb = true, + .exit_idle_opt_for_cursor_updates = true, + .enable_legacy_fast_update = false, + .using_dml2 = false, +}; + +static const struct dc_panel_config panel_config_defaults = { + .psr = { + .disable_psr = false, + .disallow_psrsu = false, + .disallow_replay = false, + }, +}; + +enum dcn302_clk_src_array_id { + DCN302_CLK_SRC_PLL0, + DCN302_CLK_SRC_PLL1, + DCN302_CLK_SRC_PLL2, + DCN302_CLK_SRC_PLL3, + DCN302_CLK_SRC_PLL4, + DCN302_CLK_SRC_TOTAL +}; + +static const struct resource_caps res_cap_dcn302 = { + .num_timing_generator = 5, + .num_opp = 5, + .num_video_plane = 5, + .num_audio = 5, + .num_stream_encoder = 5, + .num_dwb = 1, + .num_ddc = 5, + .num_vmid = 16, + .num_mpc_3dlut = 2, + .num_dsc = 5, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true, + .ayuv = false, + }, + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + /* 6:1 downscaling ratio: 1000/6 = 166.666 */ + .max_downscale_factor = { + .argb8888 = 167, + .nv12 = 167, + .fp16 = 167 + }, + 16, + 16 +}; + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +/* DCN */ +#define BASE_INNER(seg) 
DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name + +#define SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + mm ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## _ ## block ## id + +#define SRII_DWB(reg_name, temp_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## temp_name + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN30(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN30(_MASK) +}; + +#define vmid_regs(id)\ + [id] = { DCN20_VMID_REG_LIST(id) } + +static const struct dcn_vmid_registers vmid_regs[] = { + vmid_regs(0), + vmid_regs(1), + vmid_regs(2), + vmid_regs(3), + vmid_regs(4), + vmid_regs(5), + vmid_regs(6), + vmid_regs(7), + vmid_regs(8), + vmid_regs(9), + vmid_regs(10), + vmid_regs(11), + vmid_regs(12), + vmid_regs(13), + vmid_regs(14), + vmid_regs(15) +}; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static struct hubbub *dcn302_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); + + if (!hubbub3) + return NULL; + + hubbub3_construct(hubbub3, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); + + for (i = 0; i < res_cap_dcn302.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub3->base; +} + +#define vpg_regs(id)\ + [id] = { VPG_DCN3_REG_LIST(id) } + +static const struct dcn30_vpg_registers vpg_regs[] = { + vpg_regs(0), + vpg_regs(1), + vpg_regs(2), + vpg_regs(3), + vpg_regs(4), + vpg_regs(5) +}; + +static const struct dcn30_vpg_shift vpg_shift = { + DCN3_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn30_vpg_mask vpg_mask = { + DCN3_VPG_MASK_SH_LIST(_MASK) +}; + +static struct vpg *dcn302_vpg_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); + + if (!vpg3) + return NULL; + + vpg3_construct(vpg3, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); + + return &vpg3->base; +} + +#define afmt_regs(id)\ + [id] = { AFMT_DCN3_REG_LIST(id) 
} + +static const struct dcn30_afmt_registers afmt_regs[] = { + afmt_regs(0), + afmt_regs(1), + afmt_regs(2), + afmt_regs(3), + afmt_regs(4), + afmt_regs(5) +}; + +static const struct dcn30_afmt_shift afmt_shift = { + DCN3_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn30_afmt_mask afmt_mask = { + DCN3_AFMT_MASK_SH_LIST(_MASK) +}; + +static struct afmt *dcn302_afmt_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); + + if (!afmt3) + return NULL; + + afmt3_construct(afmt3, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); + + return &afmt3->base; +} + +#define audio_regs(id)\ + [id] = { AUD_COMMON_REG_LIST(id) } + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6) +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +static struct audio *dcn302_create_audio(struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); +} + +#define stream_enc_regs(id)\ + [id] = { SE_DCN3_REG_LIST(id) } + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4) +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct stream_encoder *dcn302_stream_encoder_create(enum engine_id eng_id, struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGE) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn302_vpg_create(ctx, vpg_inst); + afmt = dcn302_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + + dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +#define clk_src_regs(index, pllid)\ + [index] = { CS_COMMON_REG_LIST_DCN3_02(index, pllid) } + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, C), + clk_src_regs(3, D), + clk_src_regs(4, E) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +static struct clock_source *dcn302_clock_source_create(struct dc_context *ctx, struct dc_bios *bios, + enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); 
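+ /* + * Construct-or-free pattern shared by these dcn30x constructors: if + * dcn3_clk_src_construct() below fails, the partially built object is + * freed and NULL is returned so the caller can abort pool construction. + */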
+ + if (!clk_src) + return NULL; + + if (dcn3_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN302_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN302_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN302_MASK_SH_LIST(_MASK) +}; + +static struct dce_hwseq *dcn302_hwseq_create(struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} + +#define hubp_regs(id)\ + [id] = { HUBP_REG_LIST_DCN30(id) } + +static const struct dcn_hubp2_registers hubp_regs[] = { + hubp_regs(0), + hubp_regs(1), + hubp_regs(2), + hubp_regs(3), + hubp_regs(4) +}; + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct hubp *dcn302_hubp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + + if (hubp3_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +#define dpp_regs(id)\ + [id] = { DPP_REG_LIST_DCN30(id) } + +static const struct dcn3_dpp_registers dpp_regs[] = { + dpp_regs(0), + dpp_regs(1), + dpp_regs(2), + dpp_regs(3), + dpp_regs(4) +}; + +static const struct dcn3_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) +}; + +static const struct dcn3_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN30(_MASK) +}; + +static struct dpp *dcn302_dpp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + + if (!dpp) + return NULL; + + if (dpp3_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +#define opp_regs(id)\ + [id] = { OPP_REG_LIST_DCN30(id) } + +static const struct dcn20_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3), + opp_regs(4) +}; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +static struct output_pixel_processor *dcn302_opp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +#define optc_regs(id)\ + [id] = { OPTC_COMMON_REG_LIST_DCN3_0(id) } + +static const struct dcn_optc_registers optc_regs[] = { + optc_regs(0), + optc_regs(1), + optc_regs(2), + optc_regs(3), + optc_regs(4) +}; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct timing_generator *dcn302_timing_generator_create(struct dc_context *ctx, uint32_t instance) +{ + struct optc *tgn10 = kzalloc(sizeof(struct optc), 
GFP_KERNEL); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn30_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct dcn30_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN3_0(0), + MPC_REG_LIST_DCN3_0(1), + MPC_REG_LIST_DCN3_0(2), + MPC_REG_LIST_DCN3_0(3), + MPC_REG_LIST_DCN3_0(4), + MPC_OUT_MUX_REG_LIST_DCN3_0(0), + MPC_OUT_MUX_REG_LIST_DCN3_0(1), + MPC_OUT_MUX_REG_LIST_DCN3_0(2), + MPC_OUT_MUX_REG_LIST_DCN3_0(3), + MPC_OUT_MUX_REG_LIST_DCN3_0(4), + MPC_RMU_GLOBAL_REG_LIST_DCN3AG, + MPC_RMU_REG_LIST_DCN3AG(0), + MPC_RMU_REG_LIST_DCN3AG(1), + MPC_RMU_REG_LIST_DCN3AG(2), + MPC_DWB_MUX_REG_LIST_DCN3_0(0), +}; + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct mpc *dcn302_mpc_create(struct dc_context *ctx, int num_mpcc, int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); + + if (!mpc30) + return NULL; + + dcn30_mpc_construct(mpc30, ctx, &mpc_regs, &mpc_shift, &mpc_mask, num_mpcc, num_rmu); + + return &mpc30->base; +} + +#define dsc_regsDCN20(id)\ +[id] = { DSC_REG_LIST_DCN20(id) } + +static const struct dcn20_dsc_registers dsc_regs[] = { + dsc_regsDCN20(0), + dsc_regsDCN20(1), + dsc_regsDCN20(2), + dsc_regsDCN20(3), + dsc_regsDCN20(4) +}; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static struct display_stream_compressor *dcn302_dsc_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + return &dsc->base; +} + +#define dwbc_regs_dcn3(id)\ +[id] = { DWBC_COMMON_REG_LIST_DCN30(id) } + +static const struct dcn30_dwbc_registers dwbc30_regs[] = { + dwbc_regs_dcn3(0) +}; + +static const struct dcn30_dwbc_shift dwbc30_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_dwbc_mask dwbc30_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static bool dcn302_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), GFP_KERNEL); + + if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return false; + } + + dcn30_dwbc_construct(dwbc30, ctx, &dwbc30_regs[i], &dwbc30_shift, &dwbc30_mask, i); + + pool->dwbc[i] = &dwbc30->base; + } + return true; +} + +#define mcif_wb_regs_dcn3(id)\ +[id] = { MCIF_WB_COMMON_REG_LIST_DCN30(id) } + +static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { + mcif_wb_regs_dcn3(0) +}; + +static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static bool dcn302_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct 
dcn30_mmhubbub), GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + + dcn30_mmhubbub_construct(mcif_wb30, ctx, &mcif_wb30_regs[i], &mcif_wb30_shift, &mcif_wb30_mask, i); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUXN_IMPCAL = 0, \ + .AUXP_IMPCAL = 0, \ + .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4) +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static struct dce_aux *dcn302_aux_engine_create(struct dc_context *ctx, uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} + +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5) +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) +}; + +static struct dce_i2c_hw *dcn302_i2c_hw_create(struct dc_context *ctx, uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +#define link_regs(id, phyid)\ + [id] = {\ + LE_DCN3_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN2_REG_LIST(id), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ + } + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT), + DPCS_DCN2_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK), + DPCS_DCN2_MASK_SH_LIST(_MASK) +}; + +#define aux_regs(id)\ + [id] = { DCN2_AUX_REG_LIST(id) } + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4) +}; + +#define hpd_regs(id)\ + [id] = { HPD_REG_LIST(id) } + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4) +}; + +static struct link_encoder *dcn302_link_encoder_create( + struct 
dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + + dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); + + return &enc20->enc10.base; +} + +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCN_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +static struct panel_cntl *dcn302_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, &panel_cntl_mask); + + return &panel_cntl->base; +} + +static void read_dce_straps(struct dc_context *ctx, struct resource_straps *straps) +{ + generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn302_create_audio, + .create_stream_encoder = dcn302_stream_encoder_create, + .create_hwseq = dcn302_hwseq_create, +}; + +static bool is_soc_bounding_box_valid(struct dc *dc) +{ + uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; + + if (ASICREV_IS_DIMGREY_CAVEFISH_P(hw_internal_rev)) + return true; + + return false; +} + +static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) +{ + struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_02_soc; + struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_02_ip; + + DC_LOGGER_INIT(dc->ctx->logger); + + if (!is_soc_bounding_box_valid(dc)) { + DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); + return false; + } + + loaded_ip->max_num_otg = pool->pipe_count; + loaded_ip->max_num_dpp = pool->pipe_count; + loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; + DC_FP_START(); + dcn20_patch_bounding_box(dc, loaded_bb); + DC_FP_END(); + + if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { + struct bp_soc_bb_info bb_info = { 0 }; + + if (dc->ctx->dc_bios->funcs->get_soc_bb_info( + dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { + + DC_FP_START(); + dcn302_fpu_init_soc_bounding_box(bb_info); + DC_FP_END(); + } + } + + return true; +} + +static void dcn302_resource_destruct(struct resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->stream_enc_count; i++) { + if (pool->stream_enc[i] != NULL) { + if (pool->stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->stream_enc[i]->vpg)); + pool->stream_enc[i]->vpg = NULL; + } + if (pool->stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->stream_enc[i]->afmt)); + pool->stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->stream_enc[i])); + pool->stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->res_cap->num_dsc; i++) { + if (pool->dscs[i] != NULL) + dcn20_dsc_destroy(&pool->dscs[i]); + } + + if (pool->mpc != NULL) { + kfree(TO_DCN20_MPC(pool->mpc)); + pool->mpc = NULL; + } + + if (pool->hubbub != NULL) { + 
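/* + * pool->hubbub points at the base member of the dcn20_hubbub wrapper + * allocated in dcn302_hubbub_create(); kfree() through that pointer + * relies on base being the wrapper's first member. + */ +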
kfree(pool->hubbub); + pool->hubbub = NULL; + } + + for (i = 0; i < pool->pipe_count; i++) { + if (pool->dpps[i] != NULL) { + kfree(TO_DCN20_DPP(pool->dpps[i])); + pool->dpps[i] = NULL; + } + + if (pool->hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->hubps[i])); + pool->hubps[i] = NULL; + } + + if (pool->irqs != NULL) + dal_irq_service_destroy(&pool->irqs); + } + + for (i = 0; i < pool->res_cap->num_ddc; i++) { + if (pool->engines[i] != NULL) + dce110_engine_destroy(&pool->engines[i]); + if (pool->hw_i2cs[i] != NULL) { + kfree(pool->hw_i2cs[i]); + pool->hw_i2cs[i] = NULL; + } + if (pool->sw_i2cs[i] != NULL) { + kfree(pool->sw_i2cs[i]); + pool->sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->res_cap->num_opp; i++) { + if (pool->opps[i] != NULL) + pool->opps[i]->funcs->opp_destroy(&pool->opps[i]); + } + + for (i = 0; i < pool->res_cap->num_timing_generator; i++) { + if (pool->timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->timing_generators[i])); + pool->timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->res_cap->num_dwb; i++) { + if (pool->dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->dwbc[i])); + pool->dwbc[i] = NULL; + } + if (pool->mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->mcif_wb[i])); + pool->mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->audio_count; i++) { + if (pool->audios[i]) + dce_aud_destroy(&pool->audios[i]); + } + + for (i = 0; i < pool->clk_src_count; i++) { + if (pool->clock_sources[i] != NULL) + dcn20_clock_source_destroy(&pool->clock_sources[i]); + } + + if (pool->dp_clock_source != NULL) + dcn20_clock_source_destroy(&pool->dp_clock_source); + + for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { + if (pool->mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->mpc_lut[i]); + pool->mpc_lut[i] = NULL; + } + if (pool->mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->mpc_shaper[i]); + pool->mpc_shaper[i] = NULL; + } + } + + for (i = 0; i < pool->pipe_count; i++) { + if (pool->multiple_abms[i] != NULL) + dce_abm_destroy(&pool->multiple_abms[i]); + } + + if (pool->psr != NULL) + dmub_psr_destroy(&pool->psr); + + if (pool->dccg != NULL) + dcn_dccg_destroy(&pool->dccg); + + if (pool->oem_device != NULL) { + struct dc *dc = pool->oem_device->ctx->dc; + + dc->link_srv->destroy_ddc_service(&pool->oem_device); + } +} + +static void dcn302_destroy_resource_pool(struct resource_pool **pool) +{ + dcn302_resource_destruct(*pool); + kfree(*pool); + *pool = NULL; +} + +void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + DC_FP_START(); + dcn302_fpu_update_bw_bounding_box(dc, bw_params); + DC_FP_END(); +} + +static void dcn302_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ + *panel_config = panel_config_defaults; +} + +static struct resource_funcs dcn302_res_pool_funcs = { + .destroy = dcn302_destroy_resource_pool, + .link_enc_create = dcn302_link_encoder_create, + .panel_cntl_create = dcn302_panel_cntl_create, + .validate_bandwidth = dcn30_validate_bandwidth, + .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, + .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = 
dcn30_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn30_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn302_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .get_panel_config_defaults = dcn302_get_panel_config_defaults, +}; + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +static const struct dccg_registers dccg_regs = { + DCCG_REG_LIST_DCN3_02() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN3_02(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN3_02(_MASK) +}; + +#define abm_regs(id)\ + [id] = { ABM_DCN302_REG_LIST(id) } + +static const struct dce_abm_registers abm_regs[] = { + abm_regs(0), + abm_regs(1), + abm_regs(2), + abm_regs(3), + abm_regs(4) +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN30(_MASK) +}; + +static bool dcn302_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + struct ddc_service_init_data ddc_init_data = {0}; + + ctx->dc_bios->regs = &bios_regs; + + pool->res_cap = &res_cap_dcn302; + + pool->funcs = &dcn302_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->pipe_count = pool->res_cap->num_timing_generator; + pool->mpcc_count = pool->res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 600; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + dc->caps.mall_size_per_mem_channel = 4; + /* total size = mall per channel * num channels * 1024 * 1024 */ + dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; + dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + dc->caps.max_v_total = (1 << 15) - 1; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; + dc->caps.color.dpp.dgam_rom_caps.pq = 1; + dc->caps.color.dpp.dgam_rom_caps.hlg = 1; + dc->caps.color.dpp.post_csc = 1; + dc->caps.color.dpp.gamma_corr = 1; + dc->caps.color.dpp.dgam_rom_for_yuv = 0; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no
OGAM ROM on DCN3 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->res_cap->num_mpc_3dlut; //3 + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + dc->caps.dp_hdmi21_pcon_support = true; + + /* read VBIOS LTTPR caps */ + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, + &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock Sources for Pixel Clock*/ + pool->clock_sources[DCN302_CLK_SRC_PLL0] = + dcn302_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->clock_sources[DCN302_CLK_SRC_PLL1] = + dcn302_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->clock_sources[DCN302_CLK_SRC_PLL2] = + dcn302_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->clock_sources[DCN302_CLK_SRC_PLL3] = + dcn302_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->clock_sources[DCN302_CLK_SRC_PLL4] = + dcn302_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + + pool->clk_src_count = DCN302_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->dp_clock_source = + dcn302_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->clk_src_count; i++) { + if (pool->clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* DCCG */ + pool->dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* PP Lib and SMU interfaces */ + init_soc_bounding_box(dc, pool); + + /* DML */ + dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30); + + /* IRQ */ + init_data.ctx = dc->ctx; + pool->irqs = dal_irq_service_dcn302_create(&init_data); + if (!pool->irqs) + goto create_fail; + + /* HUBBUB */ + pool->hubbub = dcn302_hubbub_create(ctx); + if (pool->hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: 
failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->pipe_count; i++) { + pool->hubps[i] = dcn302_hubp_create(ctx, i); + if (pool->hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->dpps[i] = dcn302_dpp_create(ctx, i); + if (pool->dpps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->res_cap->num_opp; i++) { + pool->opps[i] = dcn302_opp_create(ctx, i); + if (pool->opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->res_cap->num_timing_generator; i++) { + pool->timing_generators[i] = dcn302_timing_generator_create(ctx, i); + if (pool->timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + pool->timing_generator_count = i; + + /* PSR */ + pool->psr = dmub_psr_create(ctx); + if (pool->psr == NULL) { + dm_error("DC: failed to create psr!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* ABMs */ + for (i = 0; i < pool->res_cap->num_timing_generator; i++) { + pool->multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask); + if (pool->multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* MPC and DSC */ + pool->mpc = dcn302_mpc_create(ctx, pool->mpcc_count, pool->res_cap->num_mpc_3dlut); + if (pool->mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->res_cap->num_dsc; i++) { + pool->dscs[i] = dcn302_dsc_create(ctx, i); + if (pool->dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn302_dwbc_create(ctx, pool)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn302_mmhubbub_create(ctx, pool)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->res_cap->num_ddc; i++) { + pool->engines[i] = dcn302_aux_engine_create(ctx, i); + if (pool->engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->hw_i2cs[i] = dcn302_i2c_hw_create(ctx, i); + if (pool->hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->sw_i2cs[i] = NULL; + } + + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, pool, + &res_create_funcs)) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn302_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { + ddc_init_data.ctx = dc->ctx; + ddc_init_data.link = NULL; + ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + ddc_init_data.id.enum_id = 0; + ddc_init_data.id.type = OBJECT_TYPE_GENERIC; + pool->oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); + } else { + pool->oem_device = NULL; + } + + return true; + +create_fail: + + 
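/* + * The pool was kzalloc'd by the caller, so any entry not yet created + * is still NULL here; dcn302_resource_destruct() checks each pointer + * before freeing, making it safe on a partially constructed pool. + */ +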
dcn302_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc) +{ + struct resource_pool *pool = kzalloc(sizeof(struct resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn302_resource_construct(init_data->num_virtual_links, dc, pool)) + return pool; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h new file mode 100644 index 0000000000..9f24e73b92 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h @@ -0,0 +1,38 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DCN302_RESOURCE_H_ +#define _DCN302_RESOURCE_H_ + +#include "core_types.h" + +extern struct _vcs_dpi_ip_params_st dcn3_02_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc; + +struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc); + +void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); + +#endif /* _DCN302_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c new file mode 100644 index 0000000000..25cd6236b0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -0,0 +1,1448 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + */ + +#include "dcn303/dcn303_init.h" +#include "dcn303_resource.h" +#include "dcn303/dcn303_dccg.h" +#include "irq/dcn303/irq_service_dcn303.h" + +#include "dcn30/dcn30_dio_link_encoder.h" +#include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn30/dcn30_dpp.h" +#include "dcn30/dcn30_dwb.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn30/dcn30_hubp.h" +#include "dcn30/dcn30_mmhubbub.h" +#include "dcn30/dcn30_mpc.h" +#include "dcn30/dcn30_opp.h" +#include "dcn30/dcn30_optc.h" +#include "dcn30/dcn30_resource.h" + +#include "dcn20/dcn20_dsc.h" +#include "dcn20/dcn20_resource.h" + +#include "dml/dcn30/dcn30_fpu.h" + +#include "dcn10/dcn10_resource.h" + +#include "link.h" + +#include "dce/dce_abm.h" +#include "dce/dce_audio.h" +#include "dce/dce_aux.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_hwseq.h" +#include "dce/dce_i2c_hw.h" +#include "dce/dce_panel_cntl.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "clk_mgr.h" + +#include "hw_sequencer_private.h" +#include "reg_helper.h" +#include "resource.h" +#include "vm_helper.h" + +#include "sienna_cichlid_ip_offset.h" +#include "dcn/dcn_3_0_3_offset.h" +#include "dcn/dcn_3_0_3_sh_mask.h" +#include "dpcs/dpcs_3_0_3_offset.h" +#include "dpcs/dpcs_3_0_3_sh_mask.h" +#include "nbio/nbio_2_3_offset.h" + +#include "dml/dcn303/dcn303_fpu.h" + +#define DC_LOGGER \ + dc->ctx->logger +#define DC_LOGGER_INIT(logger) + + +static const struct dc_debug_options debug_defaults_drv = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = true, + .pipe_split_policy = MPC_SPLIT_AVOID, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 7680,/*upto 8K*/ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable, + .dmub_command_table = true, + .exit_idle_opt_for_cursor_updates = true, + .disable_idle_power_optimizations = false, + .using_dml2 = false, +}; + +static const struct dc_panel_config panel_config_defaults = { + .psr = { + .disable_psr = false, + .disallow_psrsu = false, + .disallow_replay = false, + }, +}; + +enum dcn303_clk_src_array_id { + DCN303_CLK_SRC_PLL0, + DCN303_CLK_SRC_PLL1, + DCN303_CLK_SRC_TOTAL +}; + +static const struct resource_caps res_cap_dcn303 = { + .num_timing_generator = 2, + .num_opp = 2, + .num_video_plane = 2, + .num_audio = 2, + .num_stream_encoder = 2, + .num_dwb = 1, + .num_ddc = 2, + .num_vmid = 16, + .num_mpc_3dlut = 1, + .num_dsc = 2, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true, + .ayuv = false, + }, + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + .max_downscale_factor = { + .argb8888 = 600, + .nv12 = 600, + .fp16 = 600 + }, + 16, + 16 +}; + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + 
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +/* DCN */ +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name + +#define SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + mm ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## _ ## block ## id + +#define SRII_DWB(reg_name, temp_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## temp_name + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN30(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN30(_MASK) +}; + +#define vmid_regs(id)\ + [id] = { DCN20_VMID_REG_LIST(id) } + +static const struct dcn_vmid_registers vmid_regs[] = { + vmid_regs(0), + vmid_regs(1), + vmid_regs(2), + vmid_regs(3), + vmid_regs(4), + vmid_regs(5), + vmid_regs(6), + vmid_regs(7), + vmid_regs(8), + vmid_regs(9), + vmid_regs(10), + vmid_regs(11), + vmid_regs(12), + vmid_regs(13), + vmid_regs(14), + vmid_regs(15) +}; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static struct hubbub *dcn303_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); + + if (!hubbub3) + return NULL; + + hubbub3_construct(hubbub3, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); + + for (i = 0; i < res_cap_dcn303.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub3->base; +} + +#define vpg_regs(id)\ + [id] = { VPG_DCN3_REG_LIST(id) } + +static const struct dcn30_vpg_registers vpg_regs[] = { + vpg_regs(0), + vpg_regs(1), + vpg_regs(2) +}; + +static const struct dcn30_vpg_shift vpg_shift = { + DCN3_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn30_vpg_mask vpg_mask = { + DCN3_VPG_MASK_SH_LIST(_MASK) +}; + +static struct vpg *dcn303_vpg_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); + + if (!vpg3) + return NULL; + + vpg3_construct(vpg3, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); + + return 
&vpg3->base; +} + +#define afmt_regs(id)\ + [id] = { AFMT_DCN3_REG_LIST(id) } + +static const struct dcn30_afmt_registers afmt_regs[] = { + afmt_regs(0), + afmt_regs(1), + afmt_regs(2) +}; + +static const struct dcn30_afmt_shift afmt_shift = { + DCN3_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn30_afmt_mask afmt_mask = { + DCN3_AFMT_MASK_SH_LIST(_MASK) +}; + +static struct afmt *dcn303_afmt_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); + + if (!afmt3) + return NULL; + + afmt3_construct(afmt3, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); + + return &afmt3->base; +} + +#define audio_regs(id)\ + [id] = { AUD_COMMON_REG_LIST(id) } + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6) +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +static struct audio *dcn303_create_audio(struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); +} + +#define stream_enc_regs(id)\ + [id] = { SE_DCN3_REG_LIST(id) } + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1) +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id, struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGB) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn303_vpg_create(ctx, vpg_inst); + afmt = dcn303_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + + dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +#define clk_src_regs(index, pllid)\ + [index] = { CS_COMMON_REG_LIST_DCN3_03(index, pllid) } + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +static struct clock_source *dcn303_clock_source_create(struct dc_context *ctx, struct dc_bios *bios, + enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn3_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, 
&cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN303_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN303_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN303_MASK_SH_LIST(_MASK) +}; + +static struct dce_hwseq *dcn303_hwseq_create(struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} + +#define hubp_regs(id)\ + [id] = { HUBP_REG_LIST_DCN30(id) } + +static const struct dcn_hubp2_registers hubp_regs[] = { + hubp_regs(0), + hubp_regs(1) +}; + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct hubp *dcn303_hubp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + + if (hubp3_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +#define dpp_regs(id)\ + [id] = { DPP_REG_LIST_DCN30(id) } + +static const struct dcn3_dpp_registers dpp_regs[] = { + dpp_regs(0), + dpp_regs(1) +}; + +static const struct dcn3_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) +}; + +static const struct dcn3_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN30(_MASK) +}; + +static struct dpp *dcn303_dpp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + + if (!dpp) + return NULL; + + if (dpp3_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +#define opp_regs(id)\ + [id] = { OPP_REG_LIST_DCN30(id) } + +static const struct dcn20_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1) +}; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +static struct output_pixel_processor *dcn303_opp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +#define optc_regs(id)\ + [id] = { OPTC_COMMON_REG_LIST_DCN3_0(id) } + +static const struct dcn_optc_registers optc_regs[] = { + optc_regs(0), + optc_regs(1) +}; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct timing_generator *dcn303_timing_generator_create(struct dc_context *ctx, uint32_t instance) +{ + struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn30_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static 
const struct dcn30_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN3_0(0), + MPC_REG_LIST_DCN3_0(1), + MPC_OUT_MUX_REG_LIST_DCN3_0(0), + MPC_OUT_MUX_REG_LIST_DCN3_0(1), + MPC_RMU_GLOBAL_REG_LIST_DCN3AG, + MPC_RMU_REG_LIST_DCN3AG(0), + MPC_DWB_MUX_REG_LIST_DCN3_0(0), +}; + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN303(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN303(_MASK) +}; + +static struct mpc *dcn303_mpc_create(struct dc_context *ctx, int num_mpcc, int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); + + if (!mpc30) + return NULL; + + dcn30_mpc_construct(mpc30, ctx, &mpc_regs, &mpc_shift, &mpc_mask, num_mpcc, num_rmu); + + return &mpc30->base; +} + +#define dsc_regsDCN20(id)\ +[id] = { DSC_REG_LIST_DCN20(id) } + +static const struct dcn20_dsc_registers dsc_regs[] = { + dsc_regsDCN20(0), + dsc_regsDCN20(1) +}; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static struct display_stream_compressor *dcn303_dsc_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + return &dsc->base; +} + +#define dwbc_regs_dcn3(id)\ +[id] = { DWBC_COMMON_REG_LIST_DCN30(id) } + +static const struct dcn30_dwbc_registers dwbc30_regs[] = { + dwbc_regs_dcn3(0) +}; + +static const struct dcn30_dwbc_shift dwbc30_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_dwbc_mask dwbc30_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static bool dcn303_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), GFP_KERNEL); + + if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return false; + } + + dcn30_dwbc_construct(dwbc30, ctx, &dwbc30_regs[i], &dwbc30_shift, &dwbc30_mask, i); + + pool->dwbc[i] = &dwbc30->base; + } + return true; +} + +#define mcif_wb_regs_dcn3(id)\ +[id] = { MCIF_WB_COMMON_REG_LIST_DCN30(id) } + +static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { + mcif_wb_regs_dcn3(0) +}; + +static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static bool dcn303_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + + dcn30_mmhubbub_construct(mcif_wb30, ctx, &mcif_wb30_regs[i], &mcif_wb30_shift, &mcif_wb30_mask, i); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUXN_IMPCAL = 0, \ + .AUXP_IMPCAL = 0, \ + .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1) +}; + 
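Every per-instance register table in these resource files follows the same designated-initializer idiom: a small function-like macro drops a *_REG_LIST() expansion into element [id] of a const array (afmt_regs, audio_regs, stream_enc_regs, aux_engine_regs and so on), so the create helpers can index the table with a plain regs[inst]. A minimal self-contained sketch of the idiom, using hypothetical FOO_REG_LIST/struct foo_registers stand-ins rather than names from this patch:

#include <stdint.h>

/* Hypothetical register block with two per-instance registers. */
struct foo_registers {
	uint32_t CTRL;
	uint32_t STATUS;
};

/* Stand-in for a DCN *_REG_LIST(id) macro: derives each instance's
 * register offsets from its id. */
#define FOO_REG_LIST(id) \
	.CTRL = 0x1000 + 0x10 * (id), \
	.STATUS = 0x1004 + 0x10 * (id)

/* [id] = { ... } pins the entry to index id, as afmt_regs(id) etc. do. */
#define foo_regs(id) [id] = { FOO_REG_LIST(id) }

static const struct foo_registers foo_regs[] = {
	foo_regs(0),	/* [0] = { .CTRL = 0x1000, .STATUS = 0x1004 } */
	foo_regs(1)	/* [1] = { .CTRL = 0x1010, .STATUS = 0x1014 } */
};

Reusing one name for both the macro and the array is safe: a function-like macro only expands when its name is followed by a parenthesis, so foo_regs[inst] still refers to the array.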
+static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static struct dce_aux *dcn303_aux_engine_create(struct dc_context *ctx, uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} + +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2) +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) +}; + +static struct dce_i2c_hw *dcn303_i2c_hw_create(struct dc_context *ctx, uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +#define link_regs(id, phyid)\ + [id] = {\ + LE_DCN3_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ + } + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT), + DPCS_DCN2_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK), + DPCS_DCN2_MASK_SH_LIST(_MASK) +}; + +#define aux_regs(id)\ + [id] = { DCN2_AUX_REG_LIST(id) } + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1) +}; + +#define hpd_regs(id)\ + [id] = { HPD_REG_LIST(id) } + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1) +}; + +static struct link_encoder *dcn303_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + + dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); + + return &enc20->enc10.base; +} + +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCN_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +static struct panel_cntl *dcn303_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ 
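+ /* Same allocate-and-construct shape as the other dcn303_*_create()
+ * helpers: zero-allocate the wrapper, construct it against the static
+ * reg/shift/mask tables, and return the embedded base object (NULL on
+ * allocation failure). */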
+ struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, &panel_cntl_mask); + + return &panel_cntl->base; +} + +static void read_dce_straps(struct dc_context *ctx, struct resource_straps *straps) +{ + generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn303_create_audio, + .create_stream_encoder = dcn303_stream_encoder_create, + .create_hwseq = dcn303_hwseq_create, +}; + +static bool is_soc_bounding_box_valid(struct dc *dc) +{ + uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; + + if (ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev)) + return true; + + return false; +} + +static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) +{ + struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_03_soc; + struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_03_ip; + + DC_LOGGER_INIT(dc->ctx->logger); + + if (!is_soc_bounding_box_valid(dc)) { + DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); + return false; + } + + loaded_ip->max_num_otg = pool->pipe_count; + loaded_ip->max_num_dpp = pool->pipe_count; + loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; + DC_FP_START(); + dcn20_patch_bounding_box(dc, loaded_bb); + DC_FP_END(); + + if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { + struct bp_soc_bb_info bb_info = { 0 }; + + if (dc->ctx->dc_bios->funcs->get_soc_bb_info( + dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { + DC_FP_START(); + dcn303_fpu_init_soc_bounding_box(bb_info); + DC_FP_END(); + } + } + + return true; +} + +static void dcn303_resource_destruct(struct resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->stream_enc_count; i++) { + if (pool->stream_enc[i] != NULL) { + if (pool->stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->stream_enc[i]->vpg)); + pool->stream_enc[i]->vpg = NULL; + } + if (pool->stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->stream_enc[i]->afmt)); + pool->stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->stream_enc[i])); + pool->stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->res_cap->num_dsc; i++) { + if (pool->dscs[i] != NULL) + dcn20_dsc_destroy(&pool->dscs[i]); + } + + if (pool->mpc != NULL) { + kfree(TO_DCN20_MPC(pool->mpc)); + pool->mpc = NULL; + } + + if (pool->hubbub != NULL) { + kfree(pool->hubbub); + pool->hubbub = NULL; + } + + for (i = 0; i < pool->pipe_count; i++) { + if (pool->dpps[i] != NULL) { + kfree(TO_DCN20_DPP(pool->dpps[i])); + pool->dpps[i] = NULL; + } + + if (pool->hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->hubps[i])); + pool->hubps[i] = NULL; + } + + if (pool->irqs != NULL) + dal_irq_service_destroy(&pool->irqs); + } + + for (i = 0; i < pool->res_cap->num_ddc; i++) { + if (pool->engines[i] != NULL) + dce110_engine_destroy(&pool->engines[i]); + if (pool->hw_i2cs[i] != NULL) { + kfree(pool->hw_i2cs[i]); + pool->hw_i2cs[i] = NULL; + } + if (pool->sw_i2cs[i] != NULL) { + kfree(pool->sw_i2cs[i]); + pool->sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->res_cap->num_opp; i++) { + if (pool->opps[i] != NULL) + pool->opps[i]->funcs->opp_destroy(&pool->opps[i]); + } + + for (i = 0; i < pool->res_cap->num_timing_generator; i++) { + if
(pool->timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->timing_generators[i])); + pool->timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->res_cap->num_dwb; i++) { + if (pool->dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->dwbc[i])); + pool->dwbc[i] = NULL; + } + if (pool->mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->mcif_wb[i])); + pool->mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->audio_count; i++) { + if (pool->audios[i]) + dce_aud_destroy(&pool->audios[i]); + } + + for (i = 0; i < pool->clk_src_count; i++) { + if (pool->clock_sources[i] != NULL) + dcn20_clock_source_destroy(&pool->clock_sources[i]); + } + + if (pool->dp_clock_source != NULL) + dcn20_clock_source_destroy(&pool->dp_clock_source); + + for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { + if (pool->mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->mpc_lut[i]); + pool->mpc_lut[i] = NULL; + } + if (pool->mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->mpc_shaper[i]); + pool->mpc_shaper[i] = NULL; + } + } + + for (i = 0; i < pool->pipe_count; i++) { + if (pool->multiple_abms[i] != NULL) + dce_abm_destroy(&pool->multiple_abms[i]); + } + + if (pool->psr != NULL) + dmub_psr_destroy(&pool->psr); + + if (pool->dccg != NULL) + dcn_dccg_destroy(&pool->dccg); + + if (pool->oem_device != NULL) { + struct dc *dc = pool->oem_device->ctx->dc; + + dc->link_srv->destroy_ddc_service(&pool->oem_device); + } +} + +static void dcn303_destroy_resource_pool(struct resource_pool **pool) +{ + dcn303_resource_destruct(*pool); + kfree(*pool); + *pool = NULL; +} + +static void dcn303_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ + *panel_config = panel_config_defaults; +} + +void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + DC_FP_START(); + dcn303_fpu_update_bw_bounding_box(dc, bw_params); + DC_FP_END(); +} + +static struct resource_funcs dcn303_res_pool_funcs = { + .destroy = dcn303_destroy_resource_pool, + .link_enc_create = dcn303_link_encoder_create, + .panel_cntl_create = dcn303_panel_cntl_create, + .validate_bandwidth = dcn30_validate_bandwidth, + .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, + .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn30_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn303_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .get_panel_config_defaults = dcn303_get_panel_config_defaults, +}; + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +static const struct dccg_registers dccg_regs = { + DCCG_REG_LIST_DCN3_03() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN3_03(__SHIFT) +}; + +static const struct 
dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN3_03(_MASK) +}; + +#define abm_regs(id)\ + [id] = { ABM_DCN302_REG_LIST(id) } + +static const struct dce_abm_registers abm_regs[] = { + abm_regs(0), + abm_regs(1) +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN30(_MASK) +}; + +static bool dcn303_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + struct ddc_service_init_data ddc_init_data; + + ctx->dc_bios->regs = &bios_regs; + + pool->res_cap = &res_cap_dcn303; + + pool->funcs = &dcn303_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->pipe_count = pool->res_cap->num_timing_generator; + pool->mpcc_count = pool->res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 600; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /* 1.4 w/a applied by default */ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + dc->caps.mall_size_per_mem_channel = 4; + /* total size = mall per channel * num channels * 1024 * 1024 */ + dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * + dc->ctx->dc_bios->vram_info.num_chans * + 1024 * 1024; + dc->caps.cursor_cache_size = + dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; + dc->caps.max_slave_planes = 1; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + dc->caps.max_v_total = (1 << 15) - 1; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; + dc->caps.color.dpp.dgam_rom_caps.pq = 1; + dc->caps.color.dpp.dgam_rom_caps.hlg = 1; + dc->caps.color.dpp.post_csc = 1; + dc->caps.color.dpp.gamma_corr = 1; + dc->caps.color.dpp.dgam_rom_for_yuv = 0; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN3 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->res_cap->num_mpc_3dlut; //3 + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + dc->caps.dp_hdmi21_pcon_support = true; + + dc->config.dc_mode_clk_limit_support = true; + /* read VBIOS LTTPR caps */ + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); +
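/* Honor the VBIOS LTTPR bit only when the AtomBIOS query itself
+ * succeeded; on any failure the cap below stays false. */
+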
dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock Sources for Pixel Clock*/ + pool->clock_sources[DCN303_CLK_SRC_PLL0] = + dcn303_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->clock_sources[DCN303_CLK_SRC_PLL1] = + dcn303_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + + pool->clk_src_count = DCN303_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->dp_clock_source = + dcn303_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->clk_src_count; i++) { + if (pool->clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* DCCG */ + pool->dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* PP Lib and SMU interfaces */ + init_soc_bounding_box(dc, pool); + + /* DML */ + dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); + + /* IRQ */ + init_data.ctx = dc->ctx; + pool->irqs = dal_irq_service_dcn303_create(&init_data); + if (!pool->irqs) + goto create_fail; + + /* HUBBUB */ + pool->hubbub = dcn303_hubbub_create(ctx); + if (pool->hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->pipe_count; i++) { + pool->hubps[i] = dcn303_hubp_create(ctx, i); + if (pool->hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->dpps[i] = dcn303_dpp_create(ctx, i); + if (pool->dpps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->res_cap->num_opp; i++) { + pool->opps[i] = dcn303_opp_create(ctx, i); + if (pool->opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->res_cap->num_timing_generator; i++) { + pool->timing_generators[i] = dcn303_timing_generator_create(ctx, i); + if (pool->timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + pool->timing_generator_count = i; + + /* PSR */ + pool->psr = dmub_psr_create(ctx); + if (pool->psr == NULL) { + dm_error("DC: failed to create psr!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* ABM */ + for (i = 0; i < pool->res_cap->num_timing_generator; i++) { + pool->multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask); + if (pool->multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm 
for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* MPC and DSC */ + pool->mpc = dcn303_mpc_create(ctx, pool->mpcc_count, pool->res_cap->num_mpc_3dlut); + if (pool->mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->res_cap->num_dsc; i++) { + pool->dscs[i] = dcn303_dsc_create(ctx, i); + if (pool->dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn303_dwbc_create(ctx, pool)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn303_mmhubbub_create(ctx, pool)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->res_cap->num_ddc; i++) { + pool->engines[i] = dcn303_aux_engine_create(ctx, i); + if (pool->engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create aux engine!\n"); + goto create_fail; + } + pool->hw_i2cs[i] = dcn303_i2c_hw_create(ctx, i); + if (pool->hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hw i2c!\n"); + goto create_fail; + } + pool->sw_i2cs[i] = NULL; + } + + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, pool, + &res_create_funcs)) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn303_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { + ddc_init_data.ctx = dc->ctx; + ddc_init_data.link = NULL; + ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + ddc_init_data.id.enum_id = 0; + ddc_init_data.id.type = OBJECT_TYPE_GENERIC; + pool->oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); + } else { + pool->oem_device = NULL; + } + + return true; + +create_fail: + + dcn303_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn303_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc) +{ + struct resource_pool *pool = kzalloc(sizeof(struct resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn303_resource_construct(init_data->num_virtual_links, dc, pool)) + return pool; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h new file mode 100644 index 0000000000..37cf152582 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + */ + +#ifndef _DCN303_RESOURCE_H_ +#define _DCN303_RESOURCE_H_ + +#include "core_types.h" + +extern struct _vcs_dpi_ip_params_st dcn3_03_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc; + +struct resource_pool *dcn303_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc); + +void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); + +#endif /* _DCN303_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c new file mode 100644 index 0000000000..31035fc3d8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -0,0 +1,2218 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#include "dm_services.h" +#include "dc.h" + +#include "dcn31/dcn31_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn31_resource.h" + +#include "dcn20/dcn20_resource.h" +#include "dcn30/dcn30_resource.h" + +#include "dml/dcn30/dcn30_fpu.h" + +#include "dcn10/dcn10_ipp.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn31/dcn31_hubbub.h" +#include "dcn30/dcn30_mpc.h" +#include "dcn31/dcn31_hubp.h" +#include "irq/dcn31/irq_service_dcn31.h" +#include "dcn30/dcn30_dpp.h" +#include "dcn31/dcn31_optc.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn30/dcn30_opp.h" +#include "dcn20/dcn20_dsc.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn30/dcn30_afmt.h" +#include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_link_encoder.h" +#include "dcn31/dcn31_apg.h" +#include "dcn31/dcn31_dio_link_encoder.h" +#include "dcn31/dcn31_vpg.h" +#include "dcn31/dcn31_afmt.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "clk_mgr.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dml/display_mode_vba.h" +#include "dml/dcn31/dcn31_fpu.h" +#include "dcn31/dcn31_dccg.h" +#include "dcn10/dcn10_resource.h" +#include "dcn31/dcn31_panel_cntl.h" + +#include "dcn30/dcn30_dwb.h" +#include "dcn30/dcn30_mmhubbub.h" + +// TODO: change include headers /amd/include/asic_reg after upstream +#include "yellow_carp_offset.h" +#include "dcn/dcn_3_1_2_offset.h" +#include "dcn/dcn_3_1_2_sh_mask.h" +#include "nbio/nbio_7_2_0_offset.h" +#include "dpcs/dpcs_4_2_0_offset.h" +#include "dpcs/dpcs_4_2_0_sh_mask.h" +#include "mmhub/mmhub_2_3_0_offset.h" +#include "mmhub/mmhub_2_3_0_sh_mask.h" + + +#define regDCHUBBUB_DEBUG_CTRL_0 0x04d6 +#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2 +#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10 +#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L + +#include "reg_helper.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" +#include "dce/dmub_replay.h" + +#include "dml/dcn30/display_mode_vba_30.h" +#include "vm_helper.h" +#include "dcn20/dcn20_vmid.h" + +#include "link_enc_cfg.h" + +#define DC_LOGGER \ + dc->ctx->logger +#define DC_LOGGER_INIT(logger) + +enum dcn31_clk_src_array_id { + DCN31_CLK_SRC_PLL0, + DCN31_CLK_SRC_PLL1, + DCN31_CLK_SRC_PLL2, + DCN31_CLK_SRC_PLL3, + DCN31_CLK_SRC_PLL4, + DCN30_CLK_SRC_TOTAL +}; + +/* begin ********************* + * macros to expand register list macros defined in HW object header files + */ + +/* DCN */ +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_MPC_RMU(reg_name, block, id)\ + 
.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_DWB(reg_name, temp_name, block, id)\ + .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## temp_name + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + reg ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX1_ ## reg_name + +/* MMHUB */ +#define MMHUB_BASE_INNER(seg) \ + MMHUB_BASE__INST0_SEG ## seg + +#define MMHUB_BASE(seg) \ + MMHUB_BASE_INNER(seg) + +#define MMHUB_SR(reg_name)\ + .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +/* CLOCK */ +#define CLK_BASE_INNER(seg) \ + CLK_BASE__INST0_SEG ## seg + +#define CLK_BASE(seg) \ + CLK_BASE_INNER(seg) + +#define CLK_SRI(reg_name, block, inst)\ + .reg_name = CLK_BASE(reg ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## _ ## inst ## _ ## reg_name + + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +#define clk_src_regs(index, pllid)\ +[index] = {\ + CS_COMMON_REG_LIST_DCN3_0(index, pllid),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, C), + clk_src_regs(3, D), + clk_src_regs(4, E) +}; +/* pll_id is remapped in dmub; in the driver it is the logical instance */ +static const struct dce110_clk_src_regs clk_src_regs_b0[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, F), + clk_src_regs(3, G), + clk_src_regs(4, E) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +#define abm_regs(id)\ +[id] = {\ + ABM_DCN302_REG_LIST(id)\ +} + +static const struct dce_abm_registers abm_regs[] = { + abm_regs(0), + abm_regs(1), + abm_regs(2), + abm_regs(3), +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN30(_MASK) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6) +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define vpg_regs(id)\ +[id] = {\ + VPG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_vpg_registers vpg_regs[] =
{ + vpg_regs(0), + vpg_regs(1), + vpg_regs(2), + vpg_regs(3), + vpg_regs(4), + vpg_regs(5), + vpg_regs(6), + vpg_regs(7), + vpg_regs(8), + vpg_regs(9), +}; + +static const struct dcn31_vpg_shift vpg_shift = { + DCN31_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_vpg_mask vpg_mask = { + DCN31_VPG_MASK_SH_LIST(_MASK) +}; + +#define afmt_regs(id)\ +[id] = {\ + AFMT_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_afmt_registers afmt_regs[] = { + afmt_regs(0), + afmt_regs(1), + afmt_regs(2), + afmt_regs(3), + afmt_regs(4), + afmt_regs(5) +}; + +static const struct dcn31_afmt_shift afmt_shift = { + DCN31_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_afmt_mask afmt_mask = { + DCN31_AFMT_MASK_SH_LIST(_MASK) +}; + +#define apg_regs(id)\ +[id] = {\ + APG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_apg_registers apg_regs[] = { + apg_regs(0), + apg_regs(1), + apg_regs(2), + apg_regs(3) +}; + +static const struct dcn31_apg_shift apg_shift = { + DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { + DCN31_APG_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_DCN3_REG_LIST(id)\ +} + +/* Some encoders won't be initialized here - but they're logical, not physical. */ +static const struct dcn10_stream_enc_registers stream_enc_regs[ENGINE_ID_COUNT] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4) +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4) +}; + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN31_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN31_REG_LIST(id), \ +} + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ + DPCS_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ + DPCS_DCN31_MASK_SH_LIST(_MASK) +}; + +#define hpo_dp_stream_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ +} + +static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { + hpo_dp_stream_encoder_reg_list(0), + hpo_dp_stream_encoder_reg_list(1), + hpo_dp_stream_encoder_reg_list(2), + hpo_dp_stream_encoder_reg_list(3), +}; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + +#define hpo_dp_link_encoder_reg_list(id)\ +[id] = {\ + 
DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ + DCN3_1_RDPCSTX_REG_LIST(0),\ + DCN3_1_RDPCSTX_REG_LIST(1),\ + DCN3_1_RDPCSTX_REG_LIST(2),\ + DCN3_1_RDPCSTX_REG_LIST(3),\ + DCN3_1_RDPCSTX_REG_LIST(4)\ +} + +static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { + hpo_dp_link_encoder_reg_list(0), + hpo_dp_link_encoder_reg_list(1), +}; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) +}; + +#define dpp_regs(id)\ +[id] = {\ + DPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn3_dpp_registers dpp_regs[] = { + dpp_regs(0), + dpp_regs(1), + dpp_regs(2), + dpp_regs(3) +}; + +static const struct dcn3_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) +}; + +static const struct dcn3_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN30(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn20_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3) +}; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUXN_IMPCAL = 0, \ + .AUXP_IMPCAL = 0, \ + .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4) +}; + +#define dwbc_regs_dcn3(id)\ +[id] = {\ + DWBC_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_dwbc_registers dwbc30_regs[] = { + dwbc_regs_dcn3(0), +}; + +static const struct dcn30_dwbc_shift dwbc30_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_dwbc_mask dwbc30_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define mcif_wb_regs_dcn3(id)\ +[id] = {\ + MCIF_WB_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { + mcif_wb_regs_dcn3(0) +}; + +static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define dsc_regsDCN20(id)\ +[id] = {\ + DSC_REG_LIST_DCN20(id)\ +} + +static const struct dcn20_dsc_registers dsc_regs[] = { + dsc_regsDCN20(0), + dsc_regsDCN20(1), + dsc_regsDCN20(2) +}; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static const struct dcn30_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN3_0(0), + MPC_REG_LIST_DCN3_0(1), + MPC_REG_LIST_DCN3_0(2), + MPC_REG_LIST_DCN3_0(3), + MPC_OUT_MUX_REG_LIST_DCN3_0(0), + MPC_OUT_MUX_REG_LIST_DCN3_0(1), + MPC_OUT_MUX_REG_LIST_DCN3_0(2), + MPC_OUT_MUX_REG_LIST_DCN3_0(3), + MPC_RMU_GLOBAL_REG_LIST_DCN3AG, + MPC_RMU_REG_LIST_DCN3AG(0), + MPC_RMU_REG_LIST_DCN3AG(1), + //MPC_RMU_REG_LIST_DCN3AG(2), + MPC_DWB_MUX_REG_LIST_DCN3_0(0), +}; + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define optc_regs(id)\ +[id] = 
{OPTC_COMMON_REG_LIST_DCN3_1(id)} + +static const struct dcn_optc_registers optc_regs[] = { + optc_regs(0), + optc_regs(1), + optc_regs(2), + optc_regs(3) +}; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN3_1(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN3_1(_MASK) +}; + +#define hubp_regs(id)\ +[id] = {\ + HUBP_REG_LIST_DCN30(id)\ +} + +static const struct dcn_hubp2_registers hubp_regs[] = { + hubp_regs(0), + hubp_regs(1), + hubp_regs(2), + hubp_regs(3) +}; + + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN31(_MASK) +}; +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN31(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN31(_MASK) +}; + +static const struct dccg_registers dccg_regs = { + DCCG_REG_LIST_DCN31() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN31(_MASK) +}; + + +#define SRII2(reg_name_pre, reg_name_post, id)\ + .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ + ## id ## _ ## reg_name_post ## _BASE_IDX) + \ + reg ## reg_name_pre ## id ## _ ## reg_name_post + + +#define HWSEQ_DCN31_REG_LIST()\ + SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ + SR(DIO_MEM_PWR_CTRL), \ + SR(ODM_MEM_PWR_CTRL3), \ + SR(DMU_MEM_PWR_CNTL), \ + SR(MMHUBBUB_MEM_PWR_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL2), \ + SR(DCFCLK_CNTL),\ + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ + SRII(PIXEL_RATE_CNTL, OTG, 0), \ + SRII(PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PIXEL_RATE_CNTL, OTG, 3),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ + SR(MICROSECOND_TIME_BASE_DIV), \ + SR(MILLISECOND_TIME_BASE_DIV), \ + SR(DISPCLK_FREQ_CHANGE_CNTL), \ + SR(RBBMIF_TIMEOUT_DIS), \ + SR(RBBMIF_TIMEOUT_DIS_2), \ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ + SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ + SR(MPC_CRC_CTRL), \ + SR(MPC_CRC_RESULT_GB), \ + SR(MPC_CRC_RESULT_C), \ + SR(MPC_CRC_RESULT_AR), \ + SR(DOMAIN0_PG_CONFIG), \ + SR(DOMAIN1_PG_CONFIG), \ + SR(DOMAIN2_PG_CONFIG), \ + SR(DOMAIN3_PG_CONFIG), \ + SR(DOMAIN16_PG_CONFIG), \ + SR(DOMAIN17_PG_CONFIG), \ + SR(DOMAIN18_PG_CONFIG), \ + SR(DOMAIN0_PG_STATUS), \ + SR(DOMAIN1_PG_STATUS), \ + SR(DOMAIN2_PG_STATUS), \ + SR(DOMAIN3_PG_STATUS), \ + SR(DOMAIN16_PG_STATUS), \ + SR(DOMAIN17_PG_STATUS), \ + SR(DOMAIN18_PG_STATUS), \ + SR(D1VGA_CONTROL), \ + SR(D2VGA_CONTROL), \ + SR(D3VGA_CONTROL), \ + SR(D4VGA_CONTROL), \ + SR(D5VGA_CONTROL), \ + SR(D6VGA_CONTROL), \ + SR(DC_IP_REQUEST_CNTL), \ + SR(AZALIA_AUDIO_DTO), \ + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_HW_CONTROL) + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN31_REG_LIST() +}; + +#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\ + HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ + HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ 
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ + HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ + HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ + HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN31_MASK_SH_LIST(_MASK) +}; +#define vmid_regs(id)\ +[id] = {\ + DCN20_VMID_REG_LIST(id)\ +} + +static const struct dcn_vmid_registers vmid_regs[] = { + vmid_regs(0), + vmid_regs(1), + vmid_regs(2), + vmid_regs(3), + vmid_regs(4), + vmid_regs(5), + vmid_regs(6), + vmid_regs(7), + vmid_regs(8), + vmid_regs(9), + vmid_regs(10), + vmid_regs(11), + vmid_regs(12), + vmid_regs(13), + vmid_regs(14), + vmid_regs(15) +}; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static const struct resource_caps res_cap_dcn31 = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 5, + .num_stream_encoder = 5, + .num_dig_link_enc = 5, + .num_hpo_dp_stream_encoder = 4, + .num_hpo_dp_link_encoder = 2, + .num_pll = 5, + .num_dwb = 1, + .num_ddc = 5, + .num_vmid = 16, + .num_mpc_3dlut = 2, + .num_dsc = 3, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true, + .ayuv = false, + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + + // 6:1 downscaling ratio: 1000/6 = 166.666 + .max_downscale_factor = { + .argb8888 = 167, + .nv12 = 167, + .fp16 = 167 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_dmcu = true, 
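+ /* DMCU firmware is superseded by DMUB on DCN3.1, hence disable_dmcu. */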
+ .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = false, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 4096, /* up to true 4K */ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = true, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable + .dmub_command_table = true, + .pstate_enabled = true, + .use_max_lb = true, + .enable_mem_low_power = { + .bits = { + .vga = true, + .i2c = true, + .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled + .dscl = true, + .cm = true, + .mpc = true, + .optc = true, + .vpg = true, + .afmt = true, + } + }, + .disable_z10 = true, + .enable_legacy_fast_update = true, + .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disabling Z9 */ + .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, + .using_dml2 = false, +}; + +static const struct dc_panel_config panel_config_defaults = { + .psr = { + .disable_psr = false, + .disallow_psrsu = false, + .disallow_replay = false, + }, + .ilr = { + .optimize_edp_link_rate = true, + }, +}; + +static void dcn31_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN20_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn31_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn3_dpp *dpp = + kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + + if (!dpp) + return NULL; + + if (dpp3_construct(dpp, ctx, inst, + &dpp_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +static struct output_pixel_processor *dcn31_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp = + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dcn31_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct dce_i2c_hw *dcn31_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct mpc *dcn31_mpc_create( + struct dc_context *ctx, + int num_mpcc, + int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), + GFP_KERNEL); + + if (!mpc30) + return NULL; + + dcn30_mpc_construct(mpc30,
ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + num_mpcc, + num_rmu); + + return &mpc30->base; +} + +static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), + GFP_KERNEL); + + if (!hubbub3) + return NULL; + + hubbub31_construct(hubbub3, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask, + dcn3_1_ip.det_buffer_size_kbytes, + dcn3_1_ip.pixel_chunk_size_kbytes, + dcn3_1_ip.config_return_buffer_size_in_kbytes); + + + for (i = 0; i < res_cap_dcn31.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub3->base; +} + +static struct timing_generator *dcn31_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn31_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn31_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + + dcn31_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc20->enc10.base; +} + +/* Create a minimal link encoder object not associated with a particular + * physical connector. 
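+ * Used with DCN3.1's dynamic link-encoder assignment (link_enc_cfg),
+ * where encoders are matched to streams at runtime instead of being
+ * fixed one-to-one with connectors.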
+ * resource_funcs.link_enc_create_minimal + */ +static struct link_encoder *dcn31_link_enc_create_minimal( + struct dc_context *ctx, enum engine_id eng_id) +{ + struct dcn20_link_encoder *enc20; + + if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) + return NULL; + + enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + if (!enc20) + return NULL; + + dcn31_link_encoder_construct_minimal( + enc20, + ctx, + &link_enc_feature, + &link_enc_regs[eng_id - ENGINE_ID_DIGA], + eng_id); + + return &enc20->enc10.base; +} + +static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dcn31_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dcn31_panel_cntl_construct(panel_cntl, init_data); + + return &panel_cntl->base; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); + +} + +static struct audio *dcn31_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct vpg *dcn31_vpg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); + + if (!vpg31) + return NULL; + + vpg31_construct(vpg31, ctx, inst, + &vpg_regs[inst], + &vpg_shift, + &vpg_mask); + + return &vpg31->base; +} + +static struct afmt *dcn31_afmt_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); + + if (!afmt31) + return NULL; + + afmt31_construct(afmt31, ctx, inst, + &afmt_regs[inst], + &afmt_shift, + &afmt_mask); + + // Light sleep by default, no need to power down here + + return &afmt31->base; +} + +static struct apg *dcn31_apg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + + if (!apg31) + return NULL; + + apg31_construct(apg31, ctx, inst, + &apg_regs[inst], + &apg_shift, + &apg_mask); + + return &apg31->base; +} + +static struct stream_encoder *dcn31_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGF) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + afmt = dcn31_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + + dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, + eng_id, vpg, afmt, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; + struct vpg *vpg; + struct apg *apg; + uint32_t hpo_dp_inst; + uint32_t vpg_inst; + uint32_t apg_inst; + + ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); + hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + + /* Mapping of VPG register blocks to 
HPO DP block instance: + * VPG[6] -> HPO_DP[0] + * VPG[7] -> HPO_DP[1] + * VPG[8] -> HPO_DP[2] + * VPG[9] -> HPO_DP[3] + */ + vpg_inst = hpo_dp_inst + 6; + + /* Mapping of APG register blocks to HPO DP block instance: + * APG[0] -> HPO_DP[0] + * APG[1] -> HPO_DP[1] + * APG[2] -> HPO_DP[2] + * APG[3] -> HPO_DP[3] + */ + apg_inst = hpo_dp_inst; + + /* allocate HPO stream encoder and create VPG sub-block */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + apg = dcn31_apg_create(ctx, apg_inst); + + if (!hpo_dp_enc31 || !vpg || !apg) { + kfree(hpo_dp_enc31); + kfree(vpg); + kfree(apg); + return NULL; + } + + dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, + hpo_dp_inst, eng_id, vpg, apg, + &hpo_dp_stream_enc_regs[hpo_dp_inst], + &hpo_dp_se_shift, &hpo_dp_se_mask); + + return &hpo_dp_enc31->base; +} + +static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( + uint8_t inst, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; + + /* allocate HPO link encoder; bail out on allocation failure */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; + + hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, + &hpo_dp_link_enc_regs[inst], + &hpo_dp_le_shift, &hpo_dp_le_mask); + + return &hpo_dp_enc31->base; +} + +static struct dce_hwseq *dcn31_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn31_create_audio, + .create_stream_encoder = dcn31_stream_encoder_create, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, + .create_hwseq = dcn31_hwseq_create, +}; + +static void dcn31_resource_destruct(struct dcn31_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + if (pool->base.stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); + pool->base.stream_enc[i]->vpg = NULL; + } + if (pool->base.stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); + pool->base.stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { + if (pool->base.hpo_dp_stream_enc[i] != NULL) { + if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); + pool->base.hpo_dp_stream_enc[i]->vpg = NULL; + } + if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { + kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); + pool->base.hpo_dp_stream_enc[i]->apg = NULL; + } + kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); + pool->base.hpo_dp_stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { + if (pool->base.hpo_dp_link_enc[i] != NULL) { + kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); + pool->base.hpo_dp_link_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + 
dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn31_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { + if (pool->base.mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->base.mpc_lut[i]); + pool->base.mpc_lut[i] = NULL; + } + if (pool->base.mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->base.mpc_shaper[i]); + pool->base.mpc_shaper[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.multiple_abms[i] != NULL) + dce_abm_destroy(&pool->base.multiple_abms[i]); + } + + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + + if (pool->base.replay != NULL) + dmub_replay_destroy(&pool->base.replay); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); +} + +static struct hubp *dcn31_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + + if (hubp31_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), + GFP_KERNEL); + + if (!dwbc30) { + 
dm_error("DC: failed to create dwbc30!\n"); + return false; + } + + dcn30_dwbc_construct(dwbc30, ctx, + &dwbc30_regs[i], + &dwbc30_shift, + &dwbc30_mask, + i); + + pool->dwbc[i] = &dwbc30->base; + } + return true; +} + +static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), + GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + + dcn30_mmhubbub_construct(mcif_wb30, ctx, + &mcif_wb30_regs[i], + &mcif_wb30_shift, + &mcif_wb30_mask, + i); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +static struct display_stream_compressor *dcn31_dsc_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + return &dsc->base; +} + +static void dcn31_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn31_resource_pool *dcn31_pool = TO_DCN31_RES_POOL(*pool); + + dcn31_resource_destruct(dcn31_pool); + kfree(dcn31_pool); + *pool = NULL; +} + +static struct clock_source *dcn31_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn3_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static bool is_dual_plane(enum surface_pixel_format format) +{ + return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; +} + +int dcn31x_populate_dml_pipes_from_context(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + uint32_t pipe_cnt; + int i; + + dc_assert_fp_enabled(); + + pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + + for (i = 0; i < pipe_cnt; i++) { + pipes[i].pipe.src.gpuvm = 1; + if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE) { + //pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active; + pipes[i].pipe.src.hostvm = dc->vm_pa_config.is_hvm_enabled; + } else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_FALSE) + pipes[i].pipe.src.hostvm = false; + else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_TRUE) + pipes[i].pipe.src.hostvm = true; + } + return pipe_cnt; +} + +int dcn31_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int i, pipe_cnt; + struct resource_context *res_ctx = &context->res_ctx; + struct pipe_ctx *pipe; + bool upscaled = false; + + DC_FP_START(); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + DC_FP_END(); + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing; + + if (!res_ctx->pipe_ctx[i].stream) + continue; + pipe = &res_ctx->pipe_ctx[i]; + timing = &pipe->stream->timing; + if (pipe->plane_state && + (pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height || + 
pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width)) + upscaled = true; + + /* + * Immediate flip can be set dynamically after enabling the plane. + * We need to require support for immediate flip or underflow can be + * intermittently experienced depending on peak b/w requirements. + */ + pipes[pipe_cnt].pipe.src.immediate_flip = true; + pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; + pipes[pipe_cnt].pipe.src.gpuvm = true; + pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; + pipes[pipe_cnt].pipe.src.dcc_rate = 3; + pipes[pipe_cnt].dout.dsc_input_bpc = 0; + DC_FP_START(); + dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); + DC_FP_END(); + + if (pipes[pipe_cnt].dout.dsc_enable) { + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + pipes[pipe_cnt].dout.dsc_input_bpc = 8; + break; + case COLOR_DEPTH_101010: + pipes[pipe_cnt].dout.dsc_input_bpc = 10; + break; + case COLOR_DEPTH_121212: + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + break; + default: + ASSERT(0); + break; + } + } + + pipe_cnt++; + } + context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_1_DEFAULT_DET_SIZE; + dc->config.enable_4to1MPC = false; + if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { + if (is_dual_plane(pipe->plane_state->format) + && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { + dc->config.enable_4to1MPC = true; + } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) { + /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */ + context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; + pipes[0].pipe.src.unbounded_req_mode = true; + } + } else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count + && dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) { + context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64; + } else if (context->stream_count >= 3 && upscaled) { + context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; + } + + return pipe_cnt; +} + +void dcn31_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + DC_FP_START(); + dcn31_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel); + DC_FP_END(); +} + +void +dcn31_populate_dml_writeback_from_context(struct dc *dc, + struct resource_context *res_ctx, + display_e2e_pipe_params_st *pipes) +{ + DC_FP_START(); + dcn30_populate_dml_writeback_from_context(dc, res_ctx, pipes); + DC_FP_END(); +} + +void +dcn31_set_mcif_arb_params(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt) +{ + DC_FP_START(); + dcn30_set_mcif_arb_params(dc, context, pipes, pipe_cnt); + DC_FP_END(); +} + +bool dcn31_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + BW_VAL_TRACE_SETUP(); + + int vlevel = 0; + int pipe_cnt = 0; + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + DC_FP_START(); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); + DC_FP_END(); + + // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + if (pipe_cnt == 0) + fast_validate = false; + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + 
goto validate_out; + } + if (dc->res_pool->funcs->calculate_wm_and_dlg) + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + + BW_VAL_TRACE_END_WATERMARKS(); + + goto validate_out; + +validate_fail: + DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", + dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + + BW_VAL_TRACE_SKIP(fail); + out = false; + +validate_out: + kfree(pipes); + + BW_VAL_TRACE_FINISH(); + + return out; +} + +static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ + *panel_config = panel_config_defaults; +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +static struct resource_funcs dcn31_res_pool_funcs = { + .destroy = dcn31_destroy_resource_pool, + .link_enc_create = dcn31_link_encoder_create, + .link_enc_create_minimal = dcn31_link_enc_create_minimal, + .link_encs_assign = link_enc_cfg_link_encs_assign, + .link_enc_unassign = link_enc_cfg_link_enc_unassign, + .panel_cntl_create = dcn31_panel_cntl_create, + .validate_bandwidth = dcn31_validate_bandwidth, + .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, + .populate_dml_pipes = dcn31_populate_dml_pipes_from_context, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn31_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn31_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .get_panel_config_defaults = dcn31_get_panel_config_defaults, +}; + +static struct clock_source *dcn30_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn31_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static bool dcn31_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn31_resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_dcn31; + + pool->base.funcs = &dcn31_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 600; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/ + dc->caps.max_cursor_size = 256; + 
dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + if (dc->config.forceHBR2CP2520) + dc->caps.force_dp_tps4_for_cp2520 = false; + dc->caps.dp_hpo = true; + dc->caps.dp_hdmi21_pcon_support = true; + dc->caps.edp_dsc_support = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + dc->caps.is_apu = true; + dc->caps.zstate_support = true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; + dc->caps.color.dpp.dgam_rom_caps.pq = 1; + dc->caps.color.dpp.dgam_rom_caps.hlg = 1; + dc->caps.color.dpp.post_csc = 1; + dc->caps.color.dpp.gamma_corr = 1; + dc->caps.color.dpp.dgam_rom_for_yuv = 0; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN31 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + dc->config.use_old_fixed_vs_sequence = true; + + /* Use pipe context based otg sync logic */ + dc->config.use_pipe_ctx_sync_logic = true; + + /* read VBIOS LTTPR caps */ + { + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + /* interop bit is implicit */ + { + dc->caps.vbios_lttpr_aware = true; + } + } + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock Sources for Pixel Clock */ + pool->base.clock_sources[DCN31_CLK_SRC_PLL0] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL1] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + /* move phypllx_pixclk_resync to dmub next */ + if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs_b0[2], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + 
&clk_src_regs_b0[3], false); + } else { + pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + } + + pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + + pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* TODO: DCCG */ + pool->base.dccg = dccg31_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* TODO: IRQ */ + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn31_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* HUBBUB */ + pool->base.hubbub = dcn31_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.hubps[i] = dcn31_hubp_create(ctx, i); + if (pool->base.hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->base.dpps[i] = dcn31_dpp_create(ctx, i); + if (pool->base.dpps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + pool->base.opps[i] = dcn31_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.timing_generators[i] = dcn31_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + pool->base.timing_generator_count = i; + + /* PSR */ + pool->base.psr = dmub_psr_create(ctx); + if (pool->base.psr == NULL) { + dm_error("DC: failed to create psr obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* Replay */ + pool->base.replay = dmub_replay_create(ctx); + if (pool->base.replay == NULL) { + dm_error("DC: failed to create replay obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* ABM */ + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.multiple_abms[i] = dmub_abm_create(ctx, + &abm_regs[i], + &abm_shift, + &abm_mask); + if (pool->base.multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* MPC and DSC */ + pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) 
{ + pool->base.dscs[i] = dcn31_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn31_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn31_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create aux engine!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hw i2c!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && + dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && + !dc->debug.dpia_debug.bits.disable_dpia) { + /* YELLOW CARP B0 has 4 DPIA's */ + pool->base.usb4_dpia_count = 4; + } + + if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1) + pool->base.usb4_dpia_count = 4; + + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn31_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp; + + return true; + +create_fail: + dcn31_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn31_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn31_resource_pool *pool = + kzalloc(sizeof(struct dcn31_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn31_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h new file mode 100644 index 0000000000..901436591e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h @@ -0,0 +1,97 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DCN31_RESOURCE_H_ +#define _DCN31_RESOURCE_H_ + +#include "core_types.h" + +#define TO_DCN31_RES_POOL(pool)\ + container_of(pool, struct dcn31_resource_pool, base) + +extern struct _vcs_dpi_ip_params_st dcn3_1_ip; + +struct dcn31_resource_pool { + struct resource_pool base; +}; + +bool dcn31_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate); +void dcn31_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel); +int dcn31_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate); +void +dcn31_populate_dml_writeback_from_context(struct dc *dc, + struct resource_context *res_ctx, + display_e2e_pipe_params_st *pipes); +void +dcn31_set_mcif_arb_params(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt); + +struct resource_pool *dcn31_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +/*temp: B0 specific before switch to dcn313 headers*/ +#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL +#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e +#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1 +#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f +#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1 + +//PHYPLLF_PIXCLK_RESYNC_CNTL +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L + +//PHYPLLG_PIXCLK_RESYNC_CNTL +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L +#endif +#endif /* _DCN31_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c new file mode 100644 
index 0000000000..c97391edb5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -0,0 +1,2180 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "dm_services.h" +#include "dc.h" + +#include "dcn31/dcn31_init.h" +#include "dcn314/dcn314_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn314_resource.h" + +#include "dcn20/dcn20_resource.h" +#include "dcn30/dcn30_resource.h" +#include "dcn31/dcn31_resource.h" + +#include "dcn10/dcn10_ipp.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn31/dcn31_hubbub.h" +#include "dcn30/dcn30_mpc.h" +#include "dcn31/dcn31_hubp.h" +#include "irq/dcn31/irq_service_dcn31.h" +#include "irq/dcn314/irq_service_dcn314.h" +#include "dcn30/dcn30_dpp.h" +#include "dcn314/dcn314_optc.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn30/dcn30_opp.h" +#include "dcn20/dcn20_dsc.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn30/dcn30_afmt.h" +#include "dcn31/dcn31_dio_link_encoder.h" +#include "dcn314/dcn314_dio_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_link_encoder.h" +#include "dcn31/dcn31_apg.h" +#include "dcn31/dcn31_vpg.h" +#include "dcn31/dcn31_afmt.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "clk_mgr.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dml/display_mode_vba.h" +#include "dml/dcn31/dcn31_fpu.h" +#include "dml/dcn314/dcn314_fpu.h" +#include "dcn314/dcn314_dccg.h" +#include "dcn10/dcn10_resource.h" +#include "dcn31/dcn31_panel_cntl.h" +#include "dcn314/dcn314_hwseq.h" + +#include "dcn30/dcn30_dwb.h" +#include "dcn30/dcn30_mmhubbub.h" + +#include "dcn/dcn_3_1_4_offset.h" +#include "dcn/dcn_3_1_4_sh_mask.h" +#include "dpcs/dpcs_3_1_4_offset.h" +#include "dpcs/dpcs_3_1_4_sh_mask.h" + +#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10 +#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L + +#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0 +#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL + +#include "reg_helper.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "dce/dmub_replay.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" +#include 
"dml/dcn314/display_mode_vba_314.h" +#include "vm_helper.h" +#include "dcn20/dcn20_vmid.h" + +#include "link_enc_cfg.h" + +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0x00009000 + +#define NBIO_BASE__INST0_SEG1 0x00000014 + +#define MAX_INSTANCE 7 +#define MAX_SEGMENT 8 + +#define regBIF_BX2_BIOS_SCRATCH_2 0x003a +#define regBIF_BX2_BIOS_SCRATCH_2_BASE_IDX 1 +#define regBIF_BX2_BIOS_SCRATCH_3 0x003b +#define regBIF_BX2_BIOS_SCRATCH_3_BASE_IDX 1 +#define regBIF_BX2_BIOS_SCRATCH_6 0x003e +#define regBIF_BX2_BIOS_SCRATCH_6_BASE_IDX 1 + +#define DC_LOGGER \ + dc->ctx->logger +#define DC_LOGGER_INIT(logger) + +enum dcn31_clk_src_array_id { + DCN31_CLK_SRC_PLL0, + DCN31_CLK_SRC_PLL1, + DCN31_CLK_SRC_PLL2, + DCN31_CLK_SRC_PLL3, + DCN31_CLK_SRC_PLL4, + DCN30_CLK_SRC_TOTAL +}; + +/* begin ********************* + * macros to expend register list macro defined in HW object header file + */ + +/* DCN */ +/* TODO awful hack. fixup dcn20_dwb.h */ +#undef BASE_INNER +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_DWB(reg_name, temp_name, block, id)\ + .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## temp_name + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + reg ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX2_ ## reg_name + +/* MMHUB */ +#define MMHUB_BASE_INNER(seg) \ + MMHUB_BASE__INST0_SEG ## seg + +#define MMHUB_BASE(seg) \ + MMHUB_BASE_INNER(seg) + +#define MMHUB_SR(reg_name)\ + .reg_name = MMHUB_BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +/* CLOCK */ +#define CLK_BASE_INNER(seg) \ + CLK_BASE__INST0_SEG ## seg + +#define CLK_BASE(seg) \ + CLK_BASE_INNER(seg) + +#define CLK_SRI(reg_name, block, inst)\ + .reg_name = CLK_BASE(reg ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## _ ## inst ## _ ## reg_name + + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +#define clk_src_regs(index, pllid)\ +[index] = {\ + 
CS_COMMON_REG_LIST_DCN3_0(index, pllid),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, C), + clk_src_regs(3, D), + clk_src_regs(4, E) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN3_1_4(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN3_1_4(_MASK) +}; + +#define abm_regs(id)\ +[id] = {\ + ABM_DCN302_REG_LIST(id)\ +} + +static const struct dce_abm_registers abm_regs[] = { + abm_regs(0), + abm_regs(1), + abm_regs(2), + abm_regs(3), +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN30(_MASK) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6) +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define vpg_regs(id)\ +[id] = {\ + VPG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_vpg_registers vpg_regs[] = { + vpg_regs(0), + vpg_regs(1), + vpg_regs(2), + vpg_regs(3), + vpg_regs(4), + vpg_regs(5), + vpg_regs(6), + vpg_regs(7), + vpg_regs(8), + vpg_regs(9), +}; + +static const struct dcn31_vpg_shift vpg_shift = { + DCN31_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_vpg_mask vpg_mask = { + DCN31_VPG_MASK_SH_LIST(_MASK) +}; + +#define afmt_regs(id)\ +[id] = {\ + AFMT_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_afmt_registers afmt_regs[] = { + afmt_regs(0), + afmt_regs(1), + afmt_regs(2), + afmt_regs(3), + afmt_regs(4), + afmt_regs(5) +}; + +static const struct dcn31_afmt_shift afmt_shift = { + DCN31_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_afmt_mask afmt_mask = { + DCN31_AFMT_MASK_SH_LIST(_MASK) +}; + +#define apg_regs(id)\ +[id] = {\ + APG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_apg_registers apg_regs[] = { + apg_regs(0), + apg_regs(1), + apg_regs(2), + apg_regs(3) +}; + +static const struct dcn31_apg_shift apg_shift = { + DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { + DCN31_APG_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_DCN314_REG_LIST(id)\ +} + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4) +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN314(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN314(_MASK) +}; + + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + 
hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4) +}; + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN31_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ +} + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), + DPCS_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), + DPCS_DCN31_MASK_SH_LIST(_MASK) +}; + +#define hpo_dp_stream_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ +} + +static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { + hpo_dp_stream_encoder_reg_list(0), + hpo_dp_stream_encoder_reg_list(1), + hpo_dp_stream_encoder_reg_list(2), + hpo_dp_stream_encoder_reg_list(3) +}; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + + +#define hpo_dp_link_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ + DCN3_1_RDPCSTX_REG_LIST(0),\ + DCN3_1_RDPCSTX_REG_LIST(1),\ + DCN3_1_RDPCSTX_REG_LIST(2),\ +} + +static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { + hpo_dp_link_encoder_reg_list(0), + hpo_dp_link_encoder_reg_list(1), +}; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) +}; + +#define dpp_regs(id)\ +[id] = {\ + DPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn3_dpp_registers dpp_regs[] = { + dpp_regs(0), + dpp_regs(1), + dpp_regs(2), + dpp_regs(3) +}; + +static const struct dcn3_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) +}; + +static const struct dcn3_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN30(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn20_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3) +}; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUXN_IMPCAL = 0, \ + .AUXP_IMPCAL = 0, \ + .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4) +}; + +#define dwbc_regs_dcn3(id)\ +[id] = {\ + DWBC_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_dwbc_registers dwbc30_regs[] = { + dwbc_regs_dcn3(0), +}; + +static const struct dcn30_dwbc_shift dwbc30_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_dwbc_mask dwbc30_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define mcif_wb_regs_dcn3(id)\ +[id] = {\ + 
MCIF_WB_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { + mcif_wb_regs_dcn3(0) +}; + +static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define dsc_regsDCN314(id)\ +[id] = {\ + DSC_REG_LIST_DCN20(id)\ +} + +static const struct dcn20_dsc_registers dsc_regs[] = { + dsc_regsDCN314(0), + dsc_regsDCN314(1), + dsc_regsDCN314(2), + dsc_regsDCN314(3) +}; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static const struct dcn30_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN3_0(0), + MPC_REG_LIST_DCN3_0(1), + MPC_REG_LIST_DCN3_0(2), + MPC_REG_LIST_DCN3_0(3), + MPC_OUT_MUX_REG_LIST_DCN3_0(0), + MPC_OUT_MUX_REG_LIST_DCN3_0(1), + MPC_OUT_MUX_REG_LIST_DCN3_0(2), + MPC_OUT_MUX_REG_LIST_DCN3_0(3), + MPC_RMU_GLOBAL_REG_LIST_DCN3AG, + MPC_RMU_REG_LIST_DCN3AG(0), + MPC_RMU_REG_LIST_DCN3AG(1), + //MPC_RMU_REG_LIST_DCN3AG(2), + MPC_DWB_MUX_REG_LIST_DCN3_0(0), +}; + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define optc_regs(id)\ +[id] = {OPTC_COMMON_REG_LIST_DCN3_14(id)} + +static const struct dcn_optc_registers optc_regs[] = { + optc_regs(0), + optc_regs(1), + optc_regs(2), + optc_regs(3) +}; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN3_14(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN3_14(_MASK) +}; + +#define hubp_regs(id)\ +[id] = {\ + HUBP_REG_LIST_DCN30(id)\ +} + +static const struct dcn_hubp2_registers hubp_regs[] = { + hubp_regs(0), + hubp_regs(1), + hubp_regs(2), + hubp_regs(3) +}; + + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN31(_MASK) +}; +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN31(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN31(_MASK) +}; + +static const struct dccg_registers dccg_regs = { + DCCG_REG_LIST_DCN314() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN314(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN314(_MASK) +}; + + +#define SRII2(reg_name_pre, reg_name_post, id)\ + .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ + ## id ## _ ## reg_name_post ## _BASE_IDX) + \ + reg ## reg_name_pre ## id ## _ ## reg_name_post + + +#define HWSEQ_DCN31_REG_LIST()\ + SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ + SR(DIO_MEM_PWR_CTRL), \ + SR(ODM_MEM_PWR_CTRL3), \ + SR(DMU_MEM_PWR_CNTL), \ + SR(MMHUBBUB_MEM_PWR_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL2), \ + SR(DCFCLK_CNTL),\ + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ + SRII(PIXEL_RATE_CNTL, OTG, 0), \ + SRII(PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PIXEL_RATE_CNTL, OTG, 3),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ + 
SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ + SR(MICROSECOND_TIME_BASE_DIV), \ + SR(MILLISECOND_TIME_BASE_DIV), \ + SR(DISPCLK_FREQ_CHANGE_CNTL), \ + SR(RBBMIF_TIMEOUT_DIS), \ + SR(RBBMIF_TIMEOUT_DIS_2), \ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ + SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ + SR(MPC_CRC_CTRL), \ + SR(MPC_CRC_RESULT_GB), \ + SR(MPC_CRC_RESULT_C), \ + SR(MPC_CRC_RESULT_AR), \ + SR(DOMAIN0_PG_CONFIG), \ + SR(DOMAIN1_PG_CONFIG), \ + SR(DOMAIN2_PG_CONFIG), \ + SR(DOMAIN3_PG_CONFIG), \ + SR(DOMAIN16_PG_CONFIG), \ + SR(DOMAIN17_PG_CONFIG), \ + SR(DOMAIN18_PG_CONFIG), \ + SR(DOMAIN19_PG_CONFIG), \ + SR(DOMAIN0_PG_STATUS), \ + SR(DOMAIN1_PG_STATUS), \ + SR(DOMAIN2_PG_STATUS), \ + SR(DOMAIN3_PG_STATUS), \ + SR(DOMAIN16_PG_STATUS), \ + SR(DOMAIN17_PG_STATUS), \ + SR(DOMAIN18_PG_STATUS), \ + SR(DOMAIN19_PG_STATUS), \ + SR(D1VGA_CONTROL), \ + SR(D2VGA_CONTROL), \ + SR(D3VGA_CONTROL), \ + SR(D4VGA_CONTROL), \ + SR(D5VGA_CONTROL), \ + SR(D6VGA_CONTROL), \ + SR(DC_IP_REQUEST_CNTL), \ + SR(AZALIA_AUDIO_DTO), \ + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_HW_CONTROL) + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN31_REG_LIST() +}; + +#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\ + HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ + HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ + HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ + HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ + HWS_SF(, 
HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN31_MASK_SH_LIST(_MASK) +}; +#define vmid_regs(id)\ +[id] = {\ + DCN20_VMID_REG_LIST(id)\ +} + +static const struct dcn_vmid_registers vmid_regs[] = { + vmid_regs(0), + vmid_regs(1), + vmid_regs(2), + vmid_regs(3), + vmid_regs(4), + vmid_regs(5), + vmid_regs(6), + vmid_regs(7), + vmid_regs(8), + vmid_regs(9), + vmid_regs(10), + vmid_regs(11), + vmid_regs(12), + vmid_regs(13), + vmid_regs(14), + vmid_regs(15) +}; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static const struct resource_caps res_cap_dcn314 = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 5, + .num_stream_encoder = 5, + .num_dig_link_enc = 5, + .num_hpo_dp_stream_encoder = 4, + .num_hpo_dp_link_encoder = 2, + .num_pll = 5, + .num_dwb = 1, + .num_ddc = 5, + .num_vmid = 16, + .num_mpc_3dlut = 2, + .num_dsc = 4, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true, + .ayuv = false, + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + + // 6:1 downscaling ratio: 1000/6 = 166.666 + // 4:1 downscaling ratio for ARGB888 to prevent underflow during P010 playback: 1000/4 = 250 + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 167, + .fp16 = 167 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_z10 = false, + .enable_z9_disable_interface = true, + .minimum_z8_residency_time = 2100, + .psr_skip_crtc_disable = true, + .replay_skip_crtc_disabled = true, + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_dpp_power_gate = false, + .disable_hubp_power_gate = false, + .disable_pplib_clock_request = false, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 4096, /* up to true 4K */ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = true, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable + .dmub_command_table = true, + .pstate_enabled = true, + .use_max_lb = true, + .enable_mem_low_power = { + .bits = { + .vga = true, + .i2c = true, + .dmcu = false, // previously known to cause a hang on S3 cycles if enabled + .dscl = true, + .cm = true, + .mpc = true, + .optc = true, + .vpg = true, + .afmt = true, + } + }, + + .root_clock_optimization = { + .bits = { + .dpp = true, + .dsc = true, + .hdmistream = true, + .hdmichar = true, + .dpstream = true, + .symclk32_se = false, + .symclk32_le = true, + .symclk_fe = true, + .physymclk = true, + .dpiasymclk = true, + } + }, + + .seamless_boot_odm_combine = true, + .using_dml2 = false, +}; + +static const struct dc_debug_options debug_defaults_diags = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = true, + .clock_trace = true, + .disable_dpp_power_gate = true, + .disable_hubp_power_gate = true, + .disable_clock_gate = true, + .disable_pplib_clock_request = true, + .disable_pplib_wm_range = true, 
+ .disable_stutter = false, + .scl_reset_length10 = true, + .dwb_fi_phase = -1, // -1 = disable + .dmub_command_table = true, + .enable_tri_buf = true, + .use_max_lb = true +}; + +static const struct dc_panel_config panel_config_defaults = { + .psr = { + .disable_psr = false, + .disallow_psrsu = false, + .disallow_replay = false, + }, + .ilr = { + .optimize_edp_link_rate = true, + }, +}; + +static void dcn31_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN20_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn31_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn3_dpp *dpp = + kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + + if (!dpp) + return NULL; + + if (dpp3_construct(dpp, ctx, inst, + &dpp_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +static struct output_pixel_processor *dcn31_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp = + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dcn31_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +/* ========================================================== */ + +/* + * DPIA index | Preferred Encoder | Host Router + * 0 | C | 0 + * 1 | First Available | 0 + * 2 | D | 1 + * 3 | First Available | 1 + */ +/* ========================================================== */ +static const enum engine_id dpia_to_preferred_enc_id_table[] = { + ENGINE_ID_DIGC, + ENGINE_ID_DIGC, + ENGINE_ID_DIGD, + ENGINE_ID_DIGD +}; + +static enum engine_id dcn314_get_preferred_eng_id_dpia(unsigned int dpia_index) +{ + return dpia_to_preferred_enc_id_table[dpia_index]; +} + +static struct dce_i2c_hw *dcn31_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct mpc *dcn31_mpc_create( + struct dc_context *ctx, + int num_mpcc, + int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), + GFP_KERNEL); + + if (!mpc30) + return NULL; + + dcn30_mpc_construct(mpc30, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + num_mpcc, + num_rmu); + + return &mpc30->base; +} + +static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), + GFP_KERNEL); + + if (!hubbub3) + return NULL; + + hubbub31_construct(hubbub3, ctx, + 
&hubbub_reg, + &hubbub_shift, + &hubbub_mask, + dcn3_14_ip.det_buffer_size_kbytes, + dcn3_14_ip.pixel_chunk_size_kbytes, + dcn3_14_ip.config_return_buffer_size_in_kbytes); + + + for (i = 0; i < res_cap_dcn314.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub3->base; +} + +static struct timing_generator *dcn31_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn314_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn31_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + + dcn31_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc20->enc10.base; +} + +/* Create a minimal link encoder object not associated with a particular + * physical connector. 
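+ *
+ * Intended for configurations where DIG link encoders are assigned to
+ * links dynamically (see the link_encs_assign/link_enc_unassign hooks
+ * in dcn314_res_pool_funcs below) instead of being fixed per connector.
+ * A hypothetical caller would reach it through the hook:
+ *
+ *	enc = dc->res_pool->funcs->link_enc_create_minimal(ctx, ENGINE_ID_DIGB);
+ *
+ * Implements: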
+ * resource_funcs.link_enc_create_minimal + */ +static struct link_encoder *dcn31_link_enc_create_minimal( + struct dc_context *ctx, enum engine_id eng_id) +{ + struct dcn20_link_encoder *enc20; + + if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) + return NULL; + + enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + if (!enc20) + return NULL; + + dcn31_link_encoder_construct_minimal( + enc20, + ctx, + &link_enc_feature, + &link_enc_regs[eng_id - ENGINE_ID_DIGA], + eng_id); + + return &enc20->enc10.base; +} + +static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dcn31_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dcn31_panel_cntl_construct(panel_cntl, init_data); + + return &panel_cntl->base; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); + +} + +static struct audio *dcn31_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct vpg *dcn31_vpg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); + + if (!vpg31) + return NULL; + + vpg31_construct(vpg31, ctx, inst, + &vpg_regs[inst], + &vpg_shift, + &vpg_mask); + + return &vpg31->base; +} + +static struct afmt *dcn31_afmt_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); + + if (!afmt31) + return NULL; + + afmt31_construct(afmt31, ctx, inst, + &afmt_regs[inst], + &afmt_shift, + &afmt_mask); + + // Light sleep by default, no need to power down here + + return &afmt31->base; +} + +static struct apg *dcn31_apg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + + if (!apg31) + return NULL; + + apg31_construct(apg31, ctx, inst, + &apg_regs[inst], + &apg_shift, + &apg_mask); + + return &apg31->base; +} + +static struct stream_encoder *dcn314_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id < ENGINE_ID_DIGF) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + afmt = dcn31_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + + dcn314_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, + eng_id, vpg, afmt, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; + struct vpg *vpg; + struct apg *apg; + uint32_t hpo_dp_inst; + uint32_t vpg_inst; + uint32_t apg_inst; + + ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); + hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + + /* Mapping of VPG register blocks to 
HPO DP block instance: + * VPG[6] -> HPO_DP[0] + * VPG[7] -> HPO_DP[1] + * VPG[8] -> HPO_DP[2] + * VPG[9] -> HPO_DP[3] + */ + //Uses offset index 5-8, but actually maps to vpg_inst 6-9 + vpg_inst = hpo_dp_inst + 5; + + /* Mapping of APG register blocks to HPO DP block instance: + * APG[0] -> HPO_DP[0] + * APG[1] -> HPO_DP[1] + * APG[2] -> HPO_DP[2] + * APG[3] -> HPO_DP[3] + */ + apg_inst = hpo_dp_inst; + + /* allocate HPO stream encoder and create VPG sub-block */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + apg = dcn31_apg_create(ctx, apg_inst); + + if (!hpo_dp_enc31 || !vpg || !apg) { + kfree(hpo_dp_enc31); + kfree(vpg); + kfree(apg); + return NULL; + } + + dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, + hpo_dp_inst, eng_id, vpg, apg, + &hpo_dp_stream_enc_regs[hpo_dp_inst], + &hpo_dp_se_shift, &hpo_dp_se_mask); + + return &hpo_dp_enc31->base; +} + +static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( + uint8_t inst, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; + + /* allocate HPO link encoder */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + + hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, + &hpo_dp_link_enc_regs[inst], + &hpo_dp_le_shift, &hpo_dp_le_mask); + + return &hpo_dp_enc31->base; +} + +static struct dce_hwseq *dcn314_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn31_create_audio, + .create_stream_encoder = dcn314_stream_encoder_create, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, + .create_hwseq = dcn314_hwseq_create, +}; + +static void dcn314_resource_destruct(struct dcn314_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + if (pool->base.stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); + pool->base.stream_enc[i]->vpg = NULL; + } + if (pool->base.stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); + pool->base.stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { + if (pool->base.hpo_dp_stream_enc[i] != NULL) { + if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); + pool->base.hpo_dp_stream_enc[i]->vpg = NULL; + } + if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { + kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); + pool->base.hpo_dp_stream_enc[i]->apg = NULL; + } + kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); + pool->base.hpo_dp_stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { + if (pool->base.hpo_dp_link_enc[i] != NULL) { + kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); + pool->base.hpo_dp_link_enc[i] = NULL; + } + } + + for (i = 0; i < 
pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn31_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) + dal_irq_service_destroy(&pool->base.irqs); + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { + if (pool->base.mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->base.mpc_lut[i]); + pool->base.mpc_lut[i] = NULL; + } + if (pool->base.mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->base.mpc_shaper[i]); + pool->base.mpc_shaper[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.multiple_abms[i] != NULL) + dce_abm_destroy(&pool->base.multiple_abms[i]); + } + + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + + if (pool->base.replay != NULL) + dmub_replay_destroy(&pool->base.replay); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); +} + +static struct hubp *dcn31_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + + if (hubp31_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = 
kzalloc(sizeof(struct dcn30_dwbc),
+				GFP_KERNEL);
+
+		if (!dwbc30) {
+			dm_error("DC: failed to create dwbc30!\n");
+			return false;
+		}
+
+		dcn30_dwbc_construct(dwbc30, ctx,
+				&dwbc30_regs[i],
+				&dwbc30_shift,
+				&dwbc30_mask,
+				i);
+
+		pool->dwbc[i] = &dwbc30->base;
+	}
+	return true;
+}
+
+static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+	int i;
+	uint32_t pipe_count = pool->res_cap->num_dwb;
+
+	for (i = 0; i < pipe_count; i++) {
+		struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub),
+				GFP_KERNEL);
+
+		if (!mcif_wb30) {
+			dm_error("DC: failed to create mcif_wb30!\n");
+			return false;
+		}
+
+		dcn30_mmhubbub_construct(mcif_wb30, ctx,
+				&mcif_wb30_regs[i],
+				&mcif_wb30_shift,
+				&mcif_wb30_mask,
+				i);
+
+		pool->mcif_wb[i] = &mcif_wb30->base;
+	}
+	return true;
+}
+
+static struct display_stream_compressor *dcn314_dsc_create(
+	struct dc_context *ctx, uint32_t inst)
+{
+	struct dcn20_dsc *dsc =
+		kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
+
+	if (!dsc) {
+		BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+
+	dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
+	return &dsc->base;
+}
+
+static void dcn314_destroy_resource_pool(struct resource_pool **pool)
+{
+	struct dcn314_resource_pool *dcn314_pool = TO_DCN314_RES_POOL(*pool);
+
+	dcn314_resource_destruct(dcn314_pool);
+	kfree(dcn314_pool);
+	*pool = NULL;
+}
+
+static struct clock_source *dcn31_clock_source_create(
+		struct dc_context *ctx,
+		struct dc_bios *bios,
+		enum clock_source_id id,
+		const struct dce110_clk_src_regs *regs,
+		bool dp_clk_src)
+{
+	struct dce110_clk_src *clk_src =
+		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+	if (!clk_src)
+		return NULL;
+
+	if (dcn31_clk_src_construct(clk_src, ctx, bios, id,
+			regs, &cs_shift, &cs_mask)) {
+		clk_src->base.dp_clk_src = dp_clk_src;
+		return &clk_src->base;
+	}
+
+	BREAK_TO_DEBUGGER();
+	kfree(clk_src);
+	return NULL;
+}
+
+static int dcn314_populate_dml_pipes_from_context(
+	struct dc *dc, struct dc_state *context,
+	display_e2e_pipe_params_st *pipes,
+	bool fast_validate)
+{
+	int pipe_cnt;
+
+	DC_FP_START();
+	pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate);
+	DC_FP_END();
+
+	return pipe_cnt;
+}
+
+static struct dc_cap_funcs cap_funcs = {
+	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
+};
+
+static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+	DC_FP_START();
+	dcn314_update_bw_bounding_box_fpu(dc, bw_params);
+	DC_FP_END();
+}
+
+static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+	*panel_config = panel_config_defaults;
+}
+
+static bool filter_modes_for_single_channel_workaround(struct dc *dc,
+		struct dc_state *context)
+{
+	// Filter out the 2K@240Hz + 8K@24fps timing combination when only a single LPDDR DIMM is populated
+	if (dc->clk_mgr->bw_params->vram_type == 34 &&
+	    dc->clk_mgr->bw_params->num_channels < 2 &&
+	    context->stream_count > 1) {
+		int total_phy_pix_clk = 0;
+
+		for (int i = 0; i < context->stream_count; i++)
+			if (context->res_ctx.pipe_ctx[i].stream)
+				total_phy_pix_clk += context->res_ctx.pipe_ctx[i].stream->phy_pix_clk;
+
+		if (total_phy_pix_clk >= (1148928 + 826260)) // 2K@240Hz + 8K@24fps
+			return true;
+	}
+	return false;
+}
+
+bool dcn314_validate_bandwidth(struct dc *dc,
+		struct dc_state *context,
+		bool fast_validate)
+{
+	bool out = false;
+
+	BW_VAL_TRACE_SETUP();
+
+	int vlevel = 0;
+	int pipe_cnt = 0;
+	display_e2e_pipe_params_st *pipes =
kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + if (filter_modes_for_single_channel_workaround(dc, context)) + goto validate_fail; + + DC_FP_START(); + // do not support self refresh only + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false); + DC_FP_END(); + + // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + if (pipe_cnt == 0) + fast_validate = false; + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + goto validate_out; + } + if (dc->res_pool->funcs->calculate_wm_and_dlg) + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + + BW_VAL_TRACE_END_WATERMARKS(); + + goto validate_out; + +validate_fail: + DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", + dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + + BW_VAL_TRACE_SKIP(fail); + out = false; + +validate_out: + kfree(pipes); + + BW_VAL_TRACE_FINISH(); + + return out; +} + +static struct resource_funcs dcn314_res_pool_funcs = { + .destroy = dcn314_destroy_resource_pool, + .link_enc_create = dcn31_link_encoder_create, + .link_enc_create_minimal = dcn31_link_enc_create_minimal, + .link_encs_assign = link_enc_cfg_link_encs_assign, + .link_enc_unassign = link_enc_cfg_link_enc_unassign, + .panel_cntl_create = dcn31_panel_cntl_create, + .validate_bandwidth = dcn314_validate_bandwidth, + .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, + .populate_dml_pipes = dcn314_populate_dml_pipes_from_context, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn30_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn314_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .get_panel_config_defaults = dcn314_get_panel_config_defaults, + .get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia, +}; + +static struct clock_source *dcn30_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn31_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + BREAK_TO_DEBUGGER(); + kfree(clk_src); + return NULL; +} + +static bool dcn314_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn314_resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_dcn314; + pool->base.funcs = &dcn314_res_pool_funcs; + + 
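/*
+	 * Everything below first hardcodes per-ASIC caps, then creates the HW
+	 * sub-blocks; any allocation failure unwinds through the create_fail
+	 * label into dcn314_resource_destruct(). A hypothetical caller pairs
+	 * construction and teardown through the pool hooks:
+	 *
+	 *	struct resource_pool *pool =
+	 *		dcn314_create_resource_pool(init_data, dc);
+	 *	if (pool)
+	 *		pool->funcs->destroy(&pool);
+	 */
+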
+	/*************************************************
+	 * Resource + ASIC cap hardcoding                *
+	 *************************************************/
+	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+	pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+	dc->caps.max_downscale_ratio = 400;
+	dc->caps.i2c_speed_in_khz = 100;
+	dc->caps.i2c_speed_in_khz_hdcp = 100;
+	dc->caps.max_cursor_size = 256;
+	dc->caps.min_horizontal_blanking_period = 80;
+	dc->caps.dmdata_alloc_size = 2048;
+	dc->caps.max_slave_planes = 2;
+	dc->caps.max_slave_yuv_planes = 2;
+	dc->caps.max_slave_rgb_planes = 2;
+	dc->caps.post_blend_color_processing = true;
+	dc->caps.force_dp_tps4_for_cp2520 = true;
+	if (dc->config.forceHBR2CP2520)
+		dc->caps.force_dp_tps4_for_cp2520 = false;
+	dc->caps.dp_hpo = true;
+	dc->caps.dp_hdmi21_pcon_support = true;
+	dc->caps.edp_dsc_support = true;
+	dc->caps.extended_aux_timeout_support = true;
+	dc->caps.dmcub_support = true;
+	dc->caps.is_apu = true;
+	dc->caps.seamless_odm = true;
+
+	dc->caps.zstate_support = true;
+
+	/* Color pipeline capabilities */
+	dc->caps.color.dpp.dcn_arch = 1;
+	dc->caps.color.dpp.input_lut_shared = 0;
+	dc->caps.color.dpp.icsc = 1;
+	dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
+	dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+	dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+	dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
+	dc->caps.color.dpp.dgam_rom_caps.pq = 1;
+	dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
+	dc->caps.color.dpp.post_csc = 1;
+	dc->caps.color.dpp.gamma_corr = 1;
+	dc->caps.color.dpp.dgam_rom_for_yuv = 0;
+
+	dc->caps.color.dpp.hw_3d_lut = 1;
+	dc->caps.color.dpp.ogam_ram = 1;
+	// no OGAM ROM on DCN314
+	dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+	dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+	dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+	dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+	dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+	dc->caps.color.dpp.ocsc = 0;
+
+	dc->caps.color.mpc.gamut_remap = 1;
+	dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2
+	dc->caps.color.mpc.ogam_ram = 1;
+	dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+	dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+	dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+	dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+	dc->caps.color.mpc.ocsc = 1;
+
+	dc->caps.max_disp_clock_khz_at_vmin = 650000;
+
+	/* Use pipe context based otg sync logic */
+	dc->config.use_pipe_ctx_sync_logic = true;
+
+	/* read VBIOS LTTPR caps */
+	{
+		if (ctx->dc_bios->funcs->get_lttpr_caps) {
+			enum bp_result bp_query_result;
+			uint8_t is_vbios_lttpr_enable = 0;
+
+			bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+			dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+		}
+
+		/* interop bit is implicit */
+		{
+			dc->caps.vbios_lttpr_aware = true;
+		}
+	}
+
+	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+		dc->debug = debug_defaults_drv;
+	else
+		dc->debug = debug_defaults_diags;
+
+	/* Disable pipe power gating */
+	dc->debug.disable_dpp_power_gate = true;
+	dc->debug.disable_hubp_power_gate = true;
+
+	/* Disable root clock optimization */
+	dc->debug.root_clock_optimization.u32All = 0;
+
+	// Init the vm_helper
+	if (dc->vm_helper)
+		vm_helper_init(dc->vm_helper, 16);
+
+	/*************************************************
+	 * Create resources *
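+	 * (in creation order: clock sources, DCCG,     *
+	 * IRQ service, HUBBUB, HUBPs/DPPs, OPPs,       *
+	 * OPTCs, PSR, Replay, ABM, MPC, DSC,           *
+	 * DWB/MMHUBBUB, AUX/I2C engines, then audio    *
+	 * and stream encoders via resource_construct() *
+	 * and the HW sequencer)                        *
+	 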
*************************************************/ + + /* Clock Sources for Pixel Clock*/ + pool->base.clock_sources[DCN31_CLK_SRC_PLL0] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL1] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = + dcn30_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + + pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + pool->base.dccg = dccg314_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn314_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* HUBBUB */ + pool->base.hubbub = dcn31_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.hubps[i] = dcn31_hubp_create(ctx, i); + if (pool->base.hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->base.dpps[i] = dcn31_dpp_create(ctx, i); + if (pool->base.dpps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + pool->base.opps[i] = dcn31_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.timing_generators[i] = dcn31_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + pool->base.timing_generator_count = i; + + /* PSR */ + pool->base.psr = dmub_psr_create(ctx); + if (pool->base.psr == NULL) { + dm_error("DC: failed to create psr obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* Replay */ + pool->base.replay = dmub_replay_create(ctx); + if (pool->base.replay == NULL) { + dm_error("DC: failed to create replay obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* ABM */ + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.multiple_abms[i] = dmub_abm_create(ctx, + &abm_regs[i], + &abm_shift, + &abm_mask); + if (pool->base.multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto 
create_fail; + } + } + + /* MPC and DSC */ + pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn314_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn31_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn31_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* DCN314 has 4 DPIA */ + pool->base.usb4_dpia_count = 4; + + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn314_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + dc->dcn_ip->max_num_dpp = dcn3_14_ip.max_num_dpp; + + return true; + +create_fail: + + dcn314_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn314_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn314_resource_pool *pool = + kzalloc(sizeof(struct dcn314_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn314_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h new file mode 100644 index 0000000000..49ffe71018 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DCN314_RESOURCE_H_ +#define _DCN314_RESOURCE_H_ + +#include "core_types.h" + +extern struct _vcs_dpi_ip_params_st dcn3_14_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc; + +#define TO_DCN314_RES_POOL(pool)\ + container_of(pool, struct dcn314_resource_pool, base) + +struct dcn314_resource_pool { + struct resource_pool base; +}; + +bool dcn314_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate); + +struct resource_pool *dcn314_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +#endif /* _DCN314_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c new file mode 100644 index 0000000000..515ba435f7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -0,0 +1,2153 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#include "dm_services.h" +#include "dc.h" + +#include "dcn31/dcn31_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn315_resource.h" + +#include "dcn20/dcn20_resource.h" +#include "dcn30/dcn30_resource.h" +#include "dcn31/dcn31_resource.h" + +#include "dcn10/dcn10_ipp.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn31/dcn31_hubbub.h" +#include "dcn30/dcn30_mpc.h" +#include "dcn31/dcn31_hubp.h" +#include "irq/dcn315/irq_service_dcn315.h" +#include "dcn30/dcn30_dpp.h" +#include "dcn31/dcn31_optc.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn30/dcn30_opp.h" +#include "dcn20/dcn20_dsc.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn30/dcn30_afmt.h" +#include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_link_encoder.h" +#include "dcn31/dcn31_apg.h" +#include "dcn31/dcn31_dio_link_encoder.h" +#include "dcn31/dcn31_vpg.h" +#include "dcn31/dcn31_afmt.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "clk_mgr.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dml/display_mode_vba.h" +#include "dml/dcn31/dcn31_fpu.h" +#include "dcn31/dcn31_dccg.h" +#include "dcn10/dcn10_resource.h" +#include "dcn31/dcn31_panel_cntl.h" + +#include "dcn30/dcn30_dwb.h" +#include "dcn30/dcn30_mmhubbub.h" + +#include "dcn/dcn_3_1_5_offset.h" +#include "dcn/dcn_3_1_5_sh_mask.h" +#include "dpcs/dpcs_4_2_2_offset.h" +#include "dpcs/dpcs_4_2_2_sh_mask.h" + +#define NBIO_BASE__INST0_SEG0 0x00000000 +#define NBIO_BASE__INST0_SEG1 0x00000014 +#define NBIO_BASE__INST0_SEG2 0x00000D20 +#define NBIO_BASE__INST0_SEG3 0x00010400 +#define NBIO_BASE__INST0_SEG4 0x0241B000 +#define NBIO_BASE__INST0_SEG5 0x04040000 + +#define DPCS_BASE__INST0_SEG0 0x00000012 +#define DPCS_BASE__INST0_SEG1 0x000000C0 +#define DPCS_BASE__INST0_SEG2 0x000034C0 +#define DPCS_BASE__INST0_SEG3 0x00009000 +#define DPCS_BASE__INST0_SEG4 0x02403C00 +#define DPCS_BASE__INST0_SEG5 0 + +#define DCN_BASE__INST0_SEG0 0x00000012 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0x00009000 +#define DCN_BASE__INST0_SEG4 0x02403C00 +#define DCN_BASE__INST0_SEG5 0 + +#define regBIF_BX_PF2_RSMU_INDEX 0x0000 +#define regBIF_BX_PF2_RSMU_INDEX_BASE_IDX 1 +#define regBIF_BX_PF2_RSMU_DATA 0x0001 +#define regBIF_BX_PF2_RSMU_DATA_BASE_IDX 1 +#define regBIF_BX2_BIOS_SCRATCH_6 0x003e +#define regBIF_BX2_BIOS_SCRATCH_6_BASE_IDX 1 +#define BIF_BX2_BIOS_SCRATCH_6__BIOS_SCRATCH_6__SHIFT 0x0 +#define BIF_BX2_BIOS_SCRATCH_6__BIOS_SCRATCH_6_MASK 0xFFFFFFFFL +#define regBIF_BX2_BIOS_SCRATCH_2 0x003a +#define regBIF_BX2_BIOS_SCRATCH_2_BASE_IDX 1 +#define BIF_BX2_BIOS_SCRATCH_2__BIOS_SCRATCH_2__SHIFT 0x0 +#define BIF_BX2_BIOS_SCRATCH_2__BIOS_SCRATCH_2_MASK 0xFFFFFFFFL +#define regBIF_BX2_BIOS_SCRATCH_3 0x003b +#define regBIF_BX2_BIOS_SCRATCH_3_BASE_IDX 1 +#define BIF_BX2_BIOS_SCRATCH_3__BIOS_SCRATCH_3__SHIFT 0x0 +#define BIF_BX2_BIOS_SCRATCH_3__BIOS_SCRATCH_3_MASK 0xFFFFFFFFL + +#define regDCHUBBUB_DEBUG_CTRL_0 0x04d6 +#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2 +#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10 +#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L + +#include "reg_helper.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" + +#include 
"dml/dcn30/display_mode_vba_30.h" +#include "vm_helper.h" +#include "dcn20/dcn20_vmid.h" + +#include "link_enc_cfg.h" + +#define DCN3_15_MAX_DET_SIZE 384 +#define DCN3_15_CRB_SEGMENT_SIZE_KB 64 +#define DCN3_15_MAX_DET_SEGS (DCN3_15_MAX_DET_SIZE / DCN3_15_CRB_SEGMENT_SIZE_KB) +/* Minimum 3 extra segments need to be in compbuf and claimable to guarantee seamless mpo transitions */ +#define MIN_RESERVED_DET_SEGS 3 + +enum dcn31_clk_src_array_id { + DCN31_CLK_SRC_PLL0, + DCN31_CLK_SRC_PLL1, + DCN31_CLK_SRC_PLL2, + DCN31_CLK_SRC_PLL3, + DCN31_CLK_SRC_PLL4, + DCN30_CLK_SRC_TOTAL +}; + +/* begin ********************* + * macros to expend register list macro defined in HW object header file + */ + +/* DCN */ +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_DWB(reg_name, temp_name, block, id)\ + .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## temp_name + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + reg ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX2_ ## reg_name + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +#define clk_src_regs(index, pllid)\ +[index] = {\ + CS_COMMON_REG_LIST_DCN3_0(index, pllid),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, C), + clk_src_regs(3, D), + clk_src_regs(4, E) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +#define abm_regs(id)\ +[id] = {\ + ABM_DCN302_REG_LIST(id)\ +} + +static const struct dce_abm_registers abm_regs[] = { + abm_regs(0), + abm_regs(1), + abm_regs(2), + abm_regs(3), +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN30(_MASK) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct 
dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6) +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define vpg_regs(id)\ +[id] = {\ + VPG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_vpg_registers vpg_regs[] = { + vpg_regs(0), + vpg_regs(1), + vpg_regs(2), + vpg_regs(3), + vpg_regs(4), + vpg_regs(5), + vpg_regs(6), + vpg_regs(7), + vpg_regs(8), + vpg_regs(9), +}; + +static const struct dcn31_vpg_shift vpg_shift = { + DCN31_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_vpg_mask vpg_mask = { + DCN31_VPG_MASK_SH_LIST(_MASK) +}; + +#define afmt_regs(id)\ +[id] = {\ + AFMT_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_afmt_registers afmt_regs[] = { + afmt_regs(0), + afmt_regs(1), + afmt_regs(2), + afmt_regs(3), + afmt_regs(4), + afmt_regs(5) +}; + +static const struct dcn31_afmt_shift afmt_shift = { + DCN31_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_afmt_mask afmt_mask = { + DCN31_AFMT_MASK_SH_LIST(_MASK) +}; + +#define apg_regs(id)\ +[id] = {\ + APG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_apg_registers apg_regs[] = { + apg_regs(0), + apg_regs(1), + apg_regs(2), + apg_regs(3) +}; + +static const struct dcn31_apg_shift apg_shift = { + DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { + DCN31_APG_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_DCN3_REG_LIST(id)\ +} + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4) +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4) +}; + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN31_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN31_REG_LIST(id), \ +} + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ + DPCS_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ + DPCS_DCN31_MASK_SH_LIST(_MASK) +}; + +#define 
hpo_dp_stream_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ +} + +static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { + hpo_dp_stream_encoder_reg_list(0), + hpo_dp_stream_encoder_reg_list(1), + hpo_dp_stream_encoder_reg_list(2), + hpo_dp_stream_encoder_reg_list(3), +}; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + + +#define hpo_dp_link_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ + DCN3_1_RDPCSTX_REG_LIST(0),\ + DCN3_1_RDPCSTX_REG_LIST(1),\ + DCN3_1_RDPCSTX_REG_LIST(2),\ + DCN3_1_RDPCSTX_REG_LIST(3),\ + DCN3_1_RDPCSTX_REG_LIST(4)\ +} + +static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { + hpo_dp_link_encoder_reg_list(0), + hpo_dp_link_encoder_reg_list(1), +}; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) +}; + +#define dpp_regs(id)\ +[id] = {\ + DPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn3_dpp_registers dpp_regs[] = { + dpp_regs(0), + dpp_regs(1), + dpp_regs(2), + dpp_regs(3) +}; + +static const struct dcn3_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) +}; + +static const struct dcn3_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN30(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn20_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3) +}; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUXN_IMPCAL = 0, \ + .AUXP_IMPCAL = 0, \ + .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4) +}; + +#define dwbc_regs_dcn3(id)\ +[id] = {\ + DWBC_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_dwbc_registers dwbc30_regs[] = { + dwbc_regs_dcn3(0), +}; + +static const struct dcn30_dwbc_shift dwbc30_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_dwbc_mask dwbc30_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define mcif_wb_regs_dcn3(id)\ +[id] = {\ + MCIF_WB_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { + mcif_wb_regs_dcn3(0) +}; + +static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define dsc_regsDCN20(id)\ +[id] = {\ + DSC_REG_LIST_DCN20(id)\ +} + +static const struct dcn20_dsc_registers dsc_regs[] = { + dsc_regsDCN20(0), + dsc_regsDCN20(1), + dsc_regsDCN20(2) +}; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static const struct dcn30_mpc_registers mpc_regs = { + 
MPC_REG_LIST_DCN3_0(0), + MPC_REG_LIST_DCN3_0(1), + MPC_REG_LIST_DCN3_0(2), + MPC_REG_LIST_DCN3_0(3), + MPC_OUT_MUX_REG_LIST_DCN3_0(0), + MPC_OUT_MUX_REG_LIST_DCN3_0(1), + MPC_OUT_MUX_REG_LIST_DCN3_0(2), + MPC_OUT_MUX_REG_LIST_DCN3_0(3), + MPC_DWB_MUX_REG_LIST_DCN3_0(0), +}; + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define optc_regs(id)\ +[id] = {OPTC_COMMON_REG_LIST_DCN3_1(id)} + +static const struct dcn_optc_registers optc_regs[] = { + optc_regs(0), + optc_regs(1), + optc_regs(2), + optc_regs(3) +}; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN3_1(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN3_1(_MASK) +}; + +#define hubp_regs(id)\ +[id] = {\ + HUBP_REG_LIST_DCN30(id)\ +} + +static const struct dcn_hubp2_registers hubp_regs[] = { + hubp_regs(0), + hubp_regs(1), + hubp_regs(2), + hubp_regs(3) +}; + + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN31(_MASK) +}; +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN31(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN31(_MASK) +}; + +static const struct dccg_registers dccg_regs = { + DCCG_REG_LIST_DCN31() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN31(_MASK) +}; + + +#define SRII2(reg_name_pre, reg_name_post, id)\ + .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ + ## id ## _ ## reg_name_post ## _BASE_IDX) + \ + reg ## reg_name_pre ## id ## _ ## reg_name_post + + +#define HWSEQ_DCN31_REG_LIST()\ + SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ + SR(DIO_MEM_PWR_CTRL), \ + SR(ODM_MEM_PWR_CTRL3), \ + SR(DMU_MEM_PWR_CNTL), \ + SR(MMHUBBUB_MEM_PWR_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL2), \ + SR(DCFCLK_CNTL),\ + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ + SRII(PIXEL_RATE_CNTL, OTG, 0), \ + SRII(PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PIXEL_RATE_CNTL, OTG, 3),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ + SR(MICROSECOND_TIME_BASE_DIV), \ + SR(MILLISECOND_TIME_BASE_DIV), \ + SR(DISPCLK_FREQ_CHANGE_CNTL), \ + SR(RBBMIF_TIMEOUT_DIS), \ + SR(RBBMIF_TIMEOUT_DIS_2), \ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ + SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ + SR(MPC_CRC_CTRL), \ + SR(MPC_CRC_RESULT_GB), \ + SR(MPC_CRC_RESULT_C), \ + SR(MPC_CRC_RESULT_AR), \ + SR(DOMAIN0_PG_CONFIG), \ + SR(DOMAIN1_PG_CONFIG), \ + SR(DOMAIN2_PG_CONFIG), \ + SR(DOMAIN3_PG_CONFIG), \ + SR(DOMAIN16_PG_CONFIG), \ + SR(DOMAIN17_PG_CONFIG), \ + SR(DOMAIN18_PG_CONFIG), \ + SR(DOMAIN0_PG_STATUS), \ + SR(DOMAIN1_PG_STATUS), \ + SR(DOMAIN2_PG_STATUS), \ + SR(DOMAIN3_PG_STATUS), \ + SR(DOMAIN16_PG_STATUS), \ + SR(DOMAIN17_PG_STATUS), \ + SR(DOMAIN18_PG_STATUS), \ + SR(D1VGA_CONTROL), \ + SR(D2VGA_CONTROL), \ + SR(D3VGA_CONTROL), \ + SR(D4VGA_CONTROL), \ + SR(D5VGA_CONTROL), \ + SR(D6VGA_CONTROL), \ + 
SR(DC_IP_REQUEST_CNTL), \ + SR(AZALIA_AUDIO_DTO), \ + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_HW_CONTROL) + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN31_REG_LIST() +}; + +#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\ + HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ + HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ + HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ + HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ + HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN31_MASK_SH_LIST(_MASK) +}; +#define vmid_regs(id)\ +[id] = {\ + DCN20_VMID_REG_LIST(id)\ +} + +static const struct dcn_vmid_registers vmid_regs[] = { + vmid_regs(0), + vmid_regs(1), + vmid_regs(2), + vmid_regs(3), + vmid_regs(4), + vmid_regs(5), + vmid_regs(6), + vmid_regs(7), + vmid_regs(8), + vmid_regs(9), + vmid_regs(10), + vmid_regs(11), + vmid_regs(12), + vmid_regs(13), + vmid_regs(14), + vmid_regs(15) +}; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static const struct resource_caps res_cap_dcn31 = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 5, + .num_stream_encoder = 5, + .num_dig_link_enc = 5, + .num_hpo_dp_stream_encoder = 4, + .num_hpo_dp_link_encoder = 2, + .num_pll = 5, + .num_dwb = 1, + .num_ddc = 5, + .num_vmid = 16, + .num_mpc_3dlut = 2, + .num_dsc = 3, +}; + +static const struct dc_plane_cap 
plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true, + .ayuv = false, + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + + // 6:1 downscaling ratio: 1000/6 = 166.666 + .max_downscale_factor = { + .argb8888 = 167, + .nv12 = 167, + .fp16 = 167 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_z10 = true, /*hw not support it*/ + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = false, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 4096,/*upto true 4k*/ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable, + .dmub_command_table = true, + .pstate_enabled = true, + .use_max_lb = true, + .enable_mem_low_power = { + .bits = { + .vga = true, + .i2c = true, + .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled + .dscl = true, + .cm = true, + .mpc = true, + .optc = true, + .vpg = true, + .afmt = true, + } + }, + .enable_legacy_fast_update = true, + .psr_power_use_phy_fsm = 0, + .using_dml2 = false, +}; + +static const struct dc_panel_config panel_config_defaults = { + .psr = { + .disable_psr = false, + .disallow_psrsu = false, + .disallow_replay = false, + }, + .ilr = { + .optimize_edp_link_rate = true, + }, +}; + +static void dcn31_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN20_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn31_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn3_dpp *dpp = + kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + + if (!dpp) + return NULL; + + if (dpp3_construct(dpp, ctx, inst, + &dpp_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +static struct output_pixel_processor *dcn31_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp = + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dcn31_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct dce_i2c_hw *dcn31_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct 
dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct mpc *dcn31_mpc_create( + struct dc_context *ctx, + int num_mpcc, + int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), + GFP_KERNEL); + + if (!mpc30) + return NULL; + + dcn30_mpc_construct(mpc30, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + num_mpcc, + num_rmu); + + return &mpc30->base; +} + +static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), + GFP_KERNEL); + + if (!hubbub3) + return NULL; + + hubbub31_construct(hubbub3, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask, + dcn3_15_ip.det_buffer_size_kbytes, + dcn3_15_ip.pixel_chunk_size_kbytes, + dcn3_15_ip.config_return_buffer_size_in_kbytes); + + + for (i = 0; i < res_cap_dcn31.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub3->base; +} + +static struct timing_generator *dcn31_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn31_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn31_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + + dcn31_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc20->enc10.base; +} + +/* Create a minimal link encoder object not associated with a particular + * physical connector. 
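+ * Such an encoder can be assigned to a stream dynamically instead of
+ * being bound to a connector at creation time. (Note: the range check
+ * in the body uses '>', so eng_id - ENGINE_ID_DIGA == num_dig_link_enc
+ * is still accepted.) Registered as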
+ * resource_funcs.link_enc_create_minimal + */ +static struct link_encoder *dcn31_link_enc_create_minimal( + struct dc_context *ctx, enum engine_id eng_id) +{ + struct dcn20_link_encoder *enc20; + + if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) + return NULL; + + enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + if (!enc20) + return NULL; + + dcn31_link_encoder_construct_minimal( + enc20, + ctx, + &link_enc_feature, + &link_enc_regs[eng_id - ENGINE_ID_DIGA], + eng_id); + + return &enc20->enc10.base; +} + +static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dcn31_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dcn31_panel_cntl_construct(panel_cntl, init_data); + + return &panel_cntl->base; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); + +} + +static struct audio *dcn31_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct vpg *dcn31_vpg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); + + if (!vpg31) + return NULL; + + vpg31_construct(vpg31, ctx, inst, + &vpg_regs[inst], + &vpg_shift, + &vpg_mask); + + return &vpg31->base; +} + +static struct afmt *dcn31_afmt_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); + + if (!afmt31) + return NULL; + + afmt31_construct(afmt31, ctx, inst, + &afmt_regs[inst], + &afmt_shift, + &afmt_mask); + + // Light sleep by default, no need to power down here + + return &afmt31->base; +} + +static struct apg *dcn31_apg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + + if (!apg31) + return NULL; + + apg31_construct(apg31, ctx, inst, + &apg_regs[inst], + &apg_shift, + &apg_mask); + + return &apg31->base; +} + +static struct stream_encoder *dcn315_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /*PHYB is wired off in HW, allow front end to remapping, otherwise needs more changes*/ + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGF) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + afmt = dcn31_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + + dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, + eng_id, vpg, afmt, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; + struct vpg *vpg; + struct apg *apg; + uint32_t hpo_dp_inst; + uint32_t vpg_inst; + uint32_t apg_inst; + + ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= 
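+	/*
+	 * The assertion bounds eng_id to the four HPO DP engines:
+	 * hpo_dp_inst computed below indexes the 4-entry
+	 * hpo_dp_stream_enc_regs[] and apg_regs[] tables, matching
+	 * res_cap num_hpo_dp_stream_encoder = 4.
+	 */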
ENGINE_ID_HPO_DP_3)); + hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + + /* Mapping of VPG register blocks to HPO DP block instance: + * VPG[6] -> HPO_DP[0] + * VPG[7] -> HPO_DP[1] + * VPG[8] -> HPO_DP[2] + * VPG[9] -> HPO_DP[3] + */ + vpg_inst = hpo_dp_inst + 6; + + /* Mapping of APG register blocks to HPO DP block instance: + * APG[0] -> HPO_DP[0] + * APG[1] -> HPO_DP[1] + * APG[2] -> HPO_DP[2] + * APG[3] -> HPO_DP[3] + */ + apg_inst = hpo_dp_inst; + + /* allocate HPO stream encoder and create VPG sub-block */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + apg = dcn31_apg_create(ctx, apg_inst); + + if (!hpo_dp_enc31 || !vpg || !apg) { + kfree(hpo_dp_enc31); + kfree(vpg); + kfree(apg); + return NULL; + } + + dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, + hpo_dp_inst, eng_id, vpg, apg, + &hpo_dp_stream_enc_regs[hpo_dp_inst], + &hpo_dp_se_shift, &hpo_dp_se_mask); + + return &hpo_dp_enc31->base; +} + +static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( + uint8_t inst, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; + + /* allocate HPO link encoder */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + + hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, + &hpo_dp_link_enc_regs[inst], + &hpo_dp_le_shift, &hpo_dp_le_mask); + + return &hpo_dp_enc31->base; +} + +static struct dce_hwseq *dcn31_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn31_create_audio, + .create_stream_encoder = dcn315_stream_encoder_create, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, + .create_hwseq = dcn31_hwseq_create, +}; + +static void dcn315_resource_destruct(struct dcn315_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + if (pool->base.stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); + pool->base.stream_enc[i]->vpg = NULL; + } + if (pool->base.stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); + pool->base.stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { + if (pool->base.hpo_dp_stream_enc[i] != NULL) { + if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); + pool->base.hpo_dp_stream_enc[i]->vpg = NULL; + } + if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { + kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); + pool->base.hpo_dp_stream_enc[i]->apg = NULL; + } + kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); + pool->base.hpo_dp_stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { + if (pool->base.hpo_dp_link_enc[i] != NULL) { + kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); + pool->base.hpo_dp_link_enc[i] = NULL; + } 
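+
+	/*
+	 * Teardown mirrors construction: each wrapper struct is recovered
+	 * from its base pointer with a container_of-style macro
+	 * (DCN30_VPG_FROM_VPG and friends), sub-blocks are freed before
+	 * their parent encoder, and every pointer is NULLed so a repeated
+	 * destruct pass is harmless. The recurring shape, sketched with
+	 * hypothetical names:
+	 *
+	 *	if (pool->objs[i] != NULL) {
+	 *		kfree(WRAPPER_FROM_BASE(pool->objs[i]));
+	 *		pool->objs[i] = NULL;
+	 *	}
+	 */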
+ } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn31_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { + if (pool->base.mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->base.mpc_lut[i]); + pool->base.mpc_lut[i] = NULL; + } + if (pool->base.mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->base.mpc_shaper[i]); + pool->base.mpc_shaper[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.multiple_abms[i] != NULL) + dce_abm_destroy(&pool->base.multiple_abms[i]); + } + + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); +} + +static struct hubp *dcn31_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + + if (hubp31_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), + GFP_KERNEL); + + 
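+
+		/*
+		 * Creation helpers that fill pool arrays report failure with
+		 * dm_error() and return false rather than unwinding locally;
+		 * the partially built pool is reclaimed through the
+		 * create_fail path of dcn315_resource_construct() below.
+		 */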
if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return false; + } + + dcn30_dwbc_construct(dwbc30, ctx, + &dwbc30_regs[i], + &dwbc30_shift, + &dwbc30_mask, + i); + + pool->dwbc[i] = &dwbc30->base; + } + return true; +} + +static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), + GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + + dcn30_mmhubbub_construct(mcif_wb30, ctx, + &mcif_wb30_regs[i], + &mcif_wb30_shift, + &mcif_wb30_mask, + i); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +static struct display_stream_compressor *dcn31_dsc_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + return &dsc->base; +} + +static void dcn315_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn315_resource_pool *dcn31_pool = TO_DCN315_RES_POOL(*pool); + + dcn315_resource_destruct(dcn31_pool); + kfree(dcn31_pool); + *pool = NULL; +} + +static struct clock_source *dcn31_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn31_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static bool is_dual_plane(enum surface_pixel_format format) +{ + return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; +} + +static int source_format_to_bpp (enum source_format_class SourcePixelFormat) +{ + if (SourcePixelFormat == dm_444_64) + return 8; + else if (SourcePixelFormat == dm_444_16) + return 2; + else if (SourcePixelFormat == dm_444_8) + return 1; + else if (SourcePixelFormat == dm_rgbe_alpha) + return 5; + else if (SourcePixelFormat == dm_420_8) + return 3; + else if (SourcePixelFormat == dm_420_12) + return 6; + else + return 4; +} + +static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) +{ + int i; + struct resource_context *res_ctx = &context->res_ctx; + + /* Only apply for dual stream scenarios with edp*/ + if (context->stream_count != 2) + return false; + if (context->streams[0]->signal != SIGNAL_TYPE_EDP && context->streams[1]->signal != SIGNAL_TYPE_EDP) + return false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (!res_ctx->pipe_ctx[i].stream) + continue; + + /*Don't apply if scaling*/ + if (res_ctx->pipe_ctx[i].stream->src.width != res_ctx->pipe_ctx[i].stream->dst.width || + res_ctx->pipe_ctx[i].stream->src.height != res_ctx->pipe_ctx[i].stream->dst.height || + (res_ctx->pipe_ctx[i].plane_state && (res_ctx->pipe_ctx[i].plane_state->src_rect.width + != res_ctx->pipe_ctx[i].plane_state->dst_rect.width || + res_ctx->pipe_ctx[i].plane_state->src_rect.height + != res_ctx->pipe_ctx[i].plane_state->dst_rect.height))) + return false; + /*Don't apply if MPO to avoid transition issues*/ + if (res_ctx->pipe_ctx[i].top_pipe && 
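+		    /*
+		     * A top_pipe carrying a different plane_state marks an
+		     * MPO (multi-plane overlay) stack; split pipes, by
+		     * contrast, share one plane_state between both halves.
+		     */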
res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state) + return false; + } + return true; +} + +static int dcn315_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int i, pipe_cnt, crb_idx, crb_pipes; + struct resource_context *res_ctx = &context->res_ctx; + struct pipe_ctx *pipe = NULL; + const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB; + int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB; + bool pixel_rate_crb = allow_pixel_rate_crb(dc, context); + + DC_FP_START(); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + DC_FP_END(); + + for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing; + + if (!res_ctx->pipe_ctx[i].stream) + continue; + pipe = &res_ctx->pipe_ctx[i]; + timing = &pipe->stream->timing; + + /* + * Immediate flip can be set dynamically after enabling the plane. + * We need to require support for immediate flip or underflow can be + * intermittently experienced depending on peak b/w requirements. + */ + pipes[pipe_cnt].pipe.src.immediate_flip = true; + + pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; + pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; + pipes[pipe_cnt].pipe.src.dcc_rate = 3; + pipes[pipe_cnt].dout.dsc_input_bpc = 0; + DC_FP_START(); + dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); + if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) { + int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format); + /* Ceil to crb segment size */ + int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate( + &context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB); + + if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) { + bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS; + split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc); + split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); + + /* Minimum 2 segments to allow mpc/odm combine if its used later */ + if (approx_det_segs_required_for_pstate < 2) + approx_det_segs_required_for_pstate = 2; + if (split_required) + approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2; + pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate; + remaining_det_segs -= approx_det_segs_required_for_pstate; + } else + remaining_det_segs = -1; + crb_pipes++; + } + DC_FP_END(); + + if (pipes[pipe_cnt].dout.dsc_enable) { + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + pipes[pipe_cnt].dout.dsc_input_bpc = 8; + break; + case COLOR_DEPTH_101010: + pipes[pipe_cnt].dout.dsc_input_bpc = 10; + break; + case COLOR_DEPTH_121212: + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + break; + default: + ASSERT(0); + break; + } + } + pipe_cnt++; + } + + /* Spread remaining unreserved crb evenly among all pipes*/ + if (pixel_rate_crb) { + for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) { + pipe = &res_ctx->pipe_ctx[i]; + if (!pipe->stream) + continue; + + /* Do not use asymetric crb if not enough for pstate support */ + if (remaining_det_segs < 0) { + pipes[pipe_cnt].pipe.src.det_size_override = 0; + pipe_cnt++; + continue; + } + + if (!pipe->top_pipe && 
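+			/*
+			 * Second pass of the two-pass DET split: pass one
+			 * (above) reserved each head pipe's approximate
+			 * p-state need, at least 2 segments so MPC/ODM
+			 * combine stays possible, rounded to an even count
+			 * when a split is anticipated (segs += segs % 2
+			 * turns 5 into 6 and leaves 4 at 4). This pass
+			 * spreads the leftover segments.
+			 */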
!pipe->prev_odm_pipe) { + bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) + || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); + + if (remaining_det_segs > MIN_RESERVED_DET_SEGS) + pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + + (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0); + if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { + /* Clamp to 2 pipe split max det segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); + pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; + } + if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { + /* If we are splitting we must have an even number of segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; + pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; + } + /* Convert segments into size for DML use */ + pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; + + crb_idx++; + } + pipe_cnt++; + } + } + + if (pipe_cnt) + context->bw_ctx.dml.ip.det_buffer_size_kbytes = + (max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB; + if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE) + context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE; + + dc->config.enable_4to1MPC = false; + if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { + if (is_dual_plane(pipe->plane_state->format) + && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { + dc->config.enable_4to1MPC = true; + context->bw_ctx.dml.ip.det_buffer_size_kbytes = + (max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / 4) * DCN3_15_CRB_SEGMENT_SIZE_KB; + } else if (!is_dual_plane(pipe->plane_state->format) + && pipe->plane_state->src_rect.width <= 5120 + && pipe->stream->timing.pix_clk_100hz < dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)) { + /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */ + context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; + pipes[0].pipe.src.unbounded_req_mode = true; + } + } + + return pipe_cnt; +} + +static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ + *panel_config = panel_config_defaults; +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +static struct resource_funcs dcn315_res_pool_funcs = { + .destroy = dcn315_destroy_resource_pool, + .link_enc_create = dcn31_link_encoder_create, + .link_enc_create_minimal = dcn31_link_enc_create_minimal, + .link_encs_assign = link_enc_cfg_link_encs_assign, + .link_enc_unassign = link_enc_cfg_link_enc_unassign, + .panel_cntl_create = dcn31_panel_cntl_create, + .validate_bandwidth = dcn31_validate_bandwidth, + .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn315_update_soc_for_wm_a, + .populate_dml_pipes = dcn315_populate_dml_pipes_from_context, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = 
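+
+	/*
+	 * The spread loop above gives each head pipe
+	 * floor(extra / crb_pipes) segments plus one more for the first
+	 * (extra % crb_pipes) pipes, where extra is
+	 * remaining_det_segs - MIN_RESERVED_DET_SEGS. With a hypothetical
+	 * extra of 5 over crb_pipes = 2: crb_idx 0 gets 5 / 2 + 1 = 3,
+	 * crb_idx 1 gets 2, and all 5 segments are consumed.
+	 */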
dcn31_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn31_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn315_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .get_panel_config_defaults = dcn315_get_panel_config_defaults, +}; + +static bool dcn315_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn315_resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_dcn31; + + pool->base.funcs = &dcn315_res_pool_funcs; + + /************************************************* + * Resource + asic cap harcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 600; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 100; + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + if (dc->config.forceHBR2CP2520) + dc->caps.force_dp_tps4_for_cp2520 = false; + dc->caps.dp_hpo = true; + dc->caps.dp_hdmi21_pcon_support = true; + dc->caps.edp_dsc_support = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + dc->caps.is_apu = true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; + dc->caps.color.dpp.dgam_rom_caps.pq = 1; + dc->caps.color.dpp.dgam_rom_caps.hlg = 1; + dc->caps.color.dpp.post_csc = 1; + dc->caps.color.dpp.gamma_corr = 1; + dc->caps.color.dpp.dgam_rom_for_yuv = 0; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN301 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + /* read VBIOS LTTPR caps */ + { + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + 
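+
+			/*
+			 * The double negation coerces the strap byte to a
+			 * strict 0/1, so vbios_lttpr_enable is set only when
+			 * the VBIOS query itself succeeded and the strap is
+			 * nonzero.
+			 */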
} + + /* interop bit is implicit */ + { + dc->caps.vbios_lttpr_aware = true; + } + } + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock Sources for Pixel Clock*/ + pool->base.clock_sources[DCN31_CLK_SRC_PLL0] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL1] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + + pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* TODO: DCCG */ + pool->base.dccg = dccg31_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* TODO: IRQ */ + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn315_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* HUBBUB */ + pool->base.hubbub = dcn31_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.hubps[i] = dcn31_hubp_create(ctx, i); + if (pool->base.hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->base.dpps[i] = dcn31_dpp_create(ctx, i); + if (pool->base.dpps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + pool->base.opps[i] = dcn31_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.timing_generators[i] = dcn31_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + pool->base.timing_generator_count = i; + + /* PSR */ + pool->base.psr = dmub_psr_create(ctx); + if (pool->base.psr == NULL) { + dm_error("DC: failed to create psr obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* ABM */ + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.multiple_abms[i] = 
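+		/*
+		 * Every create step shares one unwind contract: the pool was
+		 * kzalloc()ed, so all object pointers start out NULL, and
+		 * any failure jumps to create_fail, where
+		 * dcn315_resource_destruct() frees exactly the objects built
+		 * so far by checking each pointer before freeing it.
+		 */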
dmub_abm_create(ctx, + &abm_regs[i], + &abm_shift, + &abm_mask); + if (pool->base.multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* MPC and DSC */ + pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn31_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn31_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn31_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn31_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + dc->dcn_ip->max_num_dpp = dcn3_15_ip.max_num_dpp; + + return true; + +create_fail: + + dcn315_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn315_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn315_resource_pool *pool = + kzalloc(sizeof(struct dcn315_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn315_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h new file mode 100644 index 0000000000..22849eaa6f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h @@ -0,0 +1,44 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef _DCN315_RESOURCE_H_ +#define _DCN315_RESOURCE_H_ + +#include "core_types.h" + +#define TO_DCN315_RES_POOL(pool)\ + container_of(pool, struct dcn315_resource_pool, base) + +extern struct _vcs_dpi_ip_params_st dcn3_15_ip; + +struct dcn315_resource_pool { + struct resource_pool base; +}; + +struct resource_pool *dcn315_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +#endif /* _DCN315_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c new file mode 100644 index 0000000000..b9753d4606 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -0,0 +1,2038 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#include "dm_services.h" +#include "dc.h" + +#include "dcn31/dcn31_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn316_resource.h" + +#include "dcn20/dcn20_resource.h" +#include "dcn30/dcn30_resource.h" +#include "dcn31/dcn31_resource.h" + +#include "dcn10/dcn10_ipp.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn31/dcn31_hubbub.h" +#include "dcn30/dcn30_mpc.h" +#include "dcn31/dcn31_hubp.h" +#include "irq/dcn31/irq_service_dcn31.h" +#include "dcn30/dcn30_dpp.h" +#include "dcn31/dcn31_optc.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn30/dcn30_opp.h" +#include "dcn20/dcn20_dsc.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn30/dcn30_afmt.h" +#include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_link_encoder.h" +#include "dcn31/dcn31_apg.h" +#include "dcn31/dcn31_dio_link_encoder.h" +#include "dcn31/dcn31_vpg.h" +#include "dcn31/dcn31_afmt.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "clk_mgr.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dml/display_mode_vba.h" +#include "dml/dcn31/dcn31_fpu.h" +#include "dcn31/dcn31_dccg.h" +#include "dcn10/dcn10_resource.h" +#include "dcn31/dcn31_panel_cntl.h" + +#include "dcn30/dcn30_dwb.h" +#include "dcn30/dcn30_mmhubbub.h" + +#include "dcn/dcn_3_1_6_offset.h" +#include "dcn/dcn_3_1_6_sh_mask.h" +#include "dpcs/dpcs_4_2_3_offset.h" +#include "dpcs/dpcs_4_2_3_sh_mask.h" + +#define regBIF_BX1_BIOS_SCRATCH_2 0x003a +#define regBIF_BX1_BIOS_SCRATCH_2_BASE_IDX 1 +#define regBIF_BX1_BIOS_SCRATCH_3 0x003b +#define regBIF_BX1_BIOS_SCRATCH_3_BASE_IDX 1 +#define regBIF_BX1_BIOS_SCRATCH_6 0x003e +#define regBIF_BX1_BIOS_SCRATCH_6_BASE_IDX 1 + +#define regDCHUBBUB_DEBUG_CTRL_0 0x04d6 +#define regDCHUBBUB_DEBUG_CTRL_0_BASE_IDX 2 +#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10 +#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L + +#define DCN_BASE__INST0_SEG0 0x00000012 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0x00009000 +#define DCN_BASE__INST0_SEG4 0x02403C00 +#define DCN_BASE__INST0_SEG5 0 + +#define DPCS_BASE__INST0_SEG0 0x00000012 +#define DPCS_BASE__INST0_SEG1 0x000000C0 +#define DPCS_BASE__INST0_SEG2 0x000034C0 +#define DPCS_BASE__INST0_SEG3 0x00009000 +#define DPCS_BASE__INST0_SEG4 0x02403C00 +#define DPCS_BASE__INST0_SEG5 0 + +#define NBIO_BASE__INST0_SEG0 0x00000000 +#define NBIO_BASE__INST0_SEG1 0x00000014 +#define NBIO_BASE__INST0_SEG2 0x00000D20 +#define NBIO_BASE__INST0_SEG3 0x00010400 +#define NBIO_BASE__INST0_SEG4 0x0241B000 +#define NBIO_BASE__INST0_SEG5 0x04040000 + +#include "reg_helper.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" + +#include "dml/dcn30/display_mode_vba_30.h" +#include "vm_helper.h" +#include "dcn20/dcn20_vmid.h" + +#include "link_enc_cfg.h" + +#define DCN3_16_MAX_DET_SIZE 384 +#define DCN3_16_MIN_COMPBUF_SIZE_KB 128 +#define DCN3_16_CRB_SEGMENT_SIZE_KB 64 + +enum dcn31_clk_src_array_id { + DCN31_CLK_SRC_PLL0, + DCN31_CLK_SRC_PLL1, + DCN31_CLK_SRC_PLL2, + DCN31_CLK_SRC_PLL3, + DCN31_CLK_SRC_PLL4, + DCN30_CLK_SRC_TOTAL +}; + +/* begin ********************* + * macros to expend register list macro defined in HW object header file + */ + +/* DCN */ +#define 
BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_DWB(reg_name, temp_name, block, id)\ + .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## temp_name + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + reg ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(regBIF_BX1_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX1_ ## reg_name + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +#define clk_src_regs(index, pllid)\ +[index] = {\ + CS_COMMON_REG_LIST_DCN3_0(index, pllid),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B), + clk_src_regs(2, C), + clk_src_regs(3, D), + clk_src_regs(4, E) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +#define abm_regs(id)\ +[id] = {\ + ABM_DCN302_REG_LIST(id)\ +} + +static const struct dce_abm_registers abm_regs[] = { + abm_regs(0), + abm_regs(1), + abm_regs(2), + abm_regs(3), +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN30(_MASK) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6) +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define vpg_regs(id)\ +[id] = {\ + VPG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_vpg_registers 
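+
+/*
+ * The tables above are assembled by token pasting against the dcn_3_1_6
+ * offset headers. For a single-instance register, SR(FOO) expands through
+ * BASE() to roughly
+ *
+ *	.FOO = DCN_BASE__INST0_SEG2 + regFOO
+ *
+ * assuming regFOO_BASE_IDX is 2 (FOO is a stand-in name; the two-level
+ * BASE()/BASE_INNER() pair exists so the _BASE_IDX value is
+ * macro-expanded before pasting). SRI()/SRII() do the same per block
+ * instance by splicing the block name and id into the token.
+ */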
vpg_regs[] = { + vpg_regs(0), + vpg_regs(1), + vpg_regs(2), + vpg_regs(3), + vpg_regs(4), + vpg_regs(5), + vpg_regs(6), + vpg_regs(7), + vpg_regs(8), + vpg_regs(9), +}; + +static const struct dcn31_vpg_shift vpg_shift = { + DCN31_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_vpg_mask vpg_mask = { + DCN31_VPG_MASK_SH_LIST(_MASK) +}; + +#define afmt_regs(id)\ +[id] = {\ + AFMT_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_afmt_registers afmt_regs[] = { + afmt_regs(0), + afmt_regs(1), + afmt_regs(2), + afmt_regs(3), + afmt_regs(4), + afmt_regs(5) +}; + +static const struct dcn31_afmt_shift afmt_shift = { + DCN31_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_afmt_mask afmt_mask = { + DCN31_AFMT_MASK_SH_LIST(_MASK) +}; + + +#define apg_regs(id)\ +[id] = {\ + APG_DCN31_REG_LIST(id)\ +} + +static const struct dcn31_apg_registers apg_regs[] = { + apg_regs(0), + apg_regs(1), + apg_regs(2), + apg_regs(3) +}; + +static const struct dcn31_apg_shift apg_shift = { + DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { + DCN31_APG_MASK_SH_LIST(_MASK) +}; + + +#define stream_enc_regs(id)\ +[id] = {\ + SE_DCN3_REG_LIST(id)\ +} + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1), + stream_enc_regs(2), + stream_enc_regs(3), + stream_enc_regs(4) +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4) +}; + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN31_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + DPCS_DCN31_REG_LIST(id), \ +} + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ + DPCS_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ + DPCS_DCN31_MASK_SH_LIST(_MASK) +}; + + + +#define hpo_dp_stream_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ +} + +static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { + hpo_dp_stream_encoder_reg_list(0), + hpo_dp_stream_encoder_reg_list(1), + hpo_dp_stream_encoder_reg_list(2), + hpo_dp_stream_encoder_reg_list(3), +}; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + + +#define hpo_dp_link_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ + DCN3_1_RDPCSTX_REG_LIST(0),\ + 
DCN3_1_RDPCSTX_REG_LIST(1),\ + DCN3_1_RDPCSTX_REG_LIST(2),\ + DCN3_1_RDPCSTX_REG_LIST(3),\ + DCN3_1_RDPCSTX_REG_LIST(4)\ +} + +static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { + hpo_dp_link_encoder_reg_list(0), + hpo_dp_link_encoder_reg_list(1), +}; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) +}; + + +#define dpp_regs(id)\ +[id] = {\ + DPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn3_dpp_registers dpp_regs[] = { + dpp_regs(0), + dpp_regs(1), + dpp_regs(2), + dpp_regs(3) +}; + +static const struct dcn3_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) +}; + +static const struct dcn3_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN30(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN30(id),\ +} + +static const struct dcn20_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), + opp_regs(2), + opp_regs(3) +}; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUXN_IMPCAL = 0, \ + .AUXP_IMPCAL = 0, \ + .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1), + aux_engine_regs(2), + aux_engine_regs(3), + aux_engine_regs(4) +}; + +#define dwbc_regs_dcn3(id)\ +[id] = {\ + DWBC_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_dwbc_registers dwbc30_regs[] = { + dwbc_regs_dcn3(0), +}; + +static const struct dcn30_dwbc_shift dwbc30_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_dwbc_mask dwbc30_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define mcif_wb_regs_dcn3(id)\ +[id] = {\ + MCIF_WB_COMMON_REG_LIST_DCN30(id),\ +} + +static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { + mcif_wb_regs_dcn3(0) +}; + +static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define dsc_regsDCN20(id)\ +[id] = {\ + DSC_REG_LIST_DCN20(id)\ +} + +static const struct dcn20_dsc_registers dsc_regs[] = { + dsc_regsDCN20(0), + dsc_regsDCN20(1), + dsc_regsDCN20(2) +}; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static const struct dcn30_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN3_0(0), + MPC_REG_LIST_DCN3_0(1), + MPC_REG_LIST_DCN3_0(2), + MPC_REG_LIST_DCN3_0(3), + MPC_OUT_MUX_REG_LIST_DCN3_0(0), + MPC_OUT_MUX_REG_LIST_DCN3_0(1), + MPC_OUT_MUX_REG_LIST_DCN3_0(2), + MPC_OUT_MUX_REG_LIST_DCN3_0(3), + MPC_RMU_GLOBAL_REG_LIST_DCN3AG, + MPC_RMU_REG_LIST_DCN3AG(0), + MPC_RMU_REG_LIST_DCN3AG(1), + //MPC_RMU_REG_LIST_DCN3AG(2), + MPC_DWB_MUX_REG_LIST_DCN3_0(0), +}; + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define optc_regs(id)\ +[id] = {OPTC_COMMON_REG_LIST_DCN3_1(id)} + +static const struct dcn_optc_registers optc_regs[] = { + 
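+	/*
+	 * mpc_regs above instantiates RMU 0 and 1 only (the third RMU list
+	 * is commented out), in line with num_mpc_3dlut = 2 in
+	 * res_cap_dcn31 below, which is what dcn31_mpc_create() later
+	 * receives as num_rmu.
+	 */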
optc_regs(0), + optc_regs(1), + optc_regs(2), + optc_regs(3) +}; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN3_1(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN3_1(_MASK) +}; + +#define hubp_regs(id)\ +[id] = {\ + HUBP_REG_LIST_DCN30(id)\ +} + +static const struct dcn_hubp2_registers hubp_regs[] = { + hubp_regs(0), + hubp_regs(1), + hubp_regs(2), + hubp_regs(3) +}; + + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN31(_MASK) +}; +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN31(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN31(_MASK) +}; + +static const struct dccg_registers dccg_regs = { + DCCG_REG_LIST_DCN31() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN31(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN31(_MASK) +}; + + +#define SRII2(reg_name_pre, reg_name_post, id)\ + .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ + ## id ## _ ## reg_name_post ## _BASE_IDX) + \ + reg ## reg_name_pre ## id ## _ ## reg_name_post + + +#define HWSEQ_DCN31_REG_LIST()\ + SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ + SR(DIO_MEM_PWR_CTRL), \ + SR(ODM_MEM_PWR_CTRL3), \ + SR(DMU_MEM_PWR_CNTL), \ + SR(MMHUBBUB_MEM_PWR_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL2), \ + SR(DCFCLK_CNTL),\ + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ + SRII(PIXEL_RATE_CNTL, OTG, 0), \ + SRII(PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PIXEL_RATE_CNTL, OTG, 3),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ + SR(MICROSECOND_TIME_BASE_DIV), \ + SR(MILLISECOND_TIME_BASE_DIV), \ + SR(DISPCLK_FREQ_CHANGE_CNTL), \ + SR(RBBMIF_TIMEOUT_DIS), \ + SR(RBBMIF_TIMEOUT_DIS_2), \ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ + SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ + SR(MPC_CRC_CTRL), \ + SR(MPC_CRC_RESULT_GB), \ + SR(MPC_CRC_RESULT_C), \ + SR(MPC_CRC_RESULT_AR), \ + SR(DOMAIN0_PG_CONFIG), \ + SR(DOMAIN1_PG_CONFIG), \ + SR(DOMAIN2_PG_CONFIG), \ + SR(DOMAIN3_PG_CONFIG), \ + SR(DOMAIN16_PG_CONFIG), \ + SR(DOMAIN17_PG_CONFIG), \ + SR(DOMAIN18_PG_CONFIG), \ + SR(DOMAIN0_PG_STATUS), \ + SR(DOMAIN1_PG_STATUS), \ + SR(DOMAIN2_PG_STATUS), \ + SR(DOMAIN3_PG_STATUS), \ + SR(DOMAIN16_PG_STATUS), \ + SR(DOMAIN17_PG_STATUS), \ + SR(DOMAIN18_PG_STATUS), \ + SR(D1VGA_CONTROL), \ + SR(D2VGA_CONTROL), \ + SR(D3VGA_CONTROL), \ + SR(D4VGA_CONTROL), \ + SR(D5VGA_CONTROL), \ + SR(D6VGA_CONTROL), \ + SR(DC_IP_REQUEST_CNTL), \ + SR(AZALIA_AUDIO_DTO), \ + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_HW_CONTROL) + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN31_REG_LIST() +}; + +#define HWSEQ_DCN31_MASK_SH_LIST(mask_sh)\ + HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ + HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, 
DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ + HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ + HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ + HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN31_MASK_SH_LIST(_MASK) +}; +#define vmid_regs(id)\ +[id] = {\ + DCN20_VMID_REG_LIST(id)\ +} + +static const struct dcn_vmid_registers vmid_regs[] = { + vmid_regs(0), + vmid_regs(1), + vmid_regs(2), + vmid_regs(3), + vmid_regs(4), + vmid_regs(5), + vmid_regs(6), + vmid_regs(7), + vmid_regs(8), + vmid_regs(9), + vmid_regs(10), + vmid_regs(11), + vmid_regs(12), + vmid_regs(13), + vmid_regs(14), + vmid_regs(15) +}; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static const struct resource_caps res_cap_dcn31 = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 5, + .num_stream_encoder = 5, + .num_dig_link_enc = 5, + .num_hpo_dp_stream_encoder = 4, + .num_hpo_dp_link_encoder = 2, + .num_pll = 5, + .num_dwb = 1, + .num_ddc = 5, + .num_vmid = 16, + .num_mpc_3dlut = 2, + .num_dsc = 3, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true, + .ayuv = false, + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + + // 6:1 downscaling ratio: 1000/6 = 166.666 + .max_downscale_factor = { + .argb8888 = 167, + .nv12 = 167, + .fp16 = 167 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_z10 = true, /*hw not support it*/ + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = 
false, + .clock_trace = true, + .disable_pplib_clock_request = false, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 4096, /* up to true 4K */ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable + .dmub_command_table = true, + .pstate_enabled = true, + .use_max_lb = true, + .enable_mem_low_power = { + .bits = { + .vga = true, + .i2c = true, + .dmcu = false, // previously known to cause a hang on S3 cycles if enabled + .dscl = true, + .cm = true, + .mpc = true, + .optc = true, + .vpg = true, + .afmt = true, + } + }, + .enable_legacy_fast_update = true, + .using_dml2 = false, +}; + +static const struct dc_panel_config panel_config_defaults = { + .psr = { + .disable_psr = false, + .disallow_psrsu = false, + .disallow_replay = false, + }, + .ilr = { + .optimize_edp_link_rate = true, + }, +}; + +static void dcn31_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN20_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn31_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn3_dpp *dpp = + kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + + if (!dpp) + return NULL; + + if (dpp3_construct(dpp, ctx, inst, + &dpp_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +static struct output_pixel_processor *dcn31_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp = + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +static struct dce_aux *dcn31_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), + i2c_inst_regs(3), + i2c_inst_regs(4), + i2c_inst_regs(5), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct dce_i2c_hw *dcn31_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct mpc *dcn31_mpc_create( + struct dc_context *ctx, + int num_mpcc, + int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), + GFP_KERNEL); + + if (!mpc30) + return NULL; + + dcn30_mpc_construct(mpc30, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + num_mpcc, + num_rmu); + + return &mpc30->base; +} + +static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub3 =
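/* Every *_create() helper in this file follows the same shape: kzalloc the + * wrapper struct, run the block's construct helper to wire up the register, + * shift and mask tables, and return the embedded base pointer for the caller + * to NULL-check. A minimal sketch of that shape, with illustrative names + * only (foo_* is not from this patch): + * + * struct foo_wrapper *w = kzalloc(sizeof(*w), GFP_KERNEL); + * if (!w) + * return NULL; + * foo_construct(w, ctx, &foo_regs[inst], &foo_shift, &foo_mask); + * return &w->base; + */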
kzalloc(sizeof(struct dcn20_hubbub), + GFP_KERNEL); + + if (!hubbub3) + return NULL; + + hubbub31_construct(hubbub3, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask, + dcn3_16_ip.det_buffer_size_kbytes, + dcn3_16_ip.pixel_chunk_size_kbytes, + dcn3_16_ip.config_return_buffer_size_in_kbytes); + + + for (i = 0; i < res_cap_dcn31.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub3->base; +} + +static struct timing_generator *dcn31_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn31_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn31_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + + dcn31_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc20->enc10.base; +} + +/* Create a minimal link encoder object not associated with a particular + * physical connector. 
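+ * Unlike dcn31_link_encoder_create() above, the construct-minimal call below + * is handed only the per-DIG register block and an engine id - no AUX, HPD or + * other connector-specific registers - which is what allows link_enc_cfg to + * assign the encoder to a stream dynamically.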
+ * resource_funcs.link_enc_create_minimal + */ +static struct link_encoder *dcn31_link_enc_create_minimal( + struct dc_context *ctx, enum engine_id eng_id) +{ + struct dcn20_link_encoder *enc20; + + if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) + return NULL; + + enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + if (!enc20) + return NULL; + + dcn31_link_encoder_construct_minimal( + enc20, + ctx, + &link_enc_feature, + &link_enc_regs[eng_id - ENGINE_ID_DIGA], + eng_id); + + return &enc20->enc10.base; +} + +static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dcn31_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dcn31_panel_cntl_construct(panel_cntl, init_data); + + return &panel_cntl->base; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); + +} + +static struct audio *dcn31_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct vpg *dcn31_vpg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); + + if (!vpg31) + return NULL; + + vpg31_construct(vpg31, ctx, inst, + &vpg_regs[inst], + &vpg_shift, + &vpg_mask); + + return &vpg31->base; +} + +static struct afmt *dcn31_afmt_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); + + if (!afmt31) + return NULL; + + afmt31_construct(afmt31, ctx, inst, + &afmt_regs[inst], + &afmt_shift, + &afmt_mask); + + // Light sleep by default, no need to power down here + + return &afmt31->base; +} + + +static struct apg *dcn31_apg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + + if (!apg31) + return NULL; + + apg31_construct(apg31, ctx, inst, + &apg_regs[inst], + &apg_shift, + &apg_mask); + + return &apg31->base; +} + + +static struct stream_encoder *dcn316_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGF) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + afmt = dcn31_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + + dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, + eng_id, vpg, afmt, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + + +static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; + struct vpg *vpg; + struct apg *apg; + uint32_t hpo_dp_inst; + uint32_t vpg_inst; + uint32_t apg_inst; + + ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); + hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + + /* Mapping of VPG register 
blocks to HPO DP block instance: + * VPG[6] -> HPO_DP[0] + * VPG[7] -> HPO_DP[1] + * VPG[8] -> HPO_DP[2] + * VPG[9] -> HPO_DP[3] + */ + vpg_inst = hpo_dp_inst + 6; + + /* Mapping of APG register blocks to HPO DP block instance: + * APG[0] -> HPO_DP[0] + * APG[1] -> HPO_DP[1] + * APG[2] -> HPO_DP[2] + * APG[3] -> HPO_DP[3] + */ + apg_inst = hpo_dp_inst; + + /* allocate HPO stream encoder and create VPG sub-block */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + apg = dcn31_apg_create(ctx, apg_inst); + + if (!hpo_dp_enc31 || !vpg || !apg) { + kfree(hpo_dp_enc31); + kfree(vpg); + kfree(apg); + return NULL; + } + + dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, + hpo_dp_inst, eng_id, vpg, apg, + &hpo_dp_stream_enc_regs[hpo_dp_inst], + &hpo_dp_se_shift, &hpo_dp_se_mask); + + return &hpo_dp_enc31->base; +} + +static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( + uint8_t inst, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; + + /* allocate HPO link encoder */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + + hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, + &hpo_dp_link_enc_regs[inst], + &hpo_dp_le_shift, &hpo_dp_le_mask); + + return &hpo_dp_enc31->base; +} + + +static struct dce_hwseq *dcn31_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn31_create_audio, + .create_stream_encoder = dcn316_stream_encoder_create, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, + .create_hwseq = dcn31_hwseq_create, +}; + +static void dcn316_resource_destruct(struct dcn316_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + if (pool->base.stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); + pool->base.stream_enc[i]->vpg = NULL; + } + if (pool->base.stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); + pool->base.stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { + if (pool->base.hpo_dp_stream_enc[i] != NULL) { + if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); + pool->base.hpo_dp_stream_enc[i]->vpg = NULL; + } + if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { + kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); + pool->base.hpo_dp_stream_enc[i]->apg = NULL; + } + kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); + pool->base.hpo_dp_stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { + if (pool->base.hpo_dp_link_enc[i] != NULL) { + kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); + pool->base.hpo_dp_link_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + 
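/* Each sub-object below is NULL-checked and its pointer cleared once freed, + * so this destruct is safe to run on a partially constructed pool - which is + * exactly how the create_fail path in dcn316_resource_construct() uses it. + */ +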
dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn31_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { + if (pool->base.mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->base.mpc_lut[i]); + pool->base.mpc_lut[i] = NULL; + } + if (pool->base.mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->base.mpc_shaper[i]); + pool->base.mpc_shaper[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.multiple_abms[i] != NULL) + dce_abm_destroy(&pool->base.multiple_abms[i]); + } + + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); +} + +static struct hubp *dcn31_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + + if (hubp31_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static bool dcn31_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), + GFP_KERNEL); + + if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return false; + } + + 
dcn30_dwbc_construct(dwbc30, ctx, + &dwbc30_regs[i], + &dwbc30_shift, + &dwbc30_mask, + i); + + pool->dwbc[i] = &dwbc30->base; + } + return true; +} + +static bool dcn31_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), + GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + + dcn30_mmhubbub_construct(mcif_wb30, ctx, + &mcif_wb30_regs[i], + &mcif_wb30_shift, + &mcif_wb30_mask, + i); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +static struct display_stream_compressor *dcn31_dsc_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + return &dsc->base; +} + +static void dcn316_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn316_resource_pool *dcn31_pool = TO_DCN316_RES_POOL(*pool); + + dcn316_resource_destruct(dcn31_pool); + kfree(dcn31_pool); + *pool = NULL; +} + +static struct clock_source *dcn31_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn31_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + + BREAK_TO_DEBUGGER(); + return NULL; +} + +static bool is_dual_plane(enum surface_pixel_format format) +{ + return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; +} + +static int dcn316_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int i, pipe_cnt; + struct resource_context *res_ctx = &context->res_ctx; + struct pipe_ctx *pipe; + const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB; + + DC_FP_START(); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + DC_FP_END(); + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing; + + if (!res_ctx->pipe_ctx[i].stream) + continue; + pipe = &res_ctx->pipe_ctx[i]; + timing = &pipe->stream->timing; + + /* + * Immediate flip can be set dynamically after enabling the plane. + * We need to require support for immediate flip or underflow can be + * intermittently experienced depending on peak b/w requirements. 
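+ * In other words, DML must validate with immediate_flip = true up front, + * since userspace may request flip-immediate at any time once the plane is + * enabled. + * + * Note on the DET sizing further down: det_buffer_size_kbytes is the usable + * DET split evenly across active pipes and rounded down to a whole number of + * CRB segments. A worked example with hypothetical numbers (not taken from + * this patch): max_usable_det = 1152 KB, segment size = 64 KB, 3 pipes -> + * 1152 / 64 / 3 = 6 segments -> 6 * 64 = 384 KB per pipe.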
+ */ + pipes[pipe_cnt].pipe.src.immediate_flip = true; + + pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; + pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; + pipes[pipe_cnt].pipe.src.dcc_rate = 3; + pipes[pipe_cnt].dout.dsc_input_bpc = 0; + DC_FP_START(); + dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); + DC_FP_END(); + + if (pipes[pipe_cnt].dout.dsc_enable) { + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + pipes[pipe_cnt].dout.dsc_input_bpc = 8; + break; + case COLOR_DEPTH_101010: + pipes[pipe_cnt].dout.dsc_input_bpc = 10; + break; + case COLOR_DEPTH_121212: + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + break; + default: + ASSERT(0); + break; + } + } + + pipe_cnt++; + } + + if (pipe_cnt) + context->bw_ctx.dml.ip.det_buffer_size_kbytes = + (max_usable_det / DCN3_16_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_16_CRB_SEGMENT_SIZE_KB; + if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_16_MAX_DET_SIZE) + context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_16_MAX_DET_SIZE; + ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_16_DEFAULT_DET_SIZE); + dc->config.enable_4to1MPC = false; + if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { + if (is_dual_plane(pipe->plane_state->format) + && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { + dc->config.enable_4to1MPC = true; + context->bw_ctx.dml.ip.det_buffer_size_kbytes = + (max_usable_det / DCN3_16_CRB_SEGMENT_SIZE_KB / 4) * DCN3_16_CRB_SEGMENT_SIZE_KB; + } else if (!is_dual_plane(pipe->plane_state->format)) { + context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; + pipes[0].pipe.src.unbounded_req_mode = true; + } + } + + return pipe_cnt; +} + +static void dcn316_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ + *panel_config = panel_config_defaults; +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +static struct resource_funcs dcn316_res_pool_funcs = { + .destroy = dcn316_destroy_resource_pool, + .link_enc_create = dcn31_link_encoder_create, + .link_enc_create_minimal = dcn31_link_enc_create_minimal, + .link_encs_assign = link_enc_cfg_link_encs_assign, + .link_enc_unassign = link_enc_cfg_link_enc_unassign, + .panel_cntl_create = dcn31_panel_cntl_create, + .validate_bandwidth = dcn31_validate_bandwidth, + .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, + .populate_dml_pipes = dcn316_populate_dml_pipes_from_context, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn31_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn316_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .get_panel_config_defaults = dcn316_get_panel_config_defaults, +}; + +static bool dcn316_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn316_resource_pool *pool) +{ + int i; + struct dc_context *ctx = 
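/* Rough order of operations below: hard-coded resource/ASIC caps and color + * pipeline caps first, then the VBIOS LTTPR query, then the HW blocks (clock + * sources, DCCG, IRQ service, HUBBUB, HUBP/DPP/OPP/TG, PSR, ABM, MPC, DSC, + * DWB/MMHUBBUB, AUX/I2C) and finally the common resource_construct() pass. + * Any failure jumps to create_fail, which unwinds whatever has been + * allocated so far via dcn316_resource_destruct(). + */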
dc->ctx; + struct irq_service_init_data init_data; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_dcn31; + + pool->base.funcs = &dcn316_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 600; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /* 1.5 w/a applied by default */ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + if (dc->config.forceHBR2CP2520) + dc->caps.force_dp_tps4_for_cp2520 = false; + dc->caps.dp_hpo = true; + dc->caps.dp_hdmi21_pcon_support = true; + dc->caps.edp_dsc_support = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + dc->caps.is_apu = true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; + dc->caps.color.dpp.dgam_rom_caps.pq = 1; + dc->caps.color.dpp.dgam_rom_caps.hlg = 1; + dc->caps.color.dpp.post_csc = 1; + dc->caps.color.dpp.gamma_corr = 1; + dc->caps.color.dpp.dgam_rom_for_yuv = 0; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN316 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2 + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + /* read VBIOS LTTPR caps */ + { + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + /* interop bit is implicit */ + { + dc->caps.vbios_lttpr_aware = true; + } + } + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock Sources for Pixel Clock */ + pool->base.clock_sources[DCN31_CLK_SRC_PLL0] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL1] = +
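/* Five combo-PHY PLL clock sources are created here, plus a separate DP DTO + * source that reuses PLL0's register block (see the todo below); all of them + * are NULL-checked in a single loop before first use. + */ +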
dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN31_CLK_SRC_PLL4] = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + + pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn31_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* TODO: DCCG */ + pool->base.dccg = dccg31_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* TODO: IRQ */ + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn31_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* HUBBUB */ + pool->base.hubbub = dcn31_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.hubps[i] = dcn31_hubp_create(ctx, i); + if (pool->base.hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->base.dpps[i] = dcn31_dpp_create(ctx, i); + if (pool->base.dpps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + pool->base.opps[i] = dcn31_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.timing_generators[i] = dcn31_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + pool->base.timing_generator_count = i; + + /* PSR */ + pool->base.psr = dmub_psr_create(ctx); + if (pool->base.psr == NULL) { + dm_error("DC: failed to create psr obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* ABM */ + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.multiple_abms[i] = dmub_abm_create(ctx, + &abm_regs[i], + &abm_shift, + &abm_mask); + if (pool->base.multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* MPC and DSC */ + pool->base.mpc = dcn31_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn31_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to 
create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn31_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn31_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn31_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + dc->dcn_ip->max_num_dpp = dcn3_16_ip.max_num_dpp; + + return true; + +create_fail: + + dcn316_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn316_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn316_resource_pool *pool = + kzalloc(sizeof(struct dcn316_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn316_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h new file mode 100644 index 0000000000..aba6d63413 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h @@ -0,0 +1,44 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef _DCN316_RESOURCE_H_ +#define _DCN316_RESOURCE_H_ + +#include "core_types.h" + +#define TO_DCN316_RES_POOL(pool)\ + container_of(pool, struct dcn316_resource_pool, base) + +extern struct _vcs_dpi_ip_params_st dcn3_16_ip; + +struct dcn316_resource_pool { + struct resource_pool base; +}; + +struct resource_pool *dcn316_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +#endif /* _DCN316_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c new file mode 100644 index 0000000000..9042378fa8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -0,0 +1,2784 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dc.h" + +#include "dcn32/dcn32_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn32_resource.h" + +#include "dcn20/dcn20_resource.h" +#include "dcn30/dcn30_resource.h" + +#include "dcn10/dcn10_ipp.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn31/dcn31_hubbub.h" +#include "dcn32/dcn32_hubbub.h" +#include "dcn32/dcn32_mpc.h" +#include "dcn32/dcn32_hubp.h" +#include "irq/dcn32/irq_service_dcn32.h" +#include "dcn32/dcn32_dpp.h" +#include "dcn32/dcn32_optc.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn30/dcn30_opp.h" +#include "dcn20/dcn20_dsc.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn30/dcn30_afmt.h" +#include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn32/dcn32_dio_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_link_encoder.h" +#include "dcn32/dcn32_hpo_dp_link_encoder.h" +#include "dcn31/dcn31_apg.h" +#include "dcn31/dcn31_dio_link_encoder.h" +#include "dcn32/dcn32_dio_link_encoder.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "clk_mgr.h" +#include "virtual/virtual_stream_encoder.h" +#include "dml/display_mode_vba.h" +#include "dcn32/dcn32_dccg.h" +#include "dcn10/dcn10_resource.h" +#include "link.h" +#include "dcn31/dcn31_panel_cntl.h" + +#include "dcn30/dcn30_dwb.h" +#include "dcn32/dcn32_mmhubbub.h" + +#include "dcn/dcn_3_2_0_offset.h" +#include "dcn/dcn_3_2_0_sh_mask.h" +#include "nbio/nbio_4_3_0_offset.h" + +#include "reg_helper.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" + +#include "dml/dcn30/display_mode_vba_30.h" +#include "vm_helper.h" +#include "dcn20/dcn20_vmid.h" +#include "dml/dcn32/dcn32_fpu.h" + +#include "dc_state_priv.h" + +#include "dml2/dml2_wrapper.h" + +#define DC_LOGGER_INIT(logger) + +enum dcn32_clk_src_array_id { + DCN32_CLK_SRC_PLL0, + DCN32_CLK_SRC_PLL1, + DCN32_CLK_SRC_PLL2, + DCN32_CLK_SRC_PLL3, + DCN32_CLK_SRC_PLL4, + DCN32_CLK_SRC_TOTAL +}; + +/* begin ********************* + * macros to expand the register list macros defined in the HW object header files + */ + +/* DCN */ +#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name +#define SR_ARR(reg_name, id) \ + REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name + +#define SR_ARR_INIT(reg_name, id, value) \ + REG_STRUCT[id].reg_name = value + +#define SRI(reg_name, block, id)\ + REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI_ARR(reg_name, block, id)\ + REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SR_ARR_I2C(reg_name, id) \ + REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name + +#define SRI_ARR_I2C(reg_name, block, id)\ + REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI_ARR_ALPHABET(reg_name, block, index, id)\ + REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX)
+ \ + reg ## reg_name +#define SRI2_ARR(reg_name, block, id)\ + REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_ARR_2(reg_name, block, id, inst)\ + REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_DWB(reg_name, temp_name, block, id)\ + REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## temp_name + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define DCCG_SRII(reg_name, block, id)\ + REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + reg ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg] + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX0_ ## reg_name +#define NBIO_SR_ARR(reg_name, id)\ + REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX0_ ## reg_name + +#undef CTX +#define CTX ctx +#define REG(reg_name) \ + (ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) + +static struct bios_registers bios_regs; + +#define bios_regs_init() \ + ( \ + NBIO_SR(BIOS_SCRATCH_3),\ + NBIO_SR(BIOS_SCRATCH_6)\ + ) + +#define clk_src_regs_init(index, pllid)\ + CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) + +static struct dce110_clk_src_regs clk_src_regs[5]; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK) +}; + +#define abm_regs_init(id)\ + ABM_DCN32_REG_LIST_RI(id) + +static struct dce_abm_registers abm_regs[4]; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN32(_MASK) +}; + +#define audio_regs_init(id)\ + AUD_COMMON_REG_LIST_RI(id) + +static struct dce_audio_registers audio_regs[5]; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define vpg_regs_init(id)\ + VPG_DCN3_REG_LIST_RI(id) + +static struct dcn30_vpg_registers vpg_regs[10]; + +static const struct dcn30_vpg_shift vpg_shift = { + DCN3_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const 
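/* As elsewhere in DC, a block's field-shift and field-mask tables come from + * expanding the same MASK_SH_LIST macro twice, with __SHIFT and then _MASK + * pasted onto every field token. A hypothetical single-field expansion, + * illustrative names only: + * + * .FIELD = REG__FIELD__SHIFT, // in the *_shift struct + * .FIELD = REG__FIELD_MASK, // in the *_mask struct + */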
struct dcn30_vpg_mask vpg_mask = { + DCN3_VPG_MASK_SH_LIST(_MASK) +}; + +#define afmt_regs_init(id)\ + AFMT_DCN3_REG_LIST_RI(id) + +static struct dcn30_afmt_registers afmt_regs[6]; + +static const struct dcn30_afmt_shift afmt_shift = { + DCN3_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn30_afmt_mask afmt_mask = { + DCN3_AFMT_MASK_SH_LIST(_MASK) +}; + +#define apg_regs_init(id)\ + APG_DCN31_REG_LIST_RI(id) + +static struct dcn31_apg_registers apg_regs[4]; + +static const struct dcn31_apg_shift apg_shift = { + DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { + DCN31_APG_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs_init(id)\ + SE_DCN32_REG_LIST_RI(id) + +static struct dcn10_stream_enc_registers stream_enc_regs[5]; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; + + +#define aux_regs_init(id)\ + DCN2_AUX_REG_LIST_RI(id) + +static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5]; + +#define hpd_regs_init(id)\ + HPD_REG_LIST_RI(id) + +static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5]; + +#define link_regs_init(id, phyid)\ + ( \ + LE_DCN31_REG_LIST_RI(id), \ + UNIPHY_DCN2_REG_LIST_RI(id, phyid)\ + ) + /*DPCS_DCN31_REG_LIST(id),*/ \ + +static struct dcn10_link_enc_registers link_enc_regs[5]; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ + //DPCS_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ + //DPCS_DCN31_MASK_SH_LIST(_MASK) +}; + +#define hpo_dp_stream_encoder_reg_init(id)\ + DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) + +static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4]; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + + +#define hpo_dp_link_encoder_reg_init(id)\ + DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) + /*DCN3_1_RDPCSTX_REG_LIST(0),*/ + /*DCN3_1_RDPCSTX_REG_LIST(1),*/ + /*DCN3_1_RDPCSTX_REG_LIST(2),*/ + /*DCN3_1_RDPCSTX_REG_LIST(3),*/ + +static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2]; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { + DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { + DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) +}; + +#define dpp_regs_init(id)\ + DPP_REG_LIST_DCN30_COMMON_RI(id) + +static struct dcn3_dpp_registers dpp_regs[4]; + +static const struct dcn3_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT) +}; + +static const struct dcn3_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN30_COMMON(_MASK) +}; + + +#define opp_regs_init(id)\ + OPP_REG_LIST_DCN30_RI(id) + +static struct dcn20_opp_registers opp_regs[4]; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +#define aux_engine_regs_init(id)\ + ( \ + AUX_COMMON_REG_LIST0_RI(id), \ + SR_ARR_INIT(AUXN_IMPCAL, id, 0), \ + SR_ARR_INIT(AUXP_IMPCAL, id, 0), \ + SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \ + SR_ARR_INIT(AUX_RESET_MASK, 
id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\ + ) + +static struct dce110_aux_registers aux_engine_regs[5]; + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +#define dwbc_regs_dcn3_init(id)\ + DWBC_COMMON_REG_LIST_DCN30_RI(id) + +static struct dcn30_dwbc_registers dwbc30_regs[1]; + +static const struct dcn30_dwbc_shift dwbc30_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_dwbc_mask dwbc30_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define mcif_wb_regs_dcn3_init(id)\ + MCIF_WB_COMMON_REG_LIST_DCN32_RI(id) + +static struct dcn30_mmhubbub_registers mcif_wb30_regs[1]; + +static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; + +#define dsc_regsDCN20_init(id)\ + DSC_REG_LIST_DCN20_RI(id) + +static struct dcn20_dsc_registers dsc_regs[4]; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static struct dcn30_mpc_registers mpc_regs; + +#define dcn_mpc_regs_init() \ + MPC_REG_LIST_DCN3_2_RI(0),\ + MPC_REG_LIST_DCN3_2_RI(1),\ + MPC_REG_LIST_DCN3_2_RI(2),\ + MPC_REG_LIST_DCN3_2_RI(3),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\ + MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0) + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; + +#define optc_regs_init(id)\ + OPTC_COMMON_REG_LIST_DCN3_2_RI(id) + +static struct dcn_optc_registers optc_regs[4]; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK) +}; + +#define hubp_regs_init(id)\ + HUBP_REG_LIST_DCN32_RI(id) + +static struct dcn_hubp2_registers hubp_regs[4]; + + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN32(_MASK) +}; + +static struct dcn_hubbub_registers hubbub_reg; +#define hubbub_reg_init()\ + HUBBUB_REG_LIST_DCN32_RI(0) + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN32(_MASK) +}; + +static struct dccg_registers dccg_regs; + +#define dccg_regs_init()\ + DCCG_REG_LIST_DCN32_RI() + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN32(_MASK) +}; + + +#define SRII2(reg_name_pre, reg_name_post, id)\ + .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ + ## id ## _ ## reg_name_post ## _BASE_IDX) + \ + reg ## reg_name_pre ## id ## _ ## reg_name_post + + +#define HWSEQ_DCN32_REG_LIST()\ + SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DIO_MEM_PWR_CTRL), \ + SR(ODM_MEM_PWR_CTRL3), \ + SR(MMHUBBUB_MEM_PWR_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL2), \ + SR(DCFCLK_CNTL),\ + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ + 
SRII(PIXEL_RATE_CNTL, OTG, 0), \ + SRII(PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PIXEL_RATE_CNTL, OTG, 3),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ + SR(MICROSECOND_TIME_BASE_DIV), \ + SR(MILLISECOND_TIME_BASE_DIV), \ + SR(DISPCLK_FREQ_CHANGE_CNTL), \ + SR(RBBMIF_TIMEOUT_DIS), \ + SR(RBBMIF_TIMEOUT_DIS_2), \ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ + SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ + SR(MPC_CRC_CTRL), \ + SR(MPC_CRC_RESULT_GB), \ + SR(MPC_CRC_RESULT_C), \ + SR(MPC_CRC_RESULT_AR), \ + SR(DOMAIN0_PG_CONFIG), \ + SR(DOMAIN1_PG_CONFIG), \ + SR(DOMAIN2_PG_CONFIG), \ + SR(DOMAIN3_PG_CONFIG), \ + SR(DOMAIN16_PG_CONFIG), \ + SR(DOMAIN17_PG_CONFIG), \ + SR(DOMAIN18_PG_CONFIG), \ + SR(DOMAIN19_PG_CONFIG), \ + SR(DOMAIN0_PG_STATUS), \ + SR(DOMAIN1_PG_STATUS), \ + SR(DOMAIN2_PG_STATUS), \ + SR(DOMAIN3_PG_STATUS), \ + SR(DOMAIN16_PG_STATUS), \ + SR(DOMAIN17_PG_STATUS), \ + SR(DOMAIN18_PG_STATUS), \ + SR(DOMAIN19_PG_STATUS), \ + SR(D1VGA_CONTROL), \ + SR(D2VGA_CONTROL), \ + SR(D3VGA_CONTROL), \ + SR(D4VGA_CONTROL), \ + SR(D5VGA_CONTROL), \ + SR(D6VGA_CONTROL), \ + SR(DC_IP_REQUEST_CNTL), \ + SR(AZALIA_AUDIO_DTO), \ + SR(AZALIA_CONTROLLER_CLOCK_GATING) + +static struct dce_hwseq_registers hwseq_reg; + +#define hwseq_reg_init()\ + HWSEQ_DCN32_REG_LIST() + +#define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\ + HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ + HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ + HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, 
VGA_MEM_PWR_FORCE, mask_sh) + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN32_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN32_MASK_SH_LIST(_MASK) +}; +#define vmid_regs_init(id)\ + DCN20_VMID_REG_LIST_RI(id) + +static struct dcn_vmid_registers vmid_regs[16]; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static const struct resource_caps res_cap_dcn32 = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 5, + .num_stream_encoder = 5, + .num_hpo_dp_stream_encoder = 4, + .num_hpo_dp_link_encoder = 2, + .num_pll = 5, + .num_dwb = 1, + .num_ddc = 5, + .num_vmid = 16, + .num_mpc_3dlut = 4, + .num_dsc = 4, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true, + .ayuv = false, + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + + // 6:1 downscaling ratio: 1000/6 = 166.666 + .max_downscale_factor = { + .argb8888 = 167, + .nv12 = 167, + .fp16 = 167 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = false, + .pipe_split_policy = MPC_SPLIT_AVOID, // Due to CRB, no need to MPC split anymore + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 7680, /* up to 8K */ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable + .dmub_command_table = true, + .enable_mem_low_power = { + .bits = { + .vga = false, + .i2c = false, + .dmcu = false, // previously known to cause a hang on S3 cycles if enabled + .dscl = false, + .cm = false, + .mpc = false, + .optc = true, + } + }, + .use_max_lb = true, + .force_disable_subvp = false, + .exit_idle_opt_for_cursor_updates = true, + .using_dml2 = false, + .enable_single_display_2to1_odm_policy = true, + + /* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions */ + .enable_double_buffered_dsc_pg_support = true, + .enable_dp_dig_pixel_rate_div_policy = 1, + .allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback" + .alloc_extra_way_for_cursor = true, + .min_prefetch_in_strobe_ns = 60000, // 60us + .disable_unbounded_requesting = false, + .override_dispclk_programming = true, + .disable_fpo_optimizations = false, + .fpo_vactive_margin_us = 2000, // 2000us + .disable_fpo_vactive = false, + .disable_boot_optimizations = false, + .disable_subvp_high_refresh = false, + .disable_dp_plus_plus_wa = true, + .fpo_vactive_min_active_margin_us = 200, + .fpo_vactive_max_blank_us = 1000, + .enable_legacy_fast_update = false, +}; + +static struct dce_aux *dcn32_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT aux_engine_regs + aux_engine_regs_init(0), + aux_engine_regs_init(1), + aux_engine_regs_init(2), + aux_engine_regs_init(3), +
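/* Note the DCN3.2 twist relative to the DCN3.1x files earlier in this patch: + * register addresses are resolved at run time. The *_regs_init() (RI) macros + * fill the static reg arrays through the REG_STRUCT alias, using per-SoC base + * offsets from ctx->dcn_reg_offsets, instead of compile-time REG_LIST + * initializers. + */ +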
aux_engine_regs_init(4); + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs_init(id)\ + I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) + +static struct dce_i2c_registers i2c_hw_regs[5]; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct dce_i2c_hw *dcn32_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT i2c_hw_regs + i2c_inst_regs_init(1), + i2c_inst_regs_init(2), + i2c_inst_regs_init(3), + i2c_inst_regs_init(4), + i2c_inst_regs_init(5); + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} + +static struct clock_source *dcn32_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn31_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static struct hubbub *dcn32_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub2 = kzalloc(sizeof(struct dcn20_hubbub), + GFP_KERNEL); + + if (!hubbub2) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT hubbub_reg + hubbub_reg_init(); + +#undef REG_STRUCT +#define REG_STRUCT vmid_regs + vmid_regs_init(0), + vmid_regs_init(1), + vmid_regs_init(2), + vmid_regs_init(3), + vmid_regs_init(4), + vmid_regs_init(5), + vmid_regs_init(6), + vmid_regs_init(7), + vmid_regs_init(8), + vmid_regs_init(9), + vmid_regs_init(10), + vmid_regs_init(11), + vmid_regs_init(12), + vmid_regs_init(13), + vmid_regs_init(14), + vmid_regs_init(15); + + hubbub32_construct(hubbub2, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask, + ctx->dc->dml.ip.det_buffer_size_kbytes, + ctx->dc->dml.ip.pixel_chunk_size_kbytes, + ctx->dc->dml.ip.config_return_buffer_size_in_kbytes); + + + for (i = 0; i < res_cap_dcn32.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub2->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub2->base; +} + +static struct hubp *dcn32_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT hubp_regs + hubp_regs_init(0), + hubp_regs_init(1), + hubp_regs_init(2), + hubp_regs_init(3); + + if (hubp32_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static void dcn32_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN30_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn32_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn3_dpp *dpp3 = + kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + + 
if (!dpp3) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT dpp_regs + dpp_regs_init(0), + dpp_regs_init(1), + dpp_regs_init(2), + dpp_regs_init(3); + + if (dpp32_construct(dpp3, ctx, inst, + &dpp_regs[inst], &tf_shift, &tf_mask)) + return &dpp3->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp3); + return NULL; +} + +static struct mpc *dcn32_mpc_create( + struct dc_context *ctx, + int num_mpcc, + int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), + GFP_KERNEL); + + if (!mpc30) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT mpc_regs + dcn_mpc_regs_init(); + + dcn32_mpc_construct(mpc30, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + num_mpcc, + num_rmu); + + return &mpc30->base; +} + +static struct output_pixel_processor *dcn32_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp2 = + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp2) { + BREAK_TO_DEBUGGER(); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT opp_regs + opp_regs_init(0), + opp_regs_init(1), + opp_regs_init(2), + opp_regs_init(3); + + dcn20_opp_construct(opp2, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp2->base; +} + + +static struct timing_generator *dcn32_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT optc_regs + optc_regs_init(0), + optc_regs_init(1), + optc_regs_init(2), + optc_regs_init(3); + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn32_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn32_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT link_enc_aux_regs + aux_regs_init(0), + aux_regs_init(1), + aux_regs_init(2), + aux_regs_init(3), + aux_regs_init(4); + +#undef REG_STRUCT +#define REG_STRUCT link_enc_hpd_regs + hpd_regs_init(0), + hpd_regs_init(1), + hpd_regs_init(2), + hpd_regs_init(3), + hpd_regs_init(4); + +#undef REG_STRUCT +#define REG_STRUCT link_enc_regs + link_regs_init(0, A), + link_regs_init(1, B), + link_regs_init(2, C), + link_regs_init(3, D), + link_regs_init(4, E); + + dcn32_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc20->enc10.base; +} + +struct panel_cntl *dcn32_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dcn31_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dcn31_panel_cntl_construct(panel_cntl, init_data); + + return &panel_cntl->base; +} + +static void 
read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS, + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); + +} + +static struct audio *dcn32_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + +#undef REG_STRUCT +#define REG_STRUCT audio_regs + audio_regs_init(0), + audio_regs_init(1), + audio_regs_init(2), + audio_regs_init(3), + audio_regs_init(4); + + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct vpg *dcn32_vpg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); + + if (!vpg3) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT vpg_regs + vpg_regs_init(0), + vpg_regs_init(1), + vpg_regs_init(2), + vpg_regs_init(3), + vpg_regs_init(4), + vpg_regs_init(5), + vpg_regs_init(6), + vpg_regs_init(7), + vpg_regs_init(8), + vpg_regs_init(9); + + vpg3_construct(vpg3, ctx, inst, + &vpg_regs[inst], + &vpg_shift, + &vpg_mask); + + return &vpg3->base; +} + +static struct afmt *dcn32_afmt_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); + + if (!afmt3) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT afmt_regs + afmt_regs_init(0), + afmt_regs_init(1), + afmt_regs_init(2), + afmt_regs_init(3), + afmt_regs_init(4), + afmt_regs_init(5); + + afmt3_construct(afmt3, ctx, inst, + &afmt_regs[inst], + &afmt_shift, + &afmt_mask); + + return &afmt3->base; +} + +static struct apg *dcn31_apg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + + if (!apg31) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT apg_regs + apg_regs_init(0), + apg_regs_init(1), + apg_regs_init(2), + apg_regs_init(3); + + apg31_construct(apg31, ctx, inst, + &apg_regs[inst], + &apg_shift, + &apg_mask); + + return &apg31->base; +} + +static struct stream_encoder *dcn32_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGF) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn32_vpg_create(ctx, vpg_inst); + afmt = dcn32_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT stream_enc_regs + stream_enc_regs_init(0), + stream_enc_regs_init(1), + stream_enc_regs_init(2), + stream_enc_regs_init(3), + stream_enc_regs_init(4); + + dcn32_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, + eng_id, vpg, afmt, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static struct hpo_dp_stream_encoder *dcn32_hpo_dp_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; + struct vpg *vpg; + struct apg *apg; + uint32_t hpo_dp_inst; + uint32_t vpg_inst; + uint32_t apg_inst; + + ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); + hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + + /* Mapping of VPG register blocks to HPO DP block instance: + * VPG[6] 
-> HPO_DP[0]
+	 * VPG[7] -> HPO_DP[1]
+	 * VPG[8] -> HPO_DP[2]
+	 * VPG[9] -> HPO_DP[3]
+	 */
+	vpg_inst = hpo_dp_inst + 6;
+
+	/* Mapping of APG register blocks to HPO DP block instance:
+	 * APG[0] -> HPO_DP[0]
+	 * APG[1] -> HPO_DP[1]
+	 * APG[2] -> HPO_DP[2]
+	 * APG[3] -> HPO_DP[3]
+	 */
+	apg_inst = hpo_dp_inst;
+
+	/* allocate HPO stream encoder and create VPG sub-block */
+	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL);
+	vpg = dcn32_vpg_create(ctx, vpg_inst);
+	apg = dcn31_apg_create(ctx, apg_inst);
+
+	if (!hpo_dp_enc31 || !vpg || !apg) {
+		kfree(hpo_dp_enc31);
+		kfree(vpg);
+		kfree(apg);
+		return NULL;
+	}
+
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_stream_enc_regs
+	hpo_dp_stream_encoder_reg_init(0),
+	hpo_dp_stream_encoder_reg_init(1),
+	hpo_dp_stream_encoder_reg_init(2),
+	hpo_dp_stream_encoder_reg_init(3);
+
+	dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
+					hpo_dp_inst, eng_id, vpg, apg,
+					&hpo_dp_stream_enc_regs[hpo_dp_inst],
+					&hpo_dp_se_shift, &hpo_dp_se_mask);
+
+	return &hpo_dp_enc31->base;
+}
+
+static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create(
+	uint8_t inst,
+	struct dc_context *ctx)
+{
+	struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31;
+
+	/* allocate HPO link encoder */
+	hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+	if (!hpo_dp_enc31)
+		return NULL;
+
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_link_enc_regs
+	hpo_dp_link_encoder_reg_init(0),
+	hpo_dp_link_encoder_reg_init(1);
+
+	hpo_dp_link_encoder32_construct(hpo_dp_enc31, ctx, inst,
+			&hpo_dp_link_enc_regs[inst],
+			&hpo_dp_le_shift, &hpo_dp_le_mask);
+
+	return &hpo_dp_enc31->base;
+}
+
+static struct dce_hwseq *dcn32_hwseq_create(
+	struct dc_context *ctx)
+{
+	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+#undef REG_STRUCT
+#define REG_STRUCT hwseq_reg
+	hwseq_reg_init();
+
+	if (hws) {
+		hws->ctx = ctx;
+		hws->regs = &hwseq_reg;
+		hws->shifts = &hwseq_shift;
+		hws->masks = &hwseq_mask;
+	}
+	return hws;
+}
+static const struct resource_create_funcs res_create_funcs = {
+	.read_dce_straps = read_dce_straps,
+	.create_audio = dcn32_create_audio,
+	.create_stream_encoder = dcn32_stream_encoder_create,
+	.create_hpo_dp_stream_encoder = dcn32_hpo_dp_stream_encoder_create,
+	.create_hpo_dp_link_encoder = dcn32_hpo_dp_link_encoder_create,
+	.create_hwseq = dcn32_hwseq_create,
+};
+
+static void dcn32_resource_destruct(struct dcn32_resource_pool *pool)
+{
+	unsigned int i;
+
+	for (i = 0; i < pool->base.stream_enc_count; i++) {
+		if (pool->base.stream_enc[i] != NULL) {
+			if (pool->base.stream_enc[i]->vpg != NULL) {
+				kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
+				pool->base.stream_enc[i]->vpg = NULL;
+			}
+			if (pool->base.stream_enc[i]->afmt != NULL) {
+				kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
+				pool->base.stream_enc[i]->afmt = NULL;
+			}
+			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+			pool->base.stream_enc[i] = NULL;
+		}
+	}
+
+	for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
+		if (pool->base.hpo_dp_stream_enc[i] != NULL) {
+			if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
+				kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
+				pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
+			}
+			if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
+				kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
+				pool->base.hpo_dp_stream_enc[i]->apg = NULL;
+			}
+			kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
pool->base.hpo_dp_stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { + if (pool->base.hpo_dp_link_enc[i] != NULL) { + kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); + pool->base.hpo_dp_link_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(TO_DCN20_HUBBUB(pool->base.hubbub)); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn32_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { + if (pool->base.mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->base.mpc_lut[i]); + pool->base.mpc_lut[i] = NULL; + } + if (pool->base.mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->base.mpc_shaper[i]); + pool->base.mpc_shaper[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.multiple_abms[i] != NULL) + dce_abm_destroy(&pool->base.multiple_abms[i]); + } + + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); + + if (pool->base.oem_device != NULL) { + struct dc *dc = pool->base.oem_device->ctx->dc; + + dc->link_srv->destroy_ddc_service(&pool->base.oem_device); + } +} + + +static bool dcn32_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t dwb_count = pool->res_cap->num_dwb; + + for (i = 0; i < dwb_count; i++) 
{ + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), + GFP_KERNEL); + + if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return false; + } + +#undef REG_STRUCT +#define REG_STRUCT dwbc30_regs + dwbc_regs_dcn3_init(0); + + dcn30_dwbc_construct(dwbc30, ctx, + &dwbc30_regs[i], + &dwbc30_shift, + &dwbc30_mask, + i); + + pool->dwbc[i] = &dwbc30->base; + } + return true; +} + +static bool dcn32_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t dwb_count = pool->res_cap->num_dwb; + + for (i = 0; i < dwb_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), + GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + +#undef REG_STRUCT +#define REG_STRUCT mcif_wb30_regs + mcif_wb_regs_dcn3_init(0); + + dcn32_mmhubbub_construct(mcif_wb30, ctx, + &mcif_wb30_regs[i], + &mcif_wb30_shift, + &mcif_wb30_mask, + i); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +static struct display_stream_compressor *dcn32_dsc_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT dsc_regs + dsc_regsDCN20_init(0), + dsc_regsDCN20_init(1), + dsc_regsDCN20_init(2), + dsc_regsDCN20_init(3); + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + + dsc->max_image_width = 6016; + + return &dsc->base; +} + +static void dcn32_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn32_resource_pool *dcn32_pool = TO_DCN32_RES_POOL(*pool); + + dcn32_resource_destruct(dcn32_pool); + kfree(dcn32_pool); + *pool = NULL; +} + +bool dcn32_acquire_post_bldn_3dlut( + struct resource_context *res_ctx, + const struct resource_pool *pool, + int mpcc_id, + struct dc_3dlut **lut, + struct dc_transfer_func **shaper) +{ + bool ret = false; + + ASSERT(*lut == NULL && *shaper == NULL); + *lut = NULL; + *shaper = NULL; + + if (!res_ctx->is_mpc_3dlut_acquired[mpcc_id]) { + *lut = pool->mpc_lut[mpcc_id]; + *shaper = pool->mpc_shaper[mpcc_id]; + res_ctx->is_mpc_3dlut_acquired[mpcc_id] = true; + ret = true; + } + return ret; +} + +bool dcn32_release_post_bldn_3dlut( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_3dlut **lut, + struct dc_transfer_func **shaper) +{ + int i; + bool ret = false; + + for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { + if (pool->mpc_lut[i] == *lut && pool->mpc_shaper[i] == *shaper) { + res_ctx->is_mpc_3dlut_acquired[i] = false; + pool->mpc_lut[i]->state.raw = 0; + *lut = NULL; + *shaper = NULL; + ret = true; + break; + } + } + return ret; +} + +static void dcn32_enable_phantom_plane(struct dc *dc, + struct dc_state *context, + struct dc_stream_state *phantom_stream, + unsigned int dc_pipe_idx) +{ + struct dc_plane_state *phantom_plane = NULL; + struct dc_plane_state *prev_phantom_plane = NULL; + struct pipe_ctx *curr_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx]; + + while (curr_pipe) { + if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) + phantom_plane = prev_phantom_plane; + else + phantom_plane = dc_state_create_phantom_plane(dc, context, curr_pipe->plane_state); + + memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); + memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality, + 
sizeof(phantom_plane->scaling_quality));
+	memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect));
+	memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect));
+	memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect));
+	memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size,
+			sizeof(phantom_plane->plane_size));
+	memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info,
+			sizeof(phantom_plane->tiling_info));
+	memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc));
+	phantom_plane->format = curr_pipe->plane_state->format;
+	phantom_plane->rotation = curr_pipe->plane_state->rotation;
+	phantom_plane->visible = curr_pipe->plane_state->visible;
+
+	/* Shadow pipe has small viewport. */
+	phantom_plane->clip_rect.y = 0;
+	phantom_plane->clip_rect.height = phantom_stream->src.height;
+
+	dc_state_add_phantom_plane(dc, phantom_stream, phantom_plane, context);
+
+	curr_pipe = curr_pipe->bottom_pipe;
+	prev_phantom_plane = phantom_plane;
+	}
+}
+
+static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
+		struct dc_state *context,
+		display_e2e_pipe_params_st *pipes,
+		unsigned int pipe_cnt,
+		unsigned int dc_pipe_idx)
+{
+	struct dc_stream_state *phantom_stream = NULL;
+	struct pipe_ctx *ref_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx];
+
+	phantom_stream = dc_state_create_phantom_stream(dc, context, ref_pipe->stream);
+
+	/* stream has limited viewport and small timing */
+	memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
+	memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src));
+	memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
+	DC_FP_START();
+	dcn32_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream, pipes, pipe_cnt, dc_pipe_idx);
+	DC_FP_END();
+
+	dc_state_add_phantom_stream(dc, context, phantom_stream, ref_pipe->stream);
+	return phantom_stream;
+}
+
+/* TODO: Input to this function should indicate which pipe indexes (or streams)
+ * require a phantom pipe / stream
+ */
+void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
+		display_e2e_pipe_params_st *pipes,
+		unsigned int pipe_cnt,
+		unsigned int index)
+{
+	struct dc_stream_state *phantom_stream = NULL;
+	unsigned int i;
+
+	// The index of the DC pipe passed into this function is guaranteed to
+	// be a valid candidate for SubVP (i.e. has a plane, stream, doesn't
+	// already have phantom pipe assigned, etc.) by previous checks.
+	phantom_stream = dcn32_enable_phantom_stream(dc, context, pipes, pipe_cnt, index);
+	dcn32_enable_phantom_plane(dc, context, phantom_stream, index);
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		// Build scaling params for phantom pipes which were newly added.
+		// We determine which phantom pipes were added by comparing with
+		// the phantom stream.
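+		// (Per the check below: a pipe counts as a newly added phantom
+		// pipe when it has a plane, its stream matches phantom_stream,
+		// and the state marks it as SUBVP_PHANTOM.)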
+ if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream && + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + pipe->stream->use_dynamic_meta = false; + pipe->plane_state->flip_immediate = false; + if (!resource_build_scaling_params(pipe)) { + // Log / remove phantom pipes since failed to build scaling params + } + } + } +} + +static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate) +{ + bool out = false; + + BW_VAL_TRACE_SETUP(); + + int vlevel = 0; + int pipe_cnt = 0; + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + + /* To handle Freesync properly, setting FreeSync DML parameters + * to its default state for the first stage of validation + */ + context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; + context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true; + + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + DC_FP_START(); + out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + DC_FP_END(); + + if (pipe_cnt == 0) + goto validate_out; + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + goto validate_out; + } + + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + + dcn32_override_min_req_memclk(dc, context); + dcn32_override_min_req_dcfclk(dc, context); + + BW_VAL_TRACE_END_WATERMARKS(); + + goto validate_out; + +validate_fail: + DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", + dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + + BW_VAL_TRACE_SKIP(fail); + out = false; + +validate_out: + kfree(pipes); + + BW_VAL_TRACE_FINISH(); + + return out; +} + +bool dcn32_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + if (dc->debug.using_dml2) + out = dml2_validate(dc, context, fast_validate); + else + out = dml1_validate(dc, context, fast_validate); + return out; +} + +int dcn32_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int i, pipe_cnt; + struct resource_context *res_ctx = &context->res_ctx; + struct pipe_ctx *pipe = NULL; + bool subvp_in_use = false; + struct dc_crtc_timing *timing; + + dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + + if (!res_ctx->pipe_ctx[i].stream) + continue; + pipe = &res_ctx->pipe_ctx[i]; + timing = &pipe->stream->timing; + + pipes[pipe_cnt].pipe.src.gpuvm = true; + DC_FP_START(); + dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt); + DC_FP_END(); + pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; + if (dc->config.enable_windowed_mpo_odm && + dc->debug.enable_single_display_2to1_odm_policy) { + switch (resource_get_odm_slice_count(pipe)) { + case 2: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; + break; + case 4: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; + break; + default: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + } + } else { + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + } + pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet + 
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; + pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19; + + /* Only populate DML input with subvp info for full updates. + * This is just a workaround -- needs a proper fix. + */ + if (!fast_validate) { + switch (dc_state_get_pipe_subvp_type(context, pipe)) { + case SUBVP_MAIN: + pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport; + subvp_in_use = true; + break; + case SUBVP_PHANTOM: + pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_phantom_pipe; + pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = dm_use_mall_static_screen_disable; + // Disallow unbounded req for SubVP according to DCHUB programming guide + pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; + break; + case SUBVP_NONE: + pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_disable; + pipes[pipe_cnt].pipe.src.use_mall_for_static_screen = dm_use_mall_static_screen_disable; + break; + default: + break; + } + } + + pipes[pipe_cnt].dout.dsc_input_bpc = 0; + if (pipes[pipe_cnt].dout.dsc_enable) { + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + pipes[pipe_cnt].dout.dsc_input_bpc = 8; + break; + case COLOR_DEPTH_101010: + pipes[pipe_cnt].dout.dsc_input_bpc = 10; + break; + case COLOR_DEPTH_121212: + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + break; + default: + ASSERT(0); + break; + } + } + + + pipe_cnt++; + } + + /* For DET allocation, we don't want to use DML policy (not optimal for utilizing all + * the DET available for each pipe). Use the DET override input to maintain our driver + * policy. + */ + dcn32_set_det_allocations(dc, context, pipes); + + // In general cases we want to keep the dram clock change requirement + // (prefer configs that support MCLK switch). 
Only override to false
+	// for SubVP
+	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || subvp_in_use)
+		context->bw_ctx.dml.soc.dram_clock_change_requirement_final = false;
+	else
+		context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;
+
+	return pipe_cnt;
+}
+
+static struct dc_cap_funcs cap_funcs = {
+	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
+	.get_subvp_en = dcn32_subvp_in_use,
+};
+
+void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
+				display_e2e_pipe_params_st *pipes,
+				int pipe_cnt,
+				int vlevel)
+{
+	DC_FP_START();
+	dcn32_calculate_wm_and_dlg_fpu(dc, context, pipes, pipe_cnt, vlevel);
+	DC_FP_END();
+}
+
+static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+	DC_FP_START();
+	dcn32_update_bw_bounding_box_fpu(dc, bw_params);
+	if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+		dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
+	DC_FP_END();
+}
+
+static struct resource_funcs dcn32_res_pool_funcs = {
+	.destroy = dcn32_destroy_resource_pool,
+	.link_enc_create = dcn32_link_encoder_create,
+	.link_enc_create_minimal = NULL,
+	.panel_cntl_create = dcn32_panel_cntl_create,
+	.validate_bandwidth = dcn32_validate_bandwidth,
+	.calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg,
+	.populate_dml_pipes = dcn32_populate_dml_pipes_from_context,
+	.acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe,
+	.acquire_free_pipe_as_secondary_opp_head = dcn32_acquire_free_pipe_as_secondary_opp_head,
+	.release_pipe = dcn20_release_pipe,
+	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
+	.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
+	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
+	.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
+	.set_mcif_arb_params = dcn30_set_mcif_arb_params,
+	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
+	.acquire_post_bldn_3dlut = dcn32_acquire_post_bldn_3dlut,
+	.release_post_bldn_3dlut = dcn32_release_post_bldn_3dlut,
+	.update_bw_bounding_box = dcn32_update_bw_bounding_box,
+	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
+	.add_phantom_pipes = dcn32_add_phantom_pipes,
+	.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
+};
+
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+	uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+	/* DCN32 supports max 4 pipes */
+	value = value & 0xf;
+	return value;
+}
+
+
+static bool dcn32_resource_construct(
+	uint8_t num_virtual_links,
+	struct dc *dc,
+	struct dcn32_resource_pool *pool)
+{
+	int i, j;
+	struct dc_context *ctx = dc->ctx;
+	struct irq_service_init_data init_data;
+	struct ddc_service_init_data ddc_init_data = {0};
+	uint32_t pipe_fuses = 0;
+	uint32_t num_pipes = 4;
+
+#undef REG_STRUCT
+#define REG_STRUCT bios_regs
+	bios_regs_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT clk_src_regs
+	clk_src_regs_init(0, A),
+	clk_src_regs_init(1, B),
+	clk_src_regs_init(2, C),
+	clk_src_regs_init(3, D),
+	clk_src_regs_init(4, E);
+
+#undef REG_STRUCT
+#define REG_STRUCT abm_regs
+	abm_regs_init(0),
+	abm_regs_init(1),
+	abm_regs_init(2),
+	abm_regs_init(3);
+
+#undef REG_STRUCT
+#define REG_STRUCT dccg_regs
+	dccg_regs_init();
+
+	DC_FP_START();
+
+	ctx->dc_bios->regs = &bios_regs;
+
+	pool->base.res_cap = &res_cap_dcn32;
+	/* max number of pipes for ASIC
before checking for pipe fuses */
+	num_pipes = pool->base.res_cap->num_timing_generator;
+	pipe_fuses = read_pipe_fuses(ctx);
+
+	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
+		if (pipe_fuses & 1 << i)
+			num_pipes--;
+
+	if (pipe_fuses & 1)
+		ASSERT(0); //Unexpected - Pipe 0 should always be fully functional!
+
+	if (pipe_fuses & CC_DC_PIPE_DIS__DC_FULL_DIS_MASK)
+		ASSERT(0); //Entire DCN is harvested!
+
+	/* within dml lib, initial value is hard coded, if ASIC pipe is fused, the
+	 * value will be changed, update max_num_dpp and max_num_otg for dml.
+	 */
+	dcn3_2_ip.max_num_dpp = num_pipes;
+	dcn3_2_ip.max_num_otg = num_pipes;
+
+	pool->base.funcs = &dcn32_res_pool_funcs;
+
+	/*************************************************
+	 *  Resource + asic cap hardcoding               *
+	 *************************************************/
+	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+	pool->base.timing_generator_count = num_pipes;
+	pool->base.pipe_count = num_pipes;
+	pool->base.mpcc_count = num_pipes;
+	dc->caps.max_downscale_ratio = 600;
+	dc->caps.i2c_speed_in_khz = 100;
+	dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
+	/* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/
+	dc->caps.max_cursor_size = 64;
+	dc->caps.min_horizontal_blanking_period = 80;
+	dc->caps.dmdata_alloc_size = 2048;
+	dc->caps.mall_size_per_mem_channel = 4;
+	dc->caps.mall_size_total = 0;
+	dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
+
+	dc->caps.cache_line_size = 64;
+	dc->caps.cache_num_ways = 16;
+
+	/* Calculate the available MALL space */
+	dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
+		dc, dc->ctx->dc_bios->vram_info.num_chans) *
+		dc->caps.mall_size_per_mem_channel * 1024 * 1024;
+	dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
+
+	dc->caps.subvp_fw_processing_delay_us = 15;
+	dc->caps.subvp_drr_max_vblank_margin_us = 40;
+	dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+	dc->caps.subvp_swath_height_margin_lines = 16;
+	dc->caps.subvp_pstate_allow_width_us = 20;
+	dc->caps.subvp_vertical_int_margin_us = 30;
+	dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin
+
+	dc->caps.max_slave_planes = 2;
+	dc->caps.max_slave_yuv_planes = 2;
+	dc->caps.max_slave_rgb_planes = 2;
+	dc->caps.post_blend_color_processing = true;
+	dc->caps.force_dp_tps4_for_cp2520 = true;
+	if (dc->config.forceHBR2CP2520)
+		dc->caps.force_dp_tps4_for_cp2520 = false;
+	dc->caps.dp_hpo = true;
+	dc->caps.dp_hdmi21_pcon_support = true;
+	dc->caps.edp_dsc_support = true;
+	dc->caps.extended_aux_timeout_support = true;
+	dc->caps.dmcub_support = true;
+	dc->caps.seamless_odm = true;
+	dc->caps.max_v_total = (1 << 15) - 1;
+
+	/* Color pipeline capabilities */
+	dc->caps.color.dpp.dcn_arch = 1;
+	dc->caps.color.dpp.input_lut_shared = 0;
+	dc->caps.color.dpp.icsc = 1;
+	dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
+	dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+	dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+	dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
+	dc->caps.color.dpp.dgam_rom_caps.pq = 1;
+	dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
+	dc->caps.color.dpp.post_csc = 1;
+	dc->caps.color.dpp.gamma_corr = 1;
+	dc->caps.color.dpp.dgam_rom_for_yuv = 0;
+
+	dc->caps.color.dpp.hw_3d_lut = 1;
+	dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
+	// no OGAM ROM on DCN2 and later ASICs
+	dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+	dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //4, configurable to be before or after BLND in MPCC + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + /* Use pipe context based otg sync logic */ + dc->config.use_pipe_ctx_sync_logic = true; + + dc->config.dc_mode_clk_limit_support = true; + /* read VBIOS LTTPR caps */ + { + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + /* interop bit is implicit */ + { + dc->caps.vbios_lttpr_aware = true; + } + } + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock Sources for Pixel Clock*/ + pool->base.clock_sources[DCN32_CLK_SRC_PLL0] = + dcn32_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN32_CLK_SRC_PLL1] = + dcn32_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN32_CLK_SRC_PLL2] = + dcn32_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN32_CLK_SRC_PLL3] = + dcn32_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN32_CLK_SRC_PLL4] = + dcn32_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + + pool->base.clk_src_count = DCN32_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn32_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* DCCG */ + pool->base.dccg = dccg32_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* DML */ + dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32); + + /* IRQ Service */ + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn32_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* HUBBUB */ + pool->base.hubbub = dcn32_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs, TGs, ABMs */ + for (i = 0, j = 0; i < pool->base.res_cap->num_timing_generator; i++) { + + /* if pipe is disabled, skip instance 
of HW pipe, + * i.e, skip ASIC register instance + */ + if (pipe_fuses & 1 << i) + continue; + + /* HUBPs */ + pool->base.hubps[j] = dcn32_hubp_create(ctx, i); + if (pool->base.hubps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hubps!\n"); + goto create_fail; + } + + /* DPPs */ + pool->base.dpps[j] = dcn32_dpp_create(ctx, i); + if (pool->base.dpps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + + /* OPPs */ + pool->base.opps[j] = dcn32_opp_create(ctx, i); + if (pool->base.opps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + + /* TGs */ + pool->base.timing_generators[j] = dcn32_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + + /* ABMs */ + pool->base.multiple_abms[j] = dmub_abm_create(ctx, + &abm_regs[i], + &abm_shift, + &abm_mask); + if (pool->base.multiple_abms[j] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* index for resource pool arrays for next valid pipe */ + j++; + } + + /* PSR */ + pool->base.psr = dmub_psr_create(ctx); + if (pool->base.psr == NULL) { + dm_error("DC: failed to create psr obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* MPCCs */ + pool->base.mpc = dcn32_mpc_create(ctx, pool->base.res_cap->num_timing_generator, pool->base.res_cap->num_mpc_3dlut); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + /* DSCs */ + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn32_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB */ + if (!dcn32_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + /* MMHUBBUB */ + if (!dcn32_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn32_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn32_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + /* HW Sequencer init functions and Plane caps */ + dcn32_hw_sequencer_init_functions(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { + ddc_init_data.ctx = dc->ctx; + ddc_init_data.link = NULL; + ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + ddc_init_data.id.enum_id = 0; + ddc_init_data.id.type = OBJECT_TYPE_GENERIC; + pool->base.oem_device = 
dc->link_srv->create_ddc_service(&ddc_init_data); + } else { + pool->base.oem_device = NULL; + } + + dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; + dc->dml2_options.use_native_pstate_optimization = false; + dc->dml2_options.use_native_soc_bb_construction = true; + dc->dml2_options.minimize_dispclk_using_odm = true; + + dc->dml2_options.callbacks.dc = dc; + dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; + dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; + dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; + dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; + dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; + + dc->dml2_options.svp_pstate.callbacks.dc = dc; + dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; + dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; + dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream; + dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; + dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; + dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; + dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; + dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; + dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; + + dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; + dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us; + dc->dml2_options.svp_pstate.subvp_pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us; + dc->dml2_options.svp_pstate.subvp_swath_height_margin_lines = dc->caps.subvp_swath_height_margin_lines; + + dc->dml2_options.svp_pstate.force_disable_subvp = dc->debug.force_disable_subvp; + dc->dml2_options.svp_pstate.force_enable_subvp = dc->debug.force_subvp_mclk_switch; + + dc->dml2_options.mall_cfg.cache_line_size_bytes = dc->caps.cache_line_size; + dc->dml2_options.mall_cfg.cache_num_ways = dc->caps.cache_num_ways; + dc->dml2_options.mall_cfg.max_cab_allocation_bytes = dc->caps.max_cab_allocation_bytes; + dc->dml2_options.mall_cfg.mblk_height_4bpe_pixels = DCN3_2_MBLK_HEIGHT_4BPE; + dc->dml2_options.mall_cfg.mblk_height_8bpe_pixels = DCN3_2_MBLK_HEIGHT_8BPE; + dc->dml2_options.mall_cfg.mblk_size_bytes = DCN3_2_MALL_MBLK_SIZE_BYTES; + 
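+	/* MALL block geometry cross-check (from the DCN3_2_MBLK_* defines in
+	 * dcn32_resource.h): 128 x 128 pixels at 4 bytes per element and
+	 * 128 x 64 pixels at 8 bytes per element both come to 65536 bytes,
+	 * matching DCN3_2_MALL_MBLK_SIZE_BYTES.
+	 */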
dc->dml2_options.mall_cfg.mblk_width_pixels = DCN3_2_MBLK_WIDTH;
+
+	dc->dml2_options.max_segments_per_hubp = 18;
+	dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;
+	dc->dml2_options.map_dc_pipes_with_callbacks = true;
+
+	if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
+		dc->config.sdpif_request_limit_words_per_umc = 16;
+
+	DC_FP_END();
+
+	return true;
+
+create_fail:
+
+	DC_FP_END();
+
+	dcn32_resource_destruct(pool);
+
+	return false;
+}
+
+struct resource_pool *dcn32_create_resource_pool(
+		const struct dc_init_data *init_data,
+		struct dc *dc)
+{
+	struct dcn32_resource_pool *pool =
+		kzalloc(sizeof(struct dcn32_resource_pool), GFP_KERNEL);
+
+	if (!pool)
+		return NULL;
+
+	if (dcn32_resource_construct(init_data->num_virtual_links, dc, pool))
+		return &pool->base;
+
+	BREAK_TO_DEBUGGER();
+	kfree(pool);
+	return NULL;
+}
+
+/*
+ * Find the most optimal free pipe from res_ctx, which could be used as a
+ * secondary dpp pipe for input opp head pipe.
+ *
+ * a free pipe - a pipe in input res_ctx not yet used for any streams or
+ * planes.
+ * secondary dpp pipe - a pipe that gets inserted into a head OPP pipe's MPC
+ * blending tree. This is typically used for rendering MPO planes or
+ * additional offset areas in MPCC combine.
+ *
+ * Hardware Transition Minimization Algorithm for Finding a Secondary DPP Pipe
+ * -------------------------------------------------------------------------
+ *
+ * PROBLEM:
+ *
+ * 1. There is a hardware limitation that a secondary DPP pipe cannot be
+ * transferred from one MPC blending tree to the other in a single frame.
+ * Otherwise it could cause glitches on the screen.
+ *
+ * For instance, we cannot transition from state 1 to state 2 in one frame. This
+ * is because PIPE1 is transferred from PIPE0's MPC blending tree over to
+ * PIPE2's MPC blending tree, which is not supported by hardware.
+ * To support this transition we need to first remove PIPE1 from PIPE0's MPC
+ * blending tree in one frame and then insert PIPE1 to PIPE2's MPC blending tree
+ * in the next frame. This is not optimal as it will delay the flip for two
+ * frames.
+ *
+ * State 1:
+ * PIPE0 -- secondary DPP pipe --> (PIPE1)
+ * PIPE2 -- secondary DPP pipe --> NONE
+ *
+ * State 2:
+ * PIPE0 -- secondary DPP pipe --> NONE
+ * PIPE2 -- secondary DPP pipe --> (PIPE1)
+ *
+ * 2. We want, in general, to minimize unnecessary changes in pipe topology.
+ * If a pipe is already added in the current blending tree and there are no
+ * changes to plane topology, we don't want to swap it with another free pipe
+ * unnecessarily in every update. Powering up and down a pipe would require a
+ * full update which delays the flip for 1 frame. If we use the original pipe
+ * we don't have to toggle its power. So we can flip faster.
+ */
+static int find_optimal_free_pipe_as_secondary_dpp_pipe(
+		const struct resource_context *cur_res_ctx,
+		struct resource_context *new_res_ctx,
+		const struct resource_pool *pool,
+		const struct pipe_ctx *new_opp_head)
+{
+	const struct pipe_ctx *cur_opp_head;
+	int free_pipe_idx;
+
+	cur_opp_head = &cur_res_ctx->pipe_ctx[new_opp_head->pipe_idx];
+	free_pipe_idx = resource_find_free_pipe_used_in_cur_mpc_blending_tree(
+			cur_res_ctx, new_res_ctx, cur_opp_head);
+
+	/* Up until here if we have not found a free secondary pipe, we will
+	 * need to wait for at least one frame to complete the transition
+	 * sequence.
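+	 *
+	 * (Search order implemented below: (1) reuse the free pipe already in
+	 * the current MPC blending tree; (2) a free pipe not used in the
+	 * current res_ctx; (3) a free pipe currently used as a secondary DPP
+	 * in MPCC combine; (4) any free pipe.)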
+	 */
+	if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+		free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
+				cur_res_ctx, new_res_ctx, pool);
+
+	/* Up until here if we have not found a free secondary pipe, we will
+	 * need to wait for at least two frames to complete the transition
+	 * sequence. It really doesn't matter which pipe we decide to take from
+	 * the currently enabled pipes. It won't save us any frame time whether
+	 * we swap one pipe or several.
+	 */
+	if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+		free_pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
+				cur_res_ctx, new_res_ctx, pool);
+
+	if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+		free_pipe_idx = resource_find_any_free_pipe(new_res_ctx, pool);
+
+	return free_pipe_idx;
+}
+
+static struct pipe_ctx *find_idle_secondary_pipe_check_mpo(
+		struct resource_context *res_ctx,
+		const struct resource_pool *pool,
+		const struct pipe_ctx *primary_pipe)
+{
+	int i;
+	struct pipe_ctx *secondary_pipe = NULL;
+	struct pipe_ctx *next_odm_mpo_pipe = NULL;
+	int primary_index, preferred_pipe_idx;
+	struct pipe_ctx *old_primary_pipe = NULL;
+
+	/*
+	 * Modified from find_idle_secondary_pipe
+	 * With windowed MPO and ODM, we want to avoid the case where we want a
+	 * free pipe for the left side but the free pipe is being used on the
+	 * right side.
+	 * Add a check on current_state: if the primary_pipe is the left side,
+	 * check the right side (primary_pipe->next_odm_pipe) to see if
+	 * it is using a pipe for MPO (primary_pipe->next_odm_pipe->bottom_pipe)
+	 * - If so, then don't use this pipe
+	 * EXCEPTION - 3 plane (2 MPO plane) case
+	 * - in this case, the primary pipe has already gotten a free pipe for the
+	 * MPO window on the left
+	 * - when it tries to get a free pipe for the MPO window on the right,
+	 * it will see that it is already assigned to the right side
+	 * (primary_pipe->next_odm_pipe). But in this case, we want this
+	 * free pipe, since it will be for the right side. So add an
+	 * additional condition: skipping the free pipe on the right only
+	 * applies if the primary pipe has no bottom pipe currently assigned.
+	 */
+	if (primary_pipe) {
+		primary_index = primary_pipe->pipe_idx;
+		old_primary_pipe = &primary_pipe->stream->ctx->dc->current_state->res_ctx.pipe_ctx[primary_index];
+		if ((old_primary_pipe->next_odm_pipe) && (old_primary_pipe->next_odm_pipe->bottom_pipe)
+			&& (!primary_pipe->bottom_pipe))
+			next_odm_mpo_pipe = old_primary_pipe->next_odm_pipe->bottom_pipe;
+
+		preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
+		if ((res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) &&
+			!(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == preferred_pipe_idx)) {
+			secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+			secondary_pipe->pipe_idx = preferred_pipe_idx;
+		}
+	}
+
+	/*
+	 * search backwards for the second pipe to keep pipe
+	 * assignment more consistent
+	 */
+	if (!secondary_pipe)
+		for (i = pool->pipe_count - 1; i >= 0; i--) {
+			if ((res_ctx->pipe_ctx[i].stream == NULL) &&
+				!(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == i)) {
+				secondary_pipe = &res_ctx->pipe_ctx[i];
+				secondary_pipe->pipe_idx = i;
+				break;
+			}
+		}
+
+	return secondary_pipe;
+}
+
+static struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
+		struct dc_state *state,
+		const struct resource_pool *pool,
+		struct dc_stream_state *stream,
+		const struct pipe_ctx *head_pipe)
+{
+	struct resource_context *res_ctx = &state->res_ctx;
+	struct pipe_ctx *idle_pipe, *pipe;
+	struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx;
+	int head_index;
+
+	if (!head_pipe)
+		ASSERT(0);
+
+	/*
+	 * Modified from dcn20_acquire_idle_pipe_for_layer
+	 * Check if head_pipe in old_context already has bottom_pipe allocated.
+	 * - If so, check if that pipe is available in the current context.
+	 * -- If so, reuse pipe from old_context
+	 */
+	head_index = head_pipe->pipe_idx;
+	pipe = &old_ctx->pipe_ctx[head_index];
+	if (pipe->bottom_pipe && res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx].stream == NULL) {
+		idle_pipe = &res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx];
+		idle_pipe->pipe_idx = pipe->bottom_pipe->pipe_idx;
+	} else {
+		idle_pipe = find_idle_secondary_pipe_check_mpo(res_ctx, pool, head_pipe);
+		if (!idle_pipe)
+			return NULL;
+	}
+
+	idle_pipe->stream = head_pipe->stream;
+	idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
+	idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
+
+	idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
+	idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
+	idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
+	idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
+
+	return idle_pipe;
+}
+
+static int find_optimal_free_pipe_as_secondary_opp_head(
+		const struct resource_context *cur_res_ctx,
+		struct resource_context *new_res_ctx,
+		const struct resource_pool *pool,
+		const struct pipe_ctx *new_otg_master)
+{
+	const struct pipe_ctx *cur_otg_master;
+	int free_pipe_idx;
+
+	cur_otg_master = &cur_res_ctx->pipe_ctx[new_otg_master->pipe_idx];
+	free_pipe_idx = resource_find_free_pipe_used_as_sec_opp_head_by_cur_otg_master(
+			cur_res_ctx, new_res_ctx, cur_otg_master);
+
+	/* Up until here if we have not found a free secondary pipe, we will
+	 * need to wait for at least one frame to complete the transition
+	 * sequence.
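+	 * The search here mirrors find_optimal_free_pipe_as_secondary_dpp_pipe()
+	 * above, minus the MPCC-combine fallback step.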
+	 */
+	if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+		free_pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
+				cur_res_ctx, new_res_ctx, pool);
+
+	if (free_pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+		free_pipe_idx = resource_find_any_free_pipe(new_res_ctx, pool);
+
+	return free_pipe_idx;
+}
+
+struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
+		const struct dc_state *cur_ctx,
+		struct dc_state *new_ctx,
+		const struct resource_pool *pool,
+		const struct pipe_ctx *opp_head_pipe)
+{
+
+	int free_pipe_idx;
+	struct pipe_ctx *free_pipe;
+
+	if (!opp_head_pipe->stream->ctx->dc->config.enable_windowed_mpo_odm)
+		return dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
+				new_ctx, pool, opp_head_pipe->stream, opp_head_pipe);
+
+	free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe(
+			&cur_ctx->res_ctx, &new_ctx->res_ctx,
+			pool, opp_head_pipe);
+	if (free_pipe_idx >= 0) {
+		free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx];
+		free_pipe->pipe_idx = free_pipe_idx;
+		free_pipe->stream = opp_head_pipe->stream;
+		free_pipe->stream_res.tg = opp_head_pipe->stream_res.tg;
+		free_pipe->stream_res.opp = opp_head_pipe->stream_res.opp;
+
+		free_pipe->plane_res.hubp = pool->hubps[free_pipe->pipe_idx];
+		free_pipe->plane_res.ipp = pool->ipps[free_pipe->pipe_idx];
+		free_pipe->plane_res.dpp = pool->dpps[free_pipe->pipe_idx];
+		free_pipe->plane_res.mpcc_inst =
+			pool->dpps[free_pipe->pipe_idx]->inst;
+	} else {
+		ASSERT(opp_head_pipe);
+		free_pipe = NULL;
+	}
+
+	return free_pipe;
+}
+
+struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head(
+		const struct dc_state *cur_ctx,
+		struct dc_state *new_ctx,
+		const struct resource_pool *pool,
+		const struct pipe_ctx *otg_master)
+{
+	int free_pipe_idx = find_optimal_free_pipe_as_secondary_opp_head(
+			&cur_ctx->res_ctx, &new_ctx->res_ctx,
+			pool, otg_master);
+	struct pipe_ctx *free_pipe;
+
+	if (free_pipe_idx >= 0) {
+		free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx];
+		free_pipe->pipe_idx = free_pipe_idx;
+		free_pipe->stream = otg_master->stream;
+		free_pipe->stream_res.tg = otg_master->stream_res.tg;
+		free_pipe->stream_res.dsc = NULL;
+		free_pipe->stream_res.opp = pool->opps[free_pipe_idx];
+		free_pipe->plane_res.mi = pool->mis[free_pipe_idx];
+		free_pipe->plane_res.hubp = pool->hubps[free_pipe_idx];
+		free_pipe->plane_res.ipp = pool->ipps[free_pipe_idx];
+		free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx];
+		free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx];
+		free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst;
+		if (free_pipe->stream->timing.flags.DSC == 1) {
+			dcn20_acquire_dsc(free_pipe->stream->ctx->dc,
+					&new_ctx->res_ctx,
+					&free_pipe->stream_res.dsc,
+					free_pipe_idx);
+			ASSERT(free_pipe->stream_res.dsc);
+			if (free_pipe->stream_res.dsc == NULL) {
+				memset(free_pipe, 0, sizeof(*free_pipe));
+				free_pipe = NULL;
+			}
+		}
+	} else {
+		ASSERT(otg_master);
+		free_pipe = NULL;
+	}
+
+	return free_pipe;
+}
+
+unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans)
+{
+	/*
+	 * DCN32 and DCN321 SKUs may have different sizes for MALL
+	 * but we may not be able to access all the MALL space.
+	 * If num_chans is a power of 2, then we can access all
+	 * of the available MALL space. Otherwise, we can only
+	 * access:
+	 *
+	 * max_cab_size_in_bytes = total_cache_size_in_bytes *
+	 * ((2^floor(log2(num_chans)))/num_chans)
+	 *
+	 * Calculating the MALL sizes for all available SKUs, we
+	 * have come up with the following simplified check.
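+	 *
+	 * (Worked example for the formula above, using a hypothetical
+	 * num_chans = 12: 2^floor(log2(12)) = 8, so only 8/12 = 2/3 of
+	 * total_cache_size_in_bytes would be accessible.)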
+	 * - we have max_chans which provides the max MALL size.
+	 *   Each channel supports 4 MB of MALL, so:
+	 *
+	 *   total_cache_size_in_bytes = max_chans * 4 MB
+	 *
+	 * - we have avail_chans which shows the number of channels
+	 *   we can use if we can't access the entire MALL space.
+	 *   It is generally half of max_chans
+	 * - so we use the following checks:
+	 *
+	 *   if (num_chans == max_chans), return max_chans
+	 *   if (num_chans < max_chans), return avail_chans
+	 *
+	 * - the exception is GC_11_0_0, where we can't access max_chans,
+	 *   so we define max_avail_chans as the maximum available
+	 *   MALL space
+	 *
+	 */
+	int gc_11_0_0_max_chans = 48;
+	int gc_11_0_0_max_avail_chans = 32;
+	int gc_11_0_0_avail_chans = 16;
+	int gc_11_0_3_max_chans = 16;
+	int gc_11_0_3_avail_chans = 8;
+	int gc_11_0_2_max_chans = 8;
+	int gc_11_0_2_avail_chans = 4;
+
+	if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) {
+		return (num_chans == gc_11_0_0_max_chans) ?
+			gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans;
+	} else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) {
+		return (num_chans == gc_11_0_2_max_chans) ?
+			gc_11_0_2_max_chans : gc_11_0_2_avail_chans;
+	} else { // if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)) {
+		return (num_chans == gc_11_0_3_max_chans) ?
+			gc_11_0_3_max_chans : gc_11_0_3_avail_chans;
+	}
+}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
new file mode 100644
index 0000000000..2258c5c721
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -0,0 +1,1264 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Authors: AMD + * + */ + +#ifndef _DCN32_RESOURCE_H_ +#define _DCN32_RESOURCE_H_ + +#include "core_types.h" + +#define DCN3_2_DEFAULT_DET_SIZE 256 +#define DCN3_2_MAX_DET_SIZE 1152 +#define DCN3_2_MIN_DET_SIZE 128 +#define DCN3_2_MIN_COMPBUF_SIZE_KB 128 +#define DCN3_2_DET_SEG_SIZE 64 +#define DCN3_2_MALL_MBLK_SIZE_BYTES 65536 // 64 * 1024 +#define DCN3_2_MBLK_WIDTH 128 +#define DCN3_2_MBLK_HEIGHT_4BPE 128 +#define DCN3_2_MBLK_HEIGHT_8BPE 64 +#define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq +#define SUBVP_HIGH_REFRESH_LIST_LEN 4 +#define SUBVP_ACTIVE_MARGIN_LIST_LEN 2 +#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800 +#define DCN3_2_VMIN_DISPCLK_HZ 717000000 +#define MIN_SUBVP_DCFCLK_KHZ 400000 + +#define TO_DCN32_RES_POOL(pool)\ + container_of(pool, struct dcn32_resource_pool, base) + +extern struct _vcs_dpi_ip_params_st dcn3_2_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc; + +struct subvp_high_refresh_list { + int min_refresh; + int max_refresh; + struct resolution { + int width; + int height; + } res[SUBVP_HIGH_REFRESH_LIST_LEN]; +}; + +struct subvp_active_margin_list { + int min_refresh; + int max_refresh; + struct { + int width; + int height; + } res[SUBVP_ACTIVE_MARGIN_LIST_LEN]; +}; + +struct dcn32_resource_pool { + struct resource_pool base; +}; + +struct resource_pool *dcn32_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +struct panel_cntl *dcn32_panel_cntl_create( + const struct panel_cntl_init_data *init_data); + +bool dcn32_acquire_post_bldn_3dlut( + struct resource_context *res_ctx, + const struct resource_pool *pool, + int mpcc_id, + struct dc_3dlut **lut, + struct dc_transfer_func **shaper); + +bool dcn32_release_post_bldn_3dlut( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_3dlut **lut, + struct dc_transfer_func **shaper); + +void dcn32_add_phantom_pipes(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + unsigned int pipe_cnt, + unsigned int index); + +bool dcn32_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate); + +int dcn32_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate); + +void dcn32_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel); + +uint32_t dcn32_helper_mall_bytes_to_ways( + struct dc *dc, + uint32_t total_size_in_mall_bytes); + +uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool ignore_cursor_buf); + +uint32_t dcn32_helper_calculate_num_ways_for_subvp( + struct dc *dc, + struct dc_state *context); + +void dcn32_merge_pipes_for_subvp(struct dc *dc, + struct dc_state *context); + +bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc, + struct dc_state *context); + +bool dcn32_subvp_in_use(struct dc *dc, + struct dc_state *context); + +bool dcn32_mpo_in_use(struct dc_state *context); + +bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context); +bool dcn32_is_center_timing(struct pipe_ctx *pipe); +bool dcn32_is_psr_capable(struct pipe_ctx *pipe); + +struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *opp_head_pipe); + +struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head( + const struct dc_state 
*cur_ctx, + struct dc_state *new_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *otg_master); + +void dcn32_release_pipe(struct dc_state *context, + struct pipe_ctx *pipe, + const struct resource_pool *pool); + +void dcn32_determine_det_override(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes); + +void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes); + +struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context); + +bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe); + +bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe); + +unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans); + +double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context); + +bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height); + +bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context); + +bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel); + +void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); + +void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context); + +/* definitions for run time init of reg offsets */ + +/* CLK SRC */ +#define CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) \ + SRI_ARR_ALPHABET(PIXCLK_RESYNC_CNTL, PHYPLL, index, pllid), \ + SRII_ARR_2(PHASE, DP_DTO, 0, index), \ + SRII_ARR_2(PHASE, DP_DTO, 1, index), \ + SRII_ARR_2(PHASE, DP_DTO, 2, index), \ + SRII_ARR_2(PHASE, DP_DTO, 3, index), \ + SRII_ARR_2(MODULO, DP_DTO, 0, index), \ + SRII_ARR_2(MODULO, DP_DTO, 1, index), \ + SRII_ARR_2(MODULO, DP_DTO, 2, index), \ + SRII_ARR_2(MODULO, DP_DTO, 3, index), \ + SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 0, index), \ + SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 1, index), \ + SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 2, index), \ + SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 3, index) + +/* ABM */ +#define ABM_DCN32_REG_LIST_RI(id) \ + SRI_ARR(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \ + SRI_ARR(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \ + SRI_ARR(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \ + SRI_ARR(DC_ABM1_HG_MISC_CTRL, ABM, id), \ + SRI_ARR(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \ + SRI_ARR(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \ + SRI_ARR(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \ + SRI_ARR(BL1_PWM_USER_LEVEL, ABM, id), \ + SRI_ARR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \ + SRI_ARR(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \ + SRI_ARR(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \ + SRI_ARR(DC_ABM1_ACE_THRES_12, ABM, id), NBIO_SR_ARR(BIOS_SCRATCH_2, id) + +/* Audio */ +#define AUD_COMMON_REG_LIST_RI(id) \ + SRI_ARR(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZF0ENDPOINT, id), \ + SRI_ARR(AZALIA_F0_CODEC_ENDPOINT_DATA, AZF0ENDPOINT, id), \ + SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS, id), \ + SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, id), \ + SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, id), \ + SR_ARR(DCCG_AUDIO_DTO_SOURCE, id), SR_ARR(DCCG_AUDIO_DTO0_MODULE, id), \ + SR_ARR(DCCG_AUDIO_DTO0_PHASE, id), SR_ARR(DCCG_AUDIO_DTO1_MODULE, id), \ + SR_ARR(DCCG_AUDIO_DTO1_PHASE, id) \ + +/* VPG */ + +#define VPG_DCN3_REG_LIST_RI(id) \ + SRI_ARR(VPG_GENERIC_STATUS, VPG, id), \ + SRI_ARR(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \ + SRI_ARR(VPG_GENERIC_PACKET_DATA, 
VPG, id), \ + SRI_ARR(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \ + SRI_ARR(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id) + +/* AFMT */ +#define AFMT_DCN3_REG_LIST_RI(id) \ + SRI_ARR(AFMT_INFOFRAME_CONTROL0, AFMT, id), \ + SRI_ARR(AFMT_VBI_PACKET_CONTROL, AFMT, id), \ + SRI_ARR(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \ + SRI_ARR(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \ + SRI_ARR(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \ + SRI_ARR(AFMT_60958_0, AFMT, id), SRI_ARR(AFMT_60958_1, AFMT, id), \ + SRI_ARR(AFMT_60958_2, AFMT, id), SRI_ARR(AFMT_MEM_PWR, AFMT, id) + +/* APG */ +#define APG_DCN31_REG_LIST_RI(id) \ + SRI_ARR(APG_CONTROL, APG, id), SRI_ARR(APG_CONTROL2, APG, id), \ + SRI_ARR(APG_MEM_PWR, APG, id), SRI_ARR(APG_DBG_GEN_CONTROL, APG, id) + +/* Stream encoder */ +#define SE_DCN32_REG_LIST_RI(id) \ + SRI_ARR(AFMT_CNTL, DIG, id), SRI_ARR(DIG_FE_CNTL, DIG, id), \ + SRI_ARR(HDMI_CONTROL, DIG, id), SRI_ARR(HDMI_DB_CONTROL, DIG, id), \ + SRI_ARR(HDMI_GC, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \ + SRI_ARR(HDMI_INFOFRAME_CONTROL0, DIG, id), \ + SRI_ARR(HDMI_INFOFRAME_CONTROL1, DIG, id), \ + SRI_ARR(HDMI_VBI_PACKET_CONTROL, DIG, id), \ + SRI_ARR(HDMI_AUDIO_PACKET_CONTROL, DIG, id), \ + SRI_ARR(HDMI_ACR_PACKET_CONTROL, DIG, id), \ + SRI_ARR(HDMI_ACR_32_0, DIG, id), SRI_ARR(HDMI_ACR_32_1, DIG, id), \ + SRI_ARR(HDMI_ACR_44_0, DIG, id), SRI_ARR(HDMI_ACR_44_1, DIG, id), \ + SRI_ARR(HDMI_ACR_48_0, DIG, id), SRI_ARR(HDMI_ACR_48_1, DIG, id), \ + SRI_ARR(DP_DB_CNTL, DP, id), SRI_ARR(DP_MSA_MISC, DP, id), \ + SRI_ARR(DP_MSA_VBID_MISC, DP, id), SRI_ARR(DP_MSA_COLORIMETRY, DP, id), \ + SRI_ARR(DP_MSA_TIMING_PARAM1, DP, id), \ + SRI_ARR(DP_MSA_TIMING_PARAM2, DP, id), \ + SRI_ARR(DP_MSA_TIMING_PARAM3, DP, id), \ + SRI_ARR(DP_MSA_TIMING_PARAM4, DP, id), \ + SRI_ARR(DP_MSE_RATE_CNTL, DP, id), SRI_ARR(DP_MSE_RATE_UPDATE, DP, id), \ + SRI_ARR(DP_PIXEL_FORMAT, DP, id), SRI_ARR(DP_SEC_CNTL, DP, id), \ + SRI_ARR(DP_SEC_CNTL1, DP, id), SRI_ARR(DP_SEC_CNTL2, DP, id), \ + SRI_ARR(DP_SEC_CNTL5, DP, id), SRI_ARR(DP_SEC_CNTL6, DP, id), \ + SRI_ARR(DP_STEER_FIFO, DP, id), SRI_ARR(DP_VID_M, DP, id), \ + SRI_ARR(DP_VID_N, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \ + SRI_ARR(DP_VID_TIMING, DP, id), SRI_ARR(DP_SEC_AUD_N, DP, id), \ + SRI_ARR(DP_SEC_TIMESTAMP, DP, id), SRI_ARR(DP_DSC_CNTL, DP, id), \ + SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \ + SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ + SRI_ARR(DP_SEC_FRAMING4, DP, id), SRI_ARR(DP_GSP11_CNTL, DP, id), \ + SRI_ARR(DME_CONTROL, DME, id), \ + SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \ + SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ + SRI_ARR(DIG_FE_CNTL, DIG, id), SRI_ARR(DIG_CLOCK_PATTERN, DIG, id), \ + SRI_ARR(DIG_FIFO_CTRL0, DIG, id) + +/* Aux regs */ + +#define AUX_REG_LIST_RI(id) \ + SRI_ARR(AUX_CONTROL, DP_AUX, id), SRI_ARR(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \ + SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id) + +#define DCN2_AUX_REG_LIST_RI(id) \ + AUX_REG_LIST_RI(id), SRI_ARR(AUX_DPHY_TX_CONTROL, DP_AUX, id) + +/* HDP */ +#define 
HPD_REG_LIST_RI(id) SRI_ARR(DC_HPD_CONTROL, HPD, id) + +/* Link encoder */ +#define LE_DCN3_REG_LIST_RI(id) \ + SRI_ARR(DIG_BE_CNTL, DIG, id), SRI_ARR(DIG_BE_EN_CNTL, DIG, id), \ + SRI_ARR(TMDS_CTL_BITS, DIG, id), \ + SRI_ARR(TMDS_DCBALANCER_CONTROL, DIG, id), SRI_ARR(DP_CONFIG, DP, id), \ + SRI_ARR(DP_DPHY_CNTL, DP, id), SRI_ARR(DP_DPHY_PRBS_CNTL, DP, id), \ + SRI_ARR(DP_DPHY_SCRAM_CNTL, DP, id), SRI_ARR(DP_DPHY_SYM0, DP, id), \ + SRI_ARR(DP_DPHY_SYM1, DP, id), SRI_ARR(DP_DPHY_SYM2, DP, id), \ + SRI_ARR(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \ + SRI_ARR(DP_LINK_CNTL, DP, id), SRI_ARR(DP_LINK_FRAMING_CNTL, DP, id), \ + SRI_ARR(DP_MSE_SAT0, DP, id), SRI_ARR(DP_MSE_SAT1, DP, id), \ + SRI_ARR(DP_MSE_SAT2, DP, id), SRI_ARR(DP_MSE_SAT_UPDATE, DP, id), \ + SRI_ARR(DP_SEC_CNTL, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \ + SRI_ARR(DP_DPHY_FAST_TRAINING, DP, id), SRI_ARR(DP_SEC_CNTL1, DP, id), \ + SRI_ARR(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \ + SRI_ARR(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) + +#define LE_DCN31_REG_LIST_RI(id) \ + LE_DCN3_REG_LIST_RI(id), SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \ + SR_ARR(DIO_LINKA_CNTL, id), SR_ARR(DIO_LINKB_CNTL, id), \ + SR_ARR(DIO_LINKC_CNTL, id), SR_ARR(DIO_LINKD_CNTL, id), \ + SR_ARR(DIO_LINKE_CNTL, id), SR_ARR(DIO_LINKF_CNTL, id) + +#define UNIPHY_DCN2_REG_LIST_RI(id, phyid) \ + SRI_ARR_ALPHABET(CLOCK_ENABLE, SYMCLK, id, phyid), \ + SRI_ARR_ALPHABET(CHANNEL_XBAR_CNTL, UNIPHY, id, phyid) + +/* HPO DP stream encoder */ +#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) \ + SR_ARR(DP_STREAM_MAPPER_CONTROL0, id), \ + SR_ARR(DP_STREAM_MAPPER_CONTROL1, id), \ + SR_ARR(DP_STREAM_MAPPER_CONTROL2, id), \ + SR_ARR(DP_STREAM_MAPPER_CONTROL3, id), \ + SRI_ARR(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC, id), \ + SRI_ARR(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC, id), \ + SRI_ARR(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC, id), \ + SRI_ARR(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, DP_STREAM_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_PIXEL_FORMAT, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_MSA0, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_MSA1, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_MSA2, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_MSA3, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_MSA4, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_MSA5, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_MSA6, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_MSA7, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_MSA8, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_MSA_CONTROL, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_FIFO_CONTROL, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_STREAM_CONTROL, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_VBID_CONTROL, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_SDP_CONTROL, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL0, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL2, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL3, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL5, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id), \ + SRI_ARR(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id), \ + 
SRI_ARR(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id) + +/* HPO DP link encoder regs */ +#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) \ + SRI_ARR(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \ + SRI_ARR(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_CONFIG, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED0, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED1, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED2, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED3, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_SQ_PULSE, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM0, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM1, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM2, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM3, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM4, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM5, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM6, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM7, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM8, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM9, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM10, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_SAT_VC0, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_SAT_VC1, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_SAT_VC2, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_SAT_VC3, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL0, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \ + SRI_ARR(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id) + +/* DPP */ +#define DPP_REG_LIST_DCN30_COMMON_RI(id) \ + SRI_ARR(CM_DEALPHA, CM, id), SRI_ARR(CM_MEM_PWR_STATUS, CM, id), \ + SRI_ARR(CM_BIAS_CR_R, CM, id), SRI_ARR(CM_BIAS_Y_G_CB_B, CM, id), \ + SRI_ARR(PRE_DEGAM, CNVC_CFG, id), SRI_ARR(CM_GAMCOR_CONTROL, CM, id), \ + SRI_ARR(CM_GAMCOR_LUT_CONTROL, CM, id), \ + SRI_ARR(CM_GAMCOR_LUT_INDEX, CM, id), \ + SRI_ARR(CM_GAMCOR_LUT_INDEX, CM, id), \ + SRI_ARR(CM_GAMCOR_LUT_DATA, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_B, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_G, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_R, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_B, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_B, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_G, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_G, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_R, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_R, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_REGION_0_1, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_REGION_32_33, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_OFFSET_B, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_OFFSET_G, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_OFFSET_R, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_B, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_G, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_R, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_B, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_G, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_R, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G, CM, id), \ + 
SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_B, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_B, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_G, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_G, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_R, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_R, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_REGION_0_1, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_REGION_32_33, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_OFFSET_B, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_OFFSET_G, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_OFFSET_R, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_G, CM, id), \ + SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_R, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_CONTROL, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_C11_C12, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_C13_C14, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_C21_C22, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_C23_C24, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_C31_C32, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_C33_C34, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_B_C11_C12, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_B_C13_C14, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_B_C21_C22, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_B_C23_C24, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_B_C31_C32, CM, id), \ + SRI_ARR(CM_GAMUT_REMAP_B_C33_C34, CM, id), \ + SRI_ARR(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \ + SRI_ARR(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \ + SRI_ARR(OTG_H_BLANK, DSCL, id), SRI_ARR(OTG_V_BLANK, DSCL, id), \ + SRI_ARR(SCL_MODE, DSCL, id), SRI_ARR(LB_DATA_FORMAT, DSCL, id), \ + SRI_ARR(LB_MEMORY_CTRL, DSCL, id), SRI_ARR(DSCL_AUTOCAL, DSCL, id), \ + SRI_ARR(DSCL_CONTROL, DSCL, id), \ + SRI_ARR(SCL_TAP_CONTROL, DSCL, id), \ + SRI_ARR(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \ + SRI_ARR(SCL_COEF_RAM_TAP_DATA, DSCL, id), \ + SRI_ARR(DSCL_2TAP_CONTROL, DSCL, id), SRI_ARR(MPC_SIZE, DSCL, id), \ + SRI_ARR(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \ + SRI_ARR(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \ + SRI_ARR(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \ + SRI_ARR(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \ + SRI_ARR(SCL_HORZ_FILTER_INIT, DSCL, id), \ + SRI_ARR(SCL_HORZ_FILTER_INIT_C, DSCL, id), \ + SRI_ARR(SCL_VERT_FILTER_INIT, DSCL, id), \ + SRI_ARR(SCL_VERT_FILTER_INIT_C, DSCL, id), \ + SRI_ARR(RECOUT_START, DSCL, id), SRI_ARR(RECOUT_SIZE, DSCL, id), \ + SRI_ARR(PRE_DEALPHA, CNVC_CFG, id), SRI_ARR(PRE_REALPHA, CNVC_CFG, id), \ + SRI_ARR(PRE_CSC_MODE, CNVC_CFG, id), \ + SRI_ARR(PRE_CSC_C11_C12, CNVC_CFG, id), \ + SRI_ARR(PRE_CSC_C33_C34, CNVC_CFG, id), \ + SRI_ARR(PRE_CSC_B_C11_C12, CNVC_CFG, id), \ + SRI_ARR(PRE_CSC_B_C33_C34, CNVC_CFG, id), \ + SRI_ARR(CM_POST_CSC_CONTROL, CM, id), \ + SRI_ARR(CM_POST_CSC_C11_C12, CM, id), \ + SRI_ARR(CM_POST_CSC_C33_C34, CM, id), \ + SRI_ARR(CM_POST_CSC_B_C11_C12, CM, id), \ + SRI_ARR(CM_POST_CSC_B_C33_C34, CM, id), \ + SRI_ARR(CM_MEM_PWR_CTRL, CM, id), SRI_ARR(CM_CONTROL, CM, id), \ + SRI_ARR(FORMAT_CONTROL, CNVC_CFG, id), \ + SRI_ARR(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ + SRI_ARR(CURSOR0_CONTROL, CNVC_CUR, id), \ + SRI_ARR(CURSOR0_COLOR0, CNVC_CUR, id), \ + SRI_ARR(CURSOR0_COLOR1, CNVC_CUR, id), \ + SRI_ARR(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \ + SRI_ARR(DPP_CONTROL, DPP_TOP, id), SRI_ARR(CM_HDR_MULT_COEF, CM, id), \ + SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \ + SRI_ARR(ALPHA_2BIT_LUT, CNVC_CFG, id), \ + SRI_ARR(FCNV_FP_BIAS_R, CNVC_CFG, id), \ + SRI_ARR(FCNV_FP_BIAS_G, CNVC_CFG, id), \ + SRI_ARR(FCNV_FP_BIAS_B, CNVC_CFG, id), \ + SRI_ARR(FCNV_FP_SCALE_R, CNVC_CFG, id), \ 
+ SRI_ARR(FCNV_FP_SCALE_G, CNVC_CFG, id), \ + SRI_ARR(FCNV_FP_SCALE_B, CNVC_CFG, id), \ + SRI_ARR(COLOR_KEYER_CONTROL, CNVC_CFG, id), \ + SRI_ARR(COLOR_KEYER_ALPHA, CNVC_CFG, id), \ + SRI_ARR(COLOR_KEYER_RED, CNVC_CFG, id), \ + SRI_ARR(COLOR_KEYER_GREEN, CNVC_CFG, id), \ + SRI_ARR(COLOR_KEYER_BLUE, CNVC_CFG, id), \ + SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \ + SRI_ARR(OBUF_MEM_PWR_CTRL, DSCL, id), \ + SRI_ARR(DSCL_MEM_PWR_STATUS, DSCL, id), \ + SRI_ARR(DSCL_MEM_PWR_CTRL, DSCL, id) + +/* OPP */ +#define OPP_REG_LIST_DCN_RI(id) \ + SRI_ARR(FMT_BIT_DEPTH_CONTROL, FMT, id), SRI_ARR(FMT_CONTROL, FMT, id), \ + SRI_ARR(FMT_DITHER_RAND_R_SEED, FMT, id), \ + SRI_ARR(FMT_DITHER_RAND_G_SEED, FMT, id), \ + SRI_ARR(FMT_DITHER_RAND_B_SEED, FMT, id), \ + SRI_ARR(FMT_CLAMP_CNTL, FMT, id), \ + SRI_ARR(FMT_DYNAMIC_EXP_CNTL, FMT, id), \ + SRI_ARR(FMT_MAP420_MEMORY_CONTROL, FMT, id), \ + SRI_ARR(OPPBUF_CONTROL, OPPBUF, id), \ + SRI_ARR(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \ + SRI_ARR(OPPBUF_3D_PARAMETERS_1, OPPBUF, id), \ + SRI_ARR(OPP_PIPE_CONTROL, OPP_PIPE, id) \ + +#define OPP_REG_LIST_DCN10_RI(id) OPP_REG_LIST_DCN_RI(id) + +#define OPP_DPG_REG_LIST_RI(id) \ + SRI_ARR(DPG_CONTROL, DPG, id), SRI_ARR(DPG_DIMENSIONS, DPG, id), \ + SRI_ARR(DPG_OFFSET_SEGMENT, DPG, id), SRI_ARR(DPG_COLOUR_B_CB, DPG, id), \ + SRI_ARR(DPG_COLOUR_G_Y, DPG, id), SRI_ARR(DPG_COLOUR_R_CR, DPG, id), \ + SRI_ARR(DPG_RAMP_CONTROL, DPG, id), SRI_ARR(DPG_STATUS, DPG, id) + +#define OPP_REG_LIST_DCN30_RI(id) \ + OPP_REG_LIST_DCN10_RI(id), OPP_DPG_REG_LIST_RI(id), \ + SRI_ARR(FMT_422_CONTROL, FMT, id) + +/* Aux engine regs */ +#define AUX_COMMON_REG_LIST0_RI(id) \ + SRI_ARR(AUX_CONTROL, DP_AUX, id), SRI_ARR(AUX_ARB_CONTROL, DP_AUX, id), \ + SRI_ARR(AUX_SW_DATA, DP_AUX, id), SRI_ARR(AUX_SW_CONTROL, DP_AUX, id), \ + SRI_ARR(AUX_INTERRUPT_CONTROL, DP_AUX, id), \ + SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \ + SRI_ARR(AUX_SW_STATUS, DP_AUX, id) + +/* DWBC */ +#define DWBC_COMMON_REG_LIST_DCN30_RI(id) \ + SR_ARR(DWB_ENABLE_CLK_CTRL, id), SR_ARR(DWB_MEM_PWR_CTRL, id), \ + SR_ARR(FC_MODE_CTRL, id), SR_ARR(FC_FLOW_CTRL, id), \ + SR_ARR(FC_WINDOW_START, id), SR_ARR(FC_WINDOW_SIZE, id), \ + SR_ARR(FC_SOURCE_SIZE, id), SR_ARR(DWB_UPDATE_CTRL, id), \ + SR_ARR(DWB_CRC_CTRL, id), SR_ARR(DWB_CRC_MASK_R_G, id), \ + SR_ARR(DWB_CRC_MASK_B_A, id), SR_ARR(DWB_CRC_VAL_R_G, id), \ + SR_ARR(DWB_CRC_VAL_B_A, id), SR_ARR(DWB_OUT_CTRL, id), \ + SR_ARR(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, id), \ + SR_ARR(DWB_MMHUBBUB_BACKPRESSURE_CNT, id), \ + SR_ARR(DWB_HOST_READ_CONTROL, id), SR_ARR(DWB_SOFT_RESET, id), \ + SR_ARR(DWB_HDR_MULT_COEF, id), SR_ARR(DWB_GAMUT_REMAP_MODE, id), \ + SR_ARR(DWB_GAMUT_REMAP_COEF_FORMAT, id), \ + SR_ARR(DWB_GAMUT_REMAPA_C11_C12, id), \ + SR_ARR(DWB_GAMUT_REMAPA_C13_C14, id), \ + SR_ARR(DWB_GAMUT_REMAPA_C21_C22, id), \ + SR_ARR(DWB_GAMUT_REMAPA_C23_C24, id), \ + SR_ARR(DWB_GAMUT_REMAPA_C31_C32, id), \ + SR_ARR(DWB_GAMUT_REMAPA_C33_C34, id), \ + SR_ARR(DWB_GAMUT_REMAPB_C11_C12, id), \ + SR_ARR(DWB_GAMUT_REMAPB_C13_C14, id), \ + SR_ARR(DWB_GAMUT_REMAPB_C21_C22, id), \ + SR_ARR(DWB_GAMUT_REMAPB_C23_C24, id), \ + SR_ARR(DWB_GAMUT_REMAPB_C31_C32, id), \ + SR_ARR(DWB_GAMUT_REMAPB_C33_C34, id), SR_ARR(DWB_OGAM_CONTROL, id), \ + SR_ARR(DWB_OGAM_LUT_INDEX, id), SR_ARR(DWB_OGAM_LUT_DATA, id), \ + SR_ARR(DWB_OGAM_LUT_CONTROL, id), \ + SR_ARR(DWB_OGAM_RAMA_START_CNTL_B, id), \ + SR_ARR(DWB_OGAM_RAMA_START_CNTL_G, id), \ + SR_ARR(DWB_OGAM_RAMA_START_CNTL_R, id), \ + SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_B, id), \ + SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_B, id), 
\ + SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_G, id), \ + SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_G, id), \ + SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_R, id), \ + SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_R, id), \ + SR_ARR(DWB_OGAM_RAMA_END_CNTL1_B, id), \ + SR_ARR(DWB_OGAM_RAMA_END_CNTL2_B, id), \ + SR_ARR(DWB_OGAM_RAMA_END_CNTL1_G, id), \ + SR_ARR(DWB_OGAM_RAMA_END_CNTL2_G, id), \ + SR_ARR(DWB_OGAM_RAMA_END_CNTL1_R, id), \ + SR_ARR(DWB_OGAM_RAMA_END_CNTL2_R, id), \ + SR_ARR(DWB_OGAM_RAMA_OFFSET_B, id), SR_ARR(DWB_OGAM_RAMA_OFFSET_G, id), \ + SR_ARR(DWB_OGAM_RAMA_OFFSET_R, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_0_1, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_2_3, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_4_5, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_6_7, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_8_9, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_10_11, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_12_13, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_14_15, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_16_17, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_18_19, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_20_21, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_22_23, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_24_25, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_26_27, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_28_29, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_30_31, id), \ + SR_ARR(DWB_OGAM_RAMA_REGION_32_33, id), \ + SR_ARR(DWB_OGAM_RAMB_START_CNTL_B, id), \ + SR_ARR(DWB_OGAM_RAMB_START_CNTL_G, id), \ + SR_ARR(DWB_OGAM_RAMB_START_CNTL_R, id), \ + SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_B, id), \ + SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_B, id), \ + SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_G, id), \ + SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_G, id), \ + SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_R, id), \ + SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_R, id), \ + SR_ARR(DWB_OGAM_RAMB_END_CNTL1_B, id), \ + SR_ARR(DWB_OGAM_RAMB_END_CNTL2_B, id), \ + SR_ARR(DWB_OGAM_RAMB_END_CNTL1_G, id), \ + SR_ARR(DWB_OGAM_RAMB_END_CNTL2_G, id), \ + SR_ARR(DWB_OGAM_RAMB_END_CNTL1_R, id), \ + SR_ARR(DWB_OGAM_RAMB_END_CNTL2_R, id), \ + SR_ARR(DWB_OGAM_RAMB_OFFSET_B, id), SR_ARR(DWB_OGAM_RAMB_OFFSET_G, id), \ + SR_ARR(DWB_OGAM_RAMB_OFFSET_R, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_0_1, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_2_3, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_4_5, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_6_7, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_8_9, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_10_11, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_12_13, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_14_15, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_16_17, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_18_19, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_20_21, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_22_23, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_24_25, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_26_27, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_28_29, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_30_31, id), \ + SR_ARR(DWB_OGAM_RAMB_REGION_32_33, id) + +/* MCIF */ + +#define MCIF_WB_COMMON_REG_LIST_DCN32_RI(inst) \ + SRI2_ARR(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_PITCH, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_1_STATUS, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_1_STATUS2, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_2_STATUS, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_2_STATUS2, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_3_STATUS, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_3_STATUS2, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_4_STATUS, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_4_STATUS2, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB, inst), \ + 
SRI2_ARR(MCIF_WB_SCLK_CHANGE, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_1_ADDR_C, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_2_ADDR_C, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_3_ADDR_C, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_4_ADDR_C, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, MMHUBBUB, inst), \ + SRI2_ARR(MCIF_WB_NB_PSTATE_CONTROL, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_WATERMARK, MMHUBBUB, inst), \ + SRI2_ARR(MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_SELF_REFRESH_CONTROL, MCIF_WB, inst), \ + SRI2_ARR(MULTI_LEVEL_QOS_CTRL, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_SECURITY_LEVEL, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB, inst), \ + SRI2_ARR(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB, inst), \ + SRI2_ARR(MMHUBBUB_MEM_PWR_CNTL, MMHUBBUB, inst), \ + SRI2_ARR(MMHUBBUB_WARMUP_ADDR_REGION, MMHUBBUB, inst), \ + SRI2_ARR(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, MMHUBBUB, inst), \ + SRI2_ARR(MMHUBBUB_WARMUP_BASE_ADDR_LOW, MMHUBBUB, inst), \ + SRI2_ARR(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB, inst) + +/* DSC */ + +#define DSC_REG_LIST_DCN20_RI(id) \ + SRI_ARR(DSC_TOP_CONTROL, DSC_TOP, id), \ + SRI_ARR(DSC_DEBUG_CONTROL, DSC_TOP, id), \ + SRI_ARR(DSCC_CONFIG0, DSCC, id), SRI_ARR(DSCC_CONFIG1, DSCC, id), \ + SRI_ARR(DSCC_STATUS, DSCC, id), \ + SRI_ARR(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG0, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG1, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG2, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG3, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG4, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG5, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG6, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG7, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG8, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG9, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG10, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG11, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG12, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG13, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG14, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG15, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG16, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG17, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG18, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG19, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG20, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG21, DSCC, id), \ + SRI_ARR(DSCC_PPS_CONFIG22, DSCC, id), \ + SRI_ARR(DSCC_MEM_POWER_CONTROL, DSCC, id), \ + SRI_ARR(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id), \ + SRI_ARR(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id), \ + SRI_ARR(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id), \ + SRI_ARR(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id), \ + SRI_ARR(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id), \ + 
SRI_ARR(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id), \ + SRI_ARR(DSCC_MAX_ABS_ERROR0, DSCC, id), \ + SRI_ARR(DSCC_MAX_ABS_ERROR1, DSCC, id), \ + SRI_ARR(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id), \ + SRI_ARR(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \ + SRI_ARR(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id), \ + SRI_ARR(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \ + SRI_ARR(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id), \ + SRI_ARR(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \ + SRI_ARR(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id), \ + SRI_ARR(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \ + SRI_ARR(DSCCIF_CONFIG0, DSCCIF, id), \ + SRI_ARR(DSCCIF_CONFIG1, DSCCIF, id), \ + SRI_ARR(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id) + +/* MPC */ + +#define MPC_DWB_MUX_REG_LIST_DCN3_0_RI(inst) \ + SRII_DWB(DWB_MUX, MUX, MPC_DWB, inst) + +#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst) \ + SRII(MUX, MPC_OUT, inst), VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst) + +#define MPC_OUT_MUX_REG_LIST_DCN3_0_RI(inst) \ + MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst), SRII(CSC_MODE, MPC_OUT, inst), \ + SRII(CSC_C11_C12_A, MPC_OUT, inst), SRII(CSC_C33_C34_A, MPC_OUT, inst), \ + SRII(CSC_C11_C12_B, MPC_OUT, inst), SRII(CSC_C33_C34_B, MPC_OUT, inst), \ + SRII(DENORM_CONTROL, MPC_OUT, inst), \ + SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst), \ + SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst), SR(MPC_OUT_CSC_COEF_FORMAT) + +#define MPC_COMMON_REG_LIST_DCN1_0_RI(inst) \ + SRII(MPCC_TOP_SEL, MPCC, inst), SRII(MPCC_BOT_SEL, MPCC, inst), \ + SRII(MPCC_CONTROL, MPCC, inst), SRII(MPCC_STATUS, MPCC, inst), \ + SRII(MPCC_OPP_ID, MPCC, inst), SRII(MPCC_BG_G_Y, MPCC, inst), \ + SRII(MPCC_BG_R_CR, MPCC, inst), SRII(MPCC_BG_B_CB, MPCC, inst), \ + SRII(MPCC_SM_CONTROL, MPCC, inst), \ + SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst) + +#define MPC_REG_LIST_DCN3_0_RI(inst) \ + MPC_COMMON_REG_LIST_DCN1_0_RI(inst), SRII(MPCC_TOP_GAIN, MPCC, inst), \ + SRII(MPCC_BOT_GAIN_INSIDE, MPCC, inst), \ + SRII(MPCC_BOT_GAIN_OUTSIDE, MPCC, inst), \ + SRII(MPCC_MEM_PWR_CTRL, MPCC, inst), \ + SRII(MPCC_OGAM_LUT_INDEX, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_LUT_DATA, MPCC_OGAM, inst), \ + SRII(MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_OGAM, inst), \ + SRII(MPCC_GAMUT_REMAP_MODE, MPCC_OGAM, inst), \ + SRII(MPC_GAMUT_REMAP_C11_C12_A, MPCC_OGAM, inst), \ + SRII(MPC_GAMUT_REMAP_C33_C34_A, MPCC_OGAM, inst), \ + SRII(MPC_GAMUT_REMAP_C11_C12_B, MPCC_OGAM, inst), \ + SRII(MPC_GAMUT_REMAP_C33_C34_B, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_START_CNTL_G, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_START_CNTL_R, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_G, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_R, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_END_CNTL1_G, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_END_CNTL2_G, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_END_CNTL1_R, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_END_CNTL2_R, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_REGION_32_33, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM, inst), \ + 
SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_G, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_R, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_START_CNTL_G, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_START_CNTL_R, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_B, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_G, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_R, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_END_CNTL1_G, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_END_CNTL2_G, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_END_CNTL1_R, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_END_CNTL2_R, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_REGION_32_33, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_OFFSET_B, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_OFFSET_G, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_OFFSET_R, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_B, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_G, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_R, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_CONTROL, MPCC_OGAM, inst), \ + SRII(MPCC_OGAM_LUT_CONTROL, MPCC_OGAM, inst) + +#define MPC_REG_LIST_DCN3_2_RI(inst) \ + MPC_REG_LIST_DCN3_0_RI(inst),\ + SRII(MPCC_MOVABLE_CM_LOCATION_CONTROL, MPCC, inst),\ + SRII(MPCC_MCM_SHAPER_CONTROL, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_OFFSET_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_OFFSET_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_OFFSET_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_SCALE_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_SCALE_G_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_LUT_INDEX, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_LUT_DATA, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_0_1, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_2_3, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_4_5, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_6_7, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_8_9, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_10_11, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_12_13, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_14_15, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_16_17, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_18_19, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_20_21, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_22_23, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_24_25, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_26_27, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_28_29, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_30_31, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMA_REGION_32_33, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_B, MPCC_MCM, inst),\ + 
SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_0_1, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_2_3, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_4_5, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_6_7, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_8_9, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_10_11, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_12_13, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_14_15, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_16_17, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_18_19, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_20_21, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_22_23, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_24_25, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_26_27, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_28_29, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_30_31, MPCC_MCM, inst),\ + SRII(MPCC_MCM_SHAPER_RAMB_REGION_32_33, MPCC_MCM, inst),\ + SRII(MPCC_MCM_3DLUT_MODE, MPCC_MCM, inst), /*TODO: may need to add other 3DLUT regs*/\ + SRII(MPCC_MCM_3DLUT_INDEX, MPCC_MCM, inst),\ + SRII(MPCC_MCM_3DLUT_DATA, MPCC_MCM, inst),\ + SRII(MPCC_MCM_3DLUT_DATA_30BIT, MPCC_MCM, inst),\ + SRII(MPCC_MCM_3DLUT_READ_WRITE_CONTROL, MPCC_MCM, inst),\ + SRII(MPCC_MCM_3DLUT_OUT_NORM_FACTOR, MPCC_MCM, inst),\ + SRII(MPCC_MCM_3DLUT_OUT_OFFSET_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_3DLUT_OUT_OFFSET_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_3DLUT_OUT_OFFSET_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_CONTROL, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_LUT_INDEX, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_LUT_DATA, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_LUT_CONTROL, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_0_1, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_2_3, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_4_5, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_6_7, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_8_9, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_10_11, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_12_13, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_14_15, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_16_17, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_18_19, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_20_21, MPCC_MCM, 
inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_22_23, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_24_25, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_26_27, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_28_29, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_30_31, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMA_REGION_32_33, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_B, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_G, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_R, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_0_1, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_2_3, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_4_5, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_6_7, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_8_9, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_10_11, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_12_13, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_14_15, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_16_17, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_18_19, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_20_21, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_22_23, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_24_25, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_26_27, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_28_29, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_30_31, MPCC_MCM, inst),\ + SRII(MPCC_MCM_1DLUT_RAMB_REGION_32_33, MPCC_MCM, inst),\ + SRII(MPCC_MCM_MEM_PWR_CTRL, MPCC_MCM, inst) +/* OPTC */ + +#define OPTC_COMMON_REG_LIST_DCN3_2_RI(inst) \ + SRI_ARR(OTG_VSTARTUP_PARAM, OTG, inst), \ + SRI_ARR(OTG_VUPDATE_PARAM, OTG, inst), \ + SRI_ARR(OTG_VREADY_PARAM, OTG, inst), \ + SRI_ARR(OTG_MASTER_UPDATE_LOCK, OTG, inst), \ + SRI_ARR(OTG_GLOBAL_CONTROL0, OTG, inst), \ + SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst), \ + SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst), \ + SRI_ARR(OTG_GLOBAL_CONTROL4, OTG, inst), \ + SRI_ARR(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst), \ + SRI_ARR(OTG_H_TOTAL, OTG, inst), \ + SRI_ARR(OTG_H_BLANK_START_END, OTG, inst), \ + SRI_ARR(OTG_H_SYNC_A, OTG, inst), SRI_ARR(OTG_H_SYNC_A_CNTL, OTG, inst), \ + SRI_ARR(OTG_H_TIMING_CNTL, OTG, inst), SRI_ARR(OTG_V_TOTAL, OTG, inst), \ + SRI_ARR(OTG_V_BLANK_START_END, OTG, inst), \ + SRI_ARR(OTG_V_SYNC_A, OTG, inst), SRI_ARR(OTG_V_SYNC_A_CNTL, OTG, inst), \ + SRI_ARR(OTG_CONTROL, OTG, inst), SRI_ARR(OTG_STEREO_CONTROL, OTG, inst), \ + SRI_ARR(OTG_3D_STRUCTURE_CONTROL, OTG, inst), \ + SRI_ARR(OTG_STEREO_STATUS, OTG, inst), \ + SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst), \ + SRI_ARR(OTG_V_TOTAL_MIN, OTG, 
inst), \ + SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst), \ + SRI_ARR(OTG_TRIGA_CNTL, OTG, inst), \ + SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst), \ + SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst), \ + SRI_ARR(OTG_STATUS_FRAME_COUNT, OTG, inst), \ + SRI_ARR(OTG_STATUS, OTG, inst), SRI_ARR(OTG_STATUS_POSITION, OTG, inst), \ + SRI_ARR(OTG_NOM_VERT_POSITION, OTG, inst), \ + SRI_ARR(OTG_M_CONST_DTO0, OTG, inst), \ + SRI_ARR(OTG_M_CONST_DTO1, OTG, inst), \ + SRI_ARR(OTG_CLOCK_CONTROL, OTG, inst), \ + SRI_ARR(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst), \ + SRI_ARR(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst), \ + SRI_ARR(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst), \ + SRI_ARR(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst), \ + SRI_ARR(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst), \ + SRI_ARR(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst), \ + SRI_ARR(OPTC_INPUT_CLOCK_CONTROL, ODM, inst), \ + SRI_ARR(OPTC_DATA_SOURCE_SELECT, ODM, inst), \ + SRI_ARR(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst), \ + SRI_ARR(CONTROL, VTG, inst), SRI_ARR(OTG_VERT_SYNC_CONTROL, OTG, inst), \ + SRI_ARR(OTG_GSL_CONTROL, OTG, inst), SRI_ARR(OTG_CRC_CNTL, OTG, inst), \ + SRI_ARR(OTG_CRC0_DATA_RG, OTG, inst), \ + SRI_ARR(OTG_CRC0_DATA_B, OTG, inst), \ + SRI_ARR(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst), \ + SRI_ARR(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst), \ + SRI_ARR(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst), \ + SRI_ARR(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst), \ + SR_ARR(GSL_SOURCE_SELECT, inst), \ + SRI_ARR(OTG_TRIGA_MANUAL_TRIG, OTG, inst), \ + SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst), \ + SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst), \ + SRI_ARR(OTG_GSL_WINDOW_X, OTG, inst), \ + SRI_ARR(OTG_GSL_WINDOW_Y, OTG, inst), \ + SRI_ARR(OTG_VUPDATE_KEEPOUT, OTG, inst), \ + SRI_ARR(OTG_DSC_START_POSITION, OTG, inst), \ + SRI_ARR(OTG_DRR_TRIGGER_WINDOW, OTG, inst), \ + SRI_ARR(OTG_DRR_V_TOTAL_CHANGE, OTG, inst), \ + SRI_ARR(OPTC_DATA_FORMAT_CONTROL, ODM, inst), \ + SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst), \ + SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \ + SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \ + SRI_ARR(OTG_DRR_CONTROL, OTG, inst) + +/* HUBP */ + +#define HUBP_REG_LIST_DCN_VM_RI(id) \ + SRI_ARR(NOM_PARAMETERS_0, HUBPREQ, id), \ + SRI_ARR(NOM_PARAMETERS_1, HUBPREQ, id), \ + SRI_ARR(NOM_PARAMETERS_2, HUBPREQ, id), \ + SRI_ARR(NOM_PARAMETERS_3, HUBPREQ, id), \ + SRI_ARR(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id) +#define HUBP_REG_LIST_DCN_RI(id) \ + SRI_ARR(DCHUBP_CNTL, HUBP, id), SRI_ARR(HUBPREQ_DEBUG_DB, HUBP, id), \ + SRI_ARR(HUBPREQ_DEBUG, HUBP, id), SRI_ARR(DCSURF_ADDR_CONFIG, HUBP, id), \ + SRI_ARR(DCSURF_TILING_CONFIG, HUBP, id), \ + SRI_ARR(DCSURF_SURFACE_PITCH, HUBPREQ, id), \ + SRI_ARR(DCSURF_SURFACE_PITCH_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_SURFACE_CONFIG, HUBP, id), \ + SRI_ARR(DCSURF_FLIP_CONTROL, HUBPREQ, id), \ + SRI_ARR(DCSURF_PRI_VIEWPORT_DIMENSION, HUBP, id), \ + SRI_ARR(DCSURF_PRI_VIEWPORT_START, HUBP, id), \ + SRI_ARR(DCSURF_SEC_VIEWPORT_DIMENSION, HUBP, id), \ + SRI_ARR(DCSURF_SEC_VIEWPORT_START, HUBP, id), \ + SRI_ARR(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \ + SRI_ARR(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \ + SRI_ARR(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \ + SRI_ARR(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \ + SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \ + SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id), \ + SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \ + SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS, HUBPREQ, id), \ + SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \ + 
SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS, HUBPREQ, id), \ + SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \ + SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id), \ + SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_SURFACE_INUSE, HUBPREQ, id), \ + SRI_ARR(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id), \ + SRI_ARR(DCSURF_SURFACE_INUSE_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_SURFACE_INUSE_HIGH_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE, HUBPREQ, id), \ + SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_HIGH, HUBPREQ, id), \ + SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id), \ + SRI_ARR(DCSURF_SURFACE_CONTROL, HUBPREQ, id), \ + SRI_ARR(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id), \ + SRI_ARR(HUBPRET_CONTROL, HUBPRET, id), \ + SRI_ARR(HUBPRET_READ_LINE_STATUS, HUBPRET, id), \ + SRI_ARR(DCN_EXPANSION_MODE, HUBPREQ, id), \ + SRI_ARR(DCHUBP_REQ_SIZE_CONFIG, HUBP, id), \ + SRI_ARR(DCHUBP_REQ_SIZE_CONFIG_C, HUBP, id), \ + SRI_ARR(BLANK_OFFSET_0, HUBPREQ, id), \ + SRI_ARR(BLANK_OFFSET_1, HUBPREQ, id), \ + SRI_ARR(DST_DIMENSIONS, HUBPREQ, id), \ + SRI_ARR(DST_AFTER_SCALER, HUBPREQ, id), \ + SRI_ARR(VBLANK_PARAMETERS_0, HUBPREQ, id), \ + SRI_ARR(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id), \ + SRI_ARR(VBLANK_PARAMETERS_1, HUBPREQ, id), \ + SRI_ARR(VBLANK_PARAMETERS_3, HUBPREQ, id), \ + SRI_ARR(NOM_PARAMETERS_4, HUBPREQ, id), \ + SRI_ARR(NOM_PARAMETERS_5, HUBPREQ, id), \ + SRI_ARR(PER_LINE_DELIVERY_PRE, HUBPREQ, id), \ + SRI_ARR(PER_LINE_DELIVERY, HUBPREQ, id), \ + SRI_ARR(VBLANK_PARAMETERS_2, HUBPREQ, id), \ + SRI_ARR(VBLANK_PARAMETERS_4, HUBPREQ, id), \ + SRI_ARR(NOM_PARAMETERS_6, HUBPREQ, id), \ + SRI_ARR(NOM_PARAMETERS_7, HUBPREQ, id), \ + SRI_ARR(DCN_TTU_QOS_WM, HUBPREQ, id), \ + SRI_ARR(DCN_GLOBAL_TTU_CNTL, HUBPREQ, id), \ + SRI_ARR(DCN_SURF0_TTU_CNTL0, HUBPREQ, id), \ + SRI_ARR(DCN_SURF0_TTU_CNTL1, HUBPREQ, id), \ + SRI_ARR(DCN_SURF1_TTU_CNTL0, HUBPREQ, id), \ + SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \ + SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \ + SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \ + SRI_ARR(HUBP_CLK_CNTL, HUBP, id) +#define HUBP_REG_LIST_DCN2_COMMON_RI(id) \ + HUBP_REG_LIST_DCN_RI(id), HUBP_REG_LIST_DCN_VM_RI(id), \ + SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \ + SRI_ARR(PREFETCH_SETTINGS_C, HUBPREQ, id), \ + SRI_ARR(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, HUBPREQ, id), \ + SRI_ARR(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, HUBPREQ, id), \ + SRI_ARR(CURSOR_SETTINGS, HUBPREQ, id), \ + SRI_ARR(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \ + SRI_ARR(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \ + SRI_ARR(CURSOR_SIZE, CURSOR0_, id), \ + SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \ + SRI_ARR(CURSOR_POSITION, CURSOR0_, id), \ + SRI_ARR(CURSOR_HOT_SPOT, CURSOR0_, id), \ + SRI_ARR(CURSOR_DST_OFFSET, CURSOR0_, id), \ + SRI_ARR(DMDATA_ADDRESS_HIGH, CURSOR0_, id), \ + SRI_ARR(DMDATA_ADDRESS_LOW, CURSOR0_, id), \ + SRI_ARR(DMDATA_CNTL, CURSOR0_, id), \ + SRI_ARR(DMDATA_SW_CNTL, CURSOR0_, id), \ + SRI_ARR(DMDATA_QOS_CNTL, CURSOR0_, id), \ + 
SRI_ARR(DMDATA_SW_DATA, CURSOR0_, id), \ + SRI_ARR(DMDATA_STATUS, CURSOR0_, id), \ + SRI_ARR(FLIP_PARAMETERS_0, HUBPREQ, id), \ + SRI_ARR(FLIP_PARAMETERS_1, HUBPREQ, id), \ + SRI_ARR(FLIP_PARAMETERS_2, HUBPREQ, id), \ + SRI_ARR(DCN_CUR1_TTU_CNTL0, HUBPREQ, id), \ + SRI_ARR(DCN_CUR1_TTU_CNTL1, HUBPREQ, id), \ + SRI_ARR(DCSURF_FLIP_CONTROL2, HUBPREQ, id), \ + SRI_ARR(VMID_SETTINGS_0, HUBPREQ, id) +#define HUBP_REG_LIST_DCN21_RI(id) \ + HUBP_REG_LIST_DCN2_COMMON_RI(id), SRI_ARR(FLIP_PARAMETERS_3, HUBPREQ, id), \ + SRI_ARR(FLIP_PARAMETERS_4, HUBPREQ, id), \ + SRI_ARR(FLIP_PARAMETERS_5, HUBPREQ, id), \ + SRI_ARR(FLIP_PARAMETERS_6, HUBPREQ, id), \ + SRI_ARR(VBLANK_PARAMETERS_5, HUBPREQ, id), \ + SRI_ARR(VBLANK_PARAMETERS_6, HUBPREQ, id) +#define HUBP_REG_LIST_DCN30_RI(id) \ + HUBP_REG_LIST_DCN21_RI(id), SRI_ARR(DCN_DMDATA_VM_CNTL, HUBPREQ, id) +#define HUBP_REG_LIST_DCN32_RI(id) \ + HUBP_REG_LIST_DCN30_RI(id), SRI_ARR(DCHUBP_MALL_CONFIG, HUBP, id), \ + SRI_ARR(DCHUBP_VMPG_CONFIG, HUBP, id), \ + SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id) + +/* HUBBUB */ + +#define HUBBUB_REG_LIST_DCN32_RI(id) \ + SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A), \ + SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B), \ + SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C), \ + SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D), \ + SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL), \ + SR(DCHUBBUB_ARB_DRAM_STATE_CNTL), SR(DCHUBBUB_ARB_SAT_LEVEL), \ + SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND), SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DCHUBBUB_SOFT_RESET), SR(DCHUBBUB_CRC_CTRL), \ + SR(DCN_VM_FB_LOCATION_BASE), SR(DCN_VM_FB_LOCATION_TOP), \ + SR(DCN_VM_FB_OFFSET), SR(DCN_VM_AGP_BOT), SR(DCN_VM_AGP_TOP), \ + SR(DCN_VM_AGP_BASE), HUBBUB_SR_WATERMARK_REG_LIST(), \ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A), SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B), \ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C), SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D), \ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A), \ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B), \ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C), \ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D), \ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A), \ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B), \ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C), \ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D), SR(DCHUBBUB_DET0_CTRL), \ + SR(DCHUBBUB_DET1_CTRL), SR(DCHUBBUB_DET2_CTRL), SR(DCHUBBUB_DET3_CTRL), \ + SR(DCHUBBUB_COMPBUF_CTRL), SR(COMPBUF_RESERVED_SPACE), \ + SR(DCHUBBUB_DEBUG_CTRL_0), \ + SR(DCHUBBUB_ARB_USR_RETRAINING_CNTL), \ + SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A), \ + SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B), \ + SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C), \ + SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D), \ + SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A), \ + SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B), \ + SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C), \ + SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D), \ + SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A), \ + SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B), \ + SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C), \ + SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D), \ + SR(DCHUBBUB_ARB_MALL_CNTL), \ + SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \ + SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS), \ + SR(SDPIF_REQUEST_RATE_LIMIT) + +/* DCCG */ + +#define DCCG_REG_LIST_DCN32_RI() \ + SR(DPPCLK_DTO_CTRL), DCCG_SRII(DTO_PARAM, DPPCLK, 0), \ + DCCG_SRII(DTO_PARAM, DPPCLK, 1), DCCG_SRII(DTO_PARAM, DPPCLK, 2), \ + DCCG_SRII(DTO_PARAM, DPPCLK, 3), DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0), \ + SR(PHYASYMCLK_CLOCK_CNTL), 
SR(PHYBSYMCLK_CLOCK_CNTL), \ + SR(PHYCSYMCLK_CLOCK_CNTL), SR(PHYDSYMCLK_CLOCK_CNTL), \ + SR(PHYESYMCLK_CLOCK_CNTL), SR(DPSTREAMCLK_CNTL), SR(HDMISTREAMCLK_CNTL), \ + SR(SYMCLK32_SE_CNTL), SR(SYMCLK32_LE_CNTL), \ + DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0), DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1), \ + DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2), DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3), \ + DCCG_SRII(MODULO, DTBCLK_DTO, 0), DCCG_SRII(MODULO, DTBCLK_DTO, 1), \ + DCCG_SRII(MODULO, DTBCLK_DTO, 2), DCCG_SRII(MODULO, DTBCLK_DTO, 3), \ + DCCG_SRII(PHASE, DTBCLK_DTO, 0), DCCG_SRII(PHASE, DTBCLK_DTO, 1), \ + DCCG_SRII(PHASE, DTBCLK_DTO, 2), DCCG_SRII(PHASE, DTBCLK_DTO, 3), \ + SR(DCCG_AUDIO_DTBCLK_DTO_MODULO), SR(DCCG_AUDIO_DTBCLK_DTO_PHASE), \ + SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), \ + SR(DCCG_AUDIO_DTO_SOURCE), SR(DENTIST_DISPCLK_CNTL) + +/* VMID */ +#define DCN20_VMID_REG_LIST_RI(id) \ + SRI_ARR(CNTL, DCN_VM_CONTEXT, id), \ + SRI_ARR(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id), \ + SRI_ARR(PAGE_TABLE_BASE_ADDR_LO32, DCN_VM_CONTEXT, id), \ + SRI_ARR(PAGE_TABLE_START_ADDR_HI32, DCN_VM_CONTEXT, id), \ + SRI_ARR(PAGE_TABLE_START_ADDR_LO32, DCN_VM_CONTEXT, id), \ + SRI_ARR(PAGE_TABLE_END_ADDR_HI32, DCN_VM_CONTEXT, id), \ + SRI_ARR(PAGE_TABLE_END_ADDR_LO32, DCN_VM_CONTEXT, id) + +/* I2C HW */ + +#define I2C_HW_ENGINE_COMMON_REG_LIST_RI(id) \ + SRI_ARR_I2C(SETUP, DC_I2C_DDC, id), SRI_ARR_I2C(SPEED, DC_I2C_DDC, id), \ + SRI_ARR_I2C(HW_STATUS, DC_I2C_DDC, id), \ + SR_ARR_I2C(DC_I2C_ARBITRATION, id), \ + SR_ARR_I2C(DC_I2C_CONTROL, id), SR_ARR_I2C(DC_I2C_SW_STATUS, id), \ + SR_ARR_I2C(DC_I2C_TRANSACTION0, id), SR_ARR_I2C(DC_I2C_TRANSACTION1, id),\ + SR_ARR_I2C(DC_I2C_TRANSACTION2, id), SR_ARR_I2C(DC_I2C_TRANSACTION3, id),\ + SR_ARR_I2C(DC_I2C_DATA, id), SR_ARR_I2C(MICROSECOND_TIME_BASE_DIV, id) + +#define I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) \ + I2C_HW_ENGINE_COMMON_REG_LIST_RI(id), SR_ARR_I2C(DIO_MEM_PWR_CTRL, id), \ + SR_ARR_I2C(DIO_MEM_PWR_STATUS, id) + +#endif /* _DCN32_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c new file mode 100644 index 0000000000..f4dd6443a3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -0,0 +1,2069 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dc.h" + +#include "dcn32/dcn32_init.h" + +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn32/dcn32_resource.h" +#include "dcn321_resource.h" + +#include "dcn20/dcn20_resource.h" +#include "dcn30/dcn30_resource.h" + +#include "dml/dcn321/dcn321_fpu.h" + +#include "dcn10/dcn10_ipp.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn31/dcn31_hubbub.h" +#include "dcn32/dcn32_hubbub.h" +#include "dcn32/dcn32_mpc.h" +#include "dcn32/dcn32_hubp.h" +#include "irq/dcn32/irq_service_dcn32.h" +#include "dcn32/dcn32_dpp.h" +#include "dcn32/dcn32_optc.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn30/dcn30_hwseq.h" +#include "dce110/dce110_hwseq.h" +#include "dcn30/dcn30_opp.h" +#include "dcn20/dcn20_dsc.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn30/dcn30_afmt.h" +#include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn32/dcn32_dio_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_link_encoder.h" +#include "dcn32/dcn32_hpo_dp_link_encoder.h" +#include "dcn31/dcn31_apg.h" +#include "dcn31/dcn31_dio_link_encoder.h" +#include "dcn32/dcn32_dio_link_encoder.h" +#include "dcn321/dcn321_dio_link_encoder.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "clk_mgr.h" +#include "virtual/virtual_stream_encoder.h" +#include "dml/display_mode_vba.h" +#include "dcn32/dcn32_dccg.h" +#include "dcn10/dcn10_resource.h" +#include "link.h" +#include "dcn31/dcn31_panel_cntl.h" + +#include "dcn30/dcn30_dwb.h" +#include "dcn32/dcn32_mmhubbub.h" + +#include "dcn/dcn_3_2_1_offset.h" +#include "dcn/dcn_3_2_1_sh_mask.h" +#include "nbio/nbio_4_3_0_offset.h" + +#include "reg_helper.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" + +#include "dml/dcn30/display_mode_vba_30.h" +#include "vm_helper.h" +#include "dcn20/dcn20_vmid.h" + +#include "dc_state_priv.h" + +#define DC_LOGGER_INIT(logger) + +enum dcn321_clk_src_array_id { + DCN321_CLK_SRC_PLL0, + DCN321_CLK_SRC_PLL1, + DCN321_CLK_SRC_PLL2, + DCN321_CLK_SRC_PLL3, + DCN321_CLK_SRC_PLL4, + DCN321_CLK_SRC_TOTAL +}; + +/* begin ********************* + * macros to expand register list macro defined in HW object header file + */ + +/* DCN */ +#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name +#define SR_ARR(reg_name, id)\ + REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name +#define SR_ARR_INIT(reg_name, id, value)\ + REG_STRUCT[id].reg_name = value + +#define SRI(reg_name, block, id)\ + REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI_ARR(reg_name, block, id)\ + REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SR_ARR_I2C(reg_name, id) \ + REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name + +#define SRI_ARR_I2C(reg_name, block, id)\ + REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI_ARR_ALPHABET(reg_name, block, index, id)\ + REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define 
SRI2(reg_name, block, id)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name +#define SRI2_ARR(reg_name, block, id)\ + REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_ARR_2(reg_name, block, id, inst)\ + REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_DWB(reg_name, temp_name, block, id)\ + REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## temp_name + +#define DCCG_SRII(reg_name, block, id)\ + REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define VUPDATE_SRII(reg_name, block, id)\ + REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + reg ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg] + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX0_ ## reg_name +#define NBIO_SR_ARR(reg_name, id)\ + REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX0_ ## reg_name + +#define CTX ctx +#define REG(reg_name) \ + (ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) + +static struct bios_registers bios_regs; + +#define bios_regs_init() \ + ( \ + NBIO_SR(BIOS_SCRATCH_3),\ + NBIO_SR(BIOS_SCRATCH_6)\ + ) + +#define clk_src_regs_init(index, pllid)\ + CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) + +static struct dce110_clk_src_regs clk_src_regs[5]; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK) +}; + +#define abm_regs_init(id)\ + ABM_DCN32_REG_LIST_RI(id) + +static struct dce_abm_registers abm_regs[4]; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN32(_MASK) +}; + +#define audio_regs_init(id)\ + AUD_COMMON_REG_LIST_RI(id) + +static struct dce_audio_registers audio_regs[5]; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define vpg_regs_init(id)\ + VPG_DCN3_REG_LIST_RI(id) + +static struct dcn30_vpg_registers vpg_regs[10]; + +static const struct dcn30_vpg_shift 
vpg_shift = { + DCN3_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn30_vpg_mask vpg_mask = { + DCN3_VPG_MASK_SH_LIST(_MASK) +}; + +#define afmt_regs_init(id)\ + AFMT_DCN3_REG_LIST_RI(id) + +static struct dcn30_afmt_registers afmt_regs[6]; + +static const struct dcn30_afmt_shift afmt_shift = { + DCN3_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn30_afmt_mask afmt_mask = { + DCN3_AFMT_MASK_SH_LIST(_MASK) +}; + +#define apg_regs_init(id)\ + APG_DCN31_REG_LIST_RI(id) + +static struct dcn31_apg_registers apg_regs[4]; + +static const struct dcn31_apg_shift apg_shift = { + DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { + DCN31_APG_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs_init(id)\ + SE_DCN32_REG_LIST_RI(id) + +static struct dcn10_stream_enc_registers stream_enc_regs[5]; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; + + +#define aux_regs_init(id)\ + DCN2_AUX_REG_LIST_RI(id) + +static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5]; + +#define hpd_regs_init(id)\ + HPD_REG_LIST_RI(id) + +static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5]; + +#define link_regs_init(id, phyid)\ + ( \ + LE_DCN31_REG_LIST_RI(id), \ + UNIPHY_DCN2_REG_LIST_RI(id, phyid)\ + ) + /*DPCS_DCN31_REG_LIST(id),*/ \ + +static struct dcn10_link_enc_registers link_enc_regs[5]; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \ +// DPCS_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \ +// DPCS_DCN31_MASK_SH_LIST(_MASK) +}; + +#define hpo_dp_stream_encoder_reg_init(id)\ + DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) + +static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4]; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + + +#define hpo_dp_link_encoder_reg_init(id)\ + DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) + /*DCN3_1_RDPCSTX_REG_LIST(0),*/ + /*DCN3_1_RDPCSTX_REG_LIST(1),*/ + /*DCN3_1_RDPCSTX_REG_LIST(2),*/ + /*DCN3_1_RDPCSTX_REG_LIST(3),*/ + +static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2]; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { + DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { + DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) +}; + +#define dpp_regs_init(id)\ + DPP_REG_LIST_DCN30_COMMON_RI(id) + +static struct dcn3_dpp_registers dpp_regs[4]; + +static const struct dcn3_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT) +}; + +static const struct dcn3_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN30_COMMON(_MASK) +}; + + +#define opp_regs_init(id)\ + OPP_REG_LIST_DCN30_RI(id) + +static struct dcn20_opp_registers opp_regs[4]; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +#define aux_engine_regs_init(id) \ + ( \ + AUX_COMMON_REG_LIST0_RI(id), SR_ARR_INIT(AUXN_IMPCAL, id, 0), \ + SR_ARR_INIT(AUXP_IMPCAL, id, 0), \ + SR_ARR_INIT(AUX_RESET_MASK, id, 
DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \ + SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\ + ) + +static struct dce110_aux_registers aux_engine_regs[5]; + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +#define dwbc_regs_dcn3_init(id)\ + DWBC_COMMON_REG_LIST_DCN30_RI(id) + +static struct dcn30_dwbc_registers dwbc30_regs[1]; + +static const struct dcn30_dwbc_shift dwbc30_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_dwbc_mask dwbc30_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +#define mcif_wb_regs_dcn3_init(id)\ + MCIF_WB_COMMON_REG_LIST_DCN32_RI(id) + +static struct dcn30_mmhubbub_registers mcif_wb30_regs[1]; + +static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; + +#define dsc_regsDCN20_init(id)\ + DSC_REG_LIST_DCN20_RI(id) + +static struct dcn20_dsc_registers dsc_regs[4]; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static struct dcn30_mpc_registers mpc_regs; +#define dcn_mpc_regs_init()\ + MPC_REG_LIST_DCN3_2_RI(0),\ + MPC_REG_LIST_DCN3_2_RI(1),\ + MPC_REG_LIST_DCN3_2_RI(2),\ + MPC_REG_LIST_DCN3_2_RI(3),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\ + MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0) + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; + +#define optc_regs_init(id)\ + OPTC_COMMON_REG_LIST_DCN3_2_RI(id) + +static struct dcn_optc_registers optc_regs[4]; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK) +}; + +#define hubp_regs_init(id) \ + HUBP_REG_LIST_DCN32_RI(id) + +static struct dcn_hubp2_registers hubp_regs[4]; + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN32(_MASK) +}; + +static struct dcn_hubbub_registers hubbub_reg; +#define hubbub_reg_init()\ + HUBBUB_REG_LIST_DCN32_RI(0) + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN32(_MASK) +}; + +static struct dccg_registers dccg_regs; + +#define dccg_regs_init()\ + DCCG_REG_LIST_DCN32_RI() + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN32(_MASK) +}; + + +#define SRII2(reg_name_pre, reg_name_post, id)\ + .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ + ## id ## _ ## reg_name_post ## _BASE_IDX) + \ + reg ## reg_name_pre ## id ## _ ## reg_name_post + + +#define HWSEQ_DCN32_REG_LIST()\ + SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DIO_MEM_PWR_CTRL), \ + SR(ODM_MEM_PWR_CTRL3), \ + SR(MMHUBBUB_MEM_PWR_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL), \ + 
SR(DCCG_GATE_DISABLE_CNTL2), \ + SR(DCFCLK_CNTL),\ + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ + SRII(PIXEL_RATE_CNTL, OTG, 0), \ + SRII(PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PIXEL_RATE_CNTL, OTG, 3),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ + SR(MICROSECOND_TIME_BASE_DIV), \ + SR(MILLISECOND_TIME_BASE_DIV), \ + SR(DISPCLK_FREQ_CHANGE_CNTL), \ + SR(RBBMIF_TIMEOUT_DIS), \ + SR(RBBMIF_TIMEOUT_DIS_2), \ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ + SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ + SR(MPC_CRC_CTRL), \ + SR(MPC_CRC_RESULT_GB), \ + SR(MPC_CRC_RESULT_C), \ + SR(MPC_CRC_RESULT_AR), \ + SR(DOMAIN0_PG_CONFIG), \ + SR(DOMAIN1_PG_CONFIG), \ + SR(DOMAIN2_PG_CONFIG), \ + SR(DOMAIN3_PG_CONFIG), \ + SR(DOMAIN16_PG_CONFIG), \ + SR(DOMAIN17_PG_CONFIG), \ + SR(DOMAIN18_PG_CONFIG), \ + SR(DOMAIN19_PG_CONFIG), \ + SR(DOMAIN0_PG_STATUS), \ + SR(DOMAIN1_PG_STATUS), \ + SR(DOMAIN2_PG_STATUS), \ + SR(DOMAIN3_PG_STATUS), \ + SR(DOMAIN16_PG_STATUS), \ + SR(DOMAIN17_PG_STATUS), \ + SR(DOMAIN18_PG_STATUS), \ + SR(DOMAIN19_PG_STATUS), \ + SR(D1VGA_CONTROL), \ + SR(D2VGA_CONTROL), \ + SR(D3VGA_CONTROL), \ + SR(D4VGA_CONTROL), \ + SR(D5VGA_CONTROL), \ + SR(D6VGA_CONTROL), \ + SR(DC_IP_REQUEST_CNTL), \ + SR(AZALIA_AUDIO_DTO), \ + SR(AZALIA_CONTROLLER_CLOCK_GATING) + +static struct dce_hwseq_registers hwseq_reg; + +#define hwseq_reg_init()\ + HWSEQ_DCN32_REG_LIST() + +#define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\ + HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ + HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ + HWS_SF(, 
ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ + HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh) + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN32_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN32_MASK_SH_LIST(_MASK) +}; +#define vmid_regs_init(id)\ + DCN20_VMID_REG_LIST_RI(id) + +static struct dcn_vmid_registers vmid_regs[16]; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static const struct resource_caps res_cap_dcn321 = { + .num_timing_generator = 4, + .num_opp = 4, + .num_video_plane = 4, + .num_audio = 5, + .num_stream_encoder = 5, + .num_hpo_dp_stream_encoder = 4, + .num_hpo_dp_link_encoder = 2, + .num_pll = 5, + .num_dwb = 1, + .num_ddc = 5, + .num_vmid = 16, + .num_mpc_3dlut = 4, + .num_dsc = 4, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = true, + .ayuv = false, + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + + // 6:1 downscaling ratio: 1000/6 = 166.666 + .max_downscale_factor = { + .argb8888 = 167, + .nv12 = 167, + .fp16 = 167 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = false, + .pipe_split_policy = MPC_SPLIT_AVOID, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 7680, /*up to 8K*/ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable, + .dmub_command_table = true, + .enable_mem_low_power = { + .bits = { + .vga = false, + .i2c = false, + .dmcu = false, // This is previously known to cause a hang on S3 cycles if enabled + .dscl = false, + .cm = false, + .mpc = false, + .optc = true, + } + }, + .use_max_lb = true, + .force_disable_subvp = false, + .exit_idle_opt_for_cursor_updates = true, + .enable_single_display_2to1_odm_policy = true, + + /*must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/ + .enable_double_buffered_dsc_pg_support = true, + .enable_dp_dig_pixel_rate_div_policy = 1, + .allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback" + .alloc_extra_way_for_cursor = true, + .min_prefetch_in_strobe_ns = 60000, // 60us + .disable_unbounded_requesting = false, + .override_dispclk_programming = true, + .disable_fpo_optimizations = false, + .fpo_vactive_margin_us = 2000, // 2000us + .disable_fpo_vactive = false, + .disable_boot_optimizations = false, + .disable_subvp_high_refresh = false, + .fpo_vactive_min_active_margin_us = 200, + .fpo_vactive_max_blank_us = 1000, + .enable_legacy_fast_update = false, + .disable_dc_mode_overwrite = true, + .using_dml2 = false, +}; + +static struct dce_aux *dcn321_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT aux_engine_regs + aux_engine_regs_init(0), + aux_engine_regs_init(1), + 
aux_engine_regs_init(2), + aux_engine_regs_init(3), + aux_engine_regs_init(4); + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs_init(id)\ + I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) + +static struct dce_i2c_registers i2c_hw_regs[5]; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct dce_i2c_hw *dcn321_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT i2c_hw_regs + i2c_inst_regs_init(1), + i2c_inst_regs_init(2), + i2c_inst_regs_init(3), + i2c_inst_regs_init(4), + i2c_inst_regs_init(5); + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} + +static struct clock_source *dcn321_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn31_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + kfree(clk_src); + BREAK_TO_DEBUGGER(); + return NULL; +} + +static struct hubbub *dcn321_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub2 = kzalloc(sizeof(struct dcn20_hubbub), + GFP_KERNEL); + + if (!hubbub2) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT hubbub_reg + hubbub_reg_init(); + +#undef REG_STRUCT +#define REG_STRUCT vmid_regs + vmid_regs_init(0), + vmid_regs_init(1), + vmid_regs_init(2), + vmid_regs_init(3), + vmid_regs_init(4), + vmid_regs_init(5), + vmid_regs_init(6), + vmid_regs_init(7), + vmid_regs_init(8), + vmid_regs_init(9), + vmid_regs_init(10), + vmid_regs_init(11), + vmid_regs_init(12), + vmid_regs_init(13), + vmid_regs_init(14), + vmid_regs_init(15); + + hubbub32_construct(hubbub2, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask, + ctx->dc->dml.ip.det_buffer_size_kbytes, + ctx->dc->dml.ip.pixel_chunk_size_kbytes, + ctx->dc->dml.ip.config_return_buffer_size_in_kbytes); + + + for (i = 0; i < res_cap_dcn321.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub2->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub2->base; +} + +static struct hubp *dcn321_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT hubp_regs + hubp_regs_init(0), + hubp_regs_init(1), + hubp_regs_init(2), + hubp_regs_init(3); + + if (hubp32_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static void dcn321_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN30_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn321_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct 
dcn3_dpp *dpp3 = + kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + + if (!dpp3) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT dpp_regs + dpp_regs_init(0), + dpp_regs_init(1), + dpp_regs_init(2), + dpp_regs_init(3); + + if (dpp32_construct(dpp3, ctx, inst, + &dpp_regs[inst], &tf_shift, &tf_mask)) + return &dpp3->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp3); + return NULL; +} + +static struct mpc *dcn321_mpc_create( + struct dc_context *ctx, + int num_mpcc, + int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), + GFP_KERNEL); + + if (!mpc30) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT mpc_regs + dcn_mpc_regs_init(); + + dcn32_mpc_construct(mpc30, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + num_mpcc, + num_rmu); + + return &mpc30->base; +} + +static struct output_pixel_processor *dcn321_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp2 = + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp2) { + BREAK_TO_DEBUGGER(); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT opp_regs + opp_regs_init(0), + opp_regs_init(1), + opp_regs_init(2), + opp_regs_init(3); + + dcn20_opp_construct(opp2, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp2->base; +} + + +static struct timing_generator *dcn321_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT optc_regs + optc_regs_init(0), + optc_regs_init(1), + optc_regs_init(2), + optc_regs_init(3); + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn32_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn321_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT link_enc_aux_regs + aux_regs_init(0), + aux_regs_init(1), + aux_regs_init(2), + aux_regs_init(3), + aux_regs_init(4); + +#undef REG_STRUCT +#define REG_STRUCT link_enc_hpd_regs + hpd_regs_init(0), + hpd_regs_init(1), + hpd_regs_init(2), + hpd_regs_init(3), + hpd_regs_init(4); + +#undef REG_STRUCT +#define REG_STRUCT link_enc_regs + link_regs_init(0, A), + link_regs_init(1, B), + link_regs_init(2, C), + link_regs_init(3, D), + link_regs_init(4, E); + + dcn321_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc20->enc10.base; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS, + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); + +} 
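The read_dce_straps() helper above composes ctx->dcn_reg_offsets[] with a per-register offset by hand; the REG_STRUCT/SR machinery defined near the top of this file does the same thing via the preprocessor, so each *_regs_init() helper is just a comma-expression list that fills one slot of a static register array at pool-construct time. A minimal, self-contained sketch of the mechanism follows; the demo_* names, the FOO register, and the offset values are invented stand-ins for illustration, not the driver's real identifiers.

#include <stdio.h>

/* Stand-in register metadata, mirroring the generated reg headers:
 * regFOO is the register's offset within its segment, and
 * regFOO_BASE_IDX selects which runtime base-address segment it uses.
 */
#define regFOO          0x01A4
#define regFOO_BASE_IDX 2

struct demo_ctx { unsigned int dcn_reg_offsets[8]; };
struct demo_regs { unsigned int FOO; };

/* Same shape as BASE()/SR() above, minus the driver plumbing. */
#define DEMO_BASE(seg) (ctx->dcn_reg_offsets[seg])
#define DEMO_SR(reg_name) \
	REG_STRUCT.reg_name = DEMO_BASE(reg ## reg_name ## _BASE_IDX) + \
	reg ## reg_name

int main(void)
{
	struct demo_ctx c = { .dcn_reg_offsets = { 0x0000, 0x1000, 0x2000 } };
	struct demo_ctx *ctx = &c;
	struct demo_regs demo_regs;

	/* The driver repoints REG_STRUCT before each init list,
	 * exactly as the #undef/#define pairs do in this file.
	 */
#define REG_STRUCT demo_regs
	DEMO_SR(FOO);		/* demo_regs.FOO = 0x2000 + 0x01A4 */
#undef REG_STRUCT

	printf("FOO resolved to %#x\n", demo_regs.FOO);	/* prints 0x21a4 */
	return 0;
}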
+ +static struct audio *dcn321_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + +#undef REG_STRUCT +#define REG_STRUCT audio_regs + audio_regs_init(0), + audio_regs_init(1), + audio_regs_init(2), + audio_regs_init(3), + audio_regs_init(4); + + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct vpg *dcn321_vpg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); + + if (!vpg3) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT vpg_regs + vpg_regs_init(0), + vpg_regs_init(1), + vpg_regs_init(2), + vpg_regs_init(3), + vpg_regs_init(4), + vpg_regs_init(5), + vpg_regs_init(6), + vpg_regs_init(7), + vpg_regs_init(8), + vpg_regs_init(9); + + vpg3_construct(vpg3, ctx, inst, + &vpg_regs[inst], + &vpg_shift, + &vpg_mask); + + return &vpg3->base; +} + +static struct afmt *dcn321_afmt_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); + + if (!afmt3) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT afmt_regs + afmt_regs_init(0), + afmt_regs_init(1), + afmt_regs_init(2), + afmt_regs_init(3), + afmt_regs_init(4), + afmt_regs_init(5); + + afmt3_construct(afmt3, ctx, inst, + &afmt_regs[inst], + &afmt_shift, + &afmt_mask); + + return &afmt3->base; +} + +static struct apg *dcn321_apg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + + if (!apg31) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT apg_regs + apg_regs_init(0), + apg_regs_init(1), + apg_regs_init(2), + apg_regs_init(3); + + apg31_construct(apg31, ctx, inst, + &apg_regs[inst], + &apg_shift, + &apg_mask); + + return &apg31->base; +} + +static struct stream_encoder *dcn321_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGF) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn321_vpg_create(ctx, vpg_inst); + afmt = dcn321_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT stream_enc_regs + stream_enc_regs_init(0), + stream_enc_regs_init(1), + stream_enc_regs_init(2), + stream_enc_regs_init(3), + stream_enc_regs_init(4); + + dcn32_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, + eng_id, vpg, afmt, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static struct hpo_dp_stream_encoder *dcn321_hpo_dp_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; + struct vpg *vpg; + struct apg *apg; + uint32_t hpo_dp_inst; + uint32_t vpg_inst; + uint32_t apg_inst; + + ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); + hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + + /* Mapping of VPG register blocks to HPO DP block instance: + * VPG[6] -> HPO_DP[0] + * VPG[7] -> HPO_DP[1] + * VPG[8] -> HPO_DP[2] + * VPG[9] -> HPO_DP[3] + */ + vpg_inst = hpo_dp_inst + 6; + + /* Mapping of APG register blocks to HPO DP block instance: + * APG[0] -> HPO_DP[0] + * APG[1] -> HPO_DP[1] + * 
APG[2] -> HPO_DP[2] + * APG[3] -> HPO_DP[3] + */ + apg_inst = hpo_dp_inst; + + /* allocate HPO stream encoder and create VPG sub-block */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); + vpg = dcn321_vpg_create(ctx, vpg_inst); + apg = dcn321_apg_create(ctx, apg_inst); + + if (!hpo_dp_enc31 || !vpg || !apg) { + kfree(hpo_dp_enc31); + kfree(vpg); + kfree(apg); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT hpo_dp_stream_enc_regs + hpo_dp_stream_encoder_reg_init(0), + hpo_dp_stream_encoder_reg_init(1), + hpo_dp_stream_encoder_reg_init(2), + hpo_dp_stream_encoder_reg_init(3); + + dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, + hpo_dp_inst, eng_id, vpg, apg, + &hpo_dp_stream_enc_regs[hpo_dp_inst], + &hpo_dp_se_shift, &hpo_dp_se_mask); + + return &hpo_dp_enc31->base; +} + +static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create( + uint8_t inst, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; + + /* allocate HPO link encoder */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + +#undef REG_STRUCT +#define REG_STRUCT hpo_dp_link_enc_regs + hpo_dp_link_encoder_reg_init(0), + hpo_dp_link_encoder_reg_init(1); + + hpo_dp_link_encoder32_construct(hpo_dp_enc31, ctx, inst, + &hpo_dp_link_enc_regs[inst], + &hpo_dp_le_shift, &hpo_dp_le_mask); + + return &hpo_dp_enc31->base; +} + +static struct dce_hwseq *dcn321_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + +#undef REG_STRUCT +#define REG_STRUCT hwseq_reg + hwseq_reg_init(); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn321_create_audio, + .create_stream_encoder = dcn321_stream_encoder_create, + .create_hpo_dp_stream_encoder = dcn321_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn321_hpo_dp_link_encoder_create, + .create_hwseq = dcn321_hwseq_create, +}; + +static void dcn321_resource_destruct(struct dcn321_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + if (pool->base.stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); + pool->base.stream_enc[i]->vpg = NULL; + } + if (pool->base.stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); + pool->base.stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { + if (pool->base.hpo_dp_stream_enc[i] != NULL) { + if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); + pool->base.hpo_dp_stream_enc[i]->vpg = NULL; + } + if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { + kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); + pool->base.hpo_dp_stream_enc[i]->apg = NULL; + } + kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); + pool->base.hpo_dp_stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { + if (pool->base.hpo_dp_link_enc[i] != NULL) { + 
kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); + pool->base.hpo_dp_link_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(TO_DCN20_HUBBUB(pool->base.hubbub)); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn321_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) + dal_irq_service_destroy(&pool->base.irqs); + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { + if (pool->base.mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->base.mpc_lut[i]); + pool->base.mpc_lut[i] = NULL; + } + if (pool->base.mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->base.mpc_shaper[i]); + pool->base.mpc_shaper[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.multiple_abms[i] != NULL) + dce_abm_destroy(&pool->base.multiple_abms[i]); + } + + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); + + if (pool->base.oem_device != NULL) { + struct dc *dc = pool->base.oem_device->ctx->dc; + + dc->link_srv->destroy_ddc_service(&pool->base.oem_device); + } +} + + +static bool dcn321_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t dwb_count = pool->res_cap->num_dwb; + + for (i = 0; i < dwb_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), + GFP_KERNEL); + + if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return 
false; + } + +#undef REG_STRUCT +#define REG_STRUCT dwbc30_regs + dwbc_regs_dcn3_init(0); + + dcn30_dwbc_construct(dwbc30, ctx, + &dwbc30_regs[i], + &dwbc30_shift, + &dwbc30_mask, + i); + + pool->dwbc[i] = &dwbc30->base; + } + return true; +} + +static bool dcn321_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t dwb_count = pool->res_cap->num_dwb; + + for (i = 0; i < dwb_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), + GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + +#undef REG_STRUCT +#define REG_STRUCT mcif_wb30_regs + mcif_wb_regs_dcn3_init(0); + + dcn32_mmhubbub_construct(mcif_wb30, ctx, + &mcif_wb30_regs[i], + &mcif_wb30_shift, + &mcif_wb30_mask, + i); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +static struct display_stream_compressor *dcn321_dsc_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT dsc_regs + dsc_regsDCN20_init(0), + dsc_regsDCN20_init(1), + dsc_regsDCN20_init(2), + dsc_regsDCN20_init(3); + + dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + + dsc->max_image_width = 6016; + + return &dsc->base; +} + +static void dcn321_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn321_resource_pool *dcn321_pool = TO_DCN321_RES_POOL(*pool); + + dcn321_resource_destruct(dcn321_pool); + kfree(dcn321_pool); + *pool = NULL; +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, + .get_subvp_en = dcn32_subvp_in_use, +}; + +static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + DC_FP_START(); + dcn321_update_bw_bounding_box_fpu(dc, bw_params); + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); + DC_FP_END(); +} + +static struct resource_funcs dcn321_res_pool_funcs = { + .destroy = dcn321_destroy_resource_pool, + .link_enc_create = dcn321_link_encoder_create, + .link_enc_create_minimal = NULL, + .panel_cntl_create = dcn32_panel_cntl_create, + .validate_bandwidth = dcn32_validate_bandwidth, + .calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg, + .populate_dml_pipes = dcn32_populate_dml_pipes_from_context, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn32_acquire_free_pipe_as_secondary_dpp_pipe, + .acquire_free_pipe_as_secondary_opp_head = dcn32_acquire_free_pipe_as_secondary_opp_head, + .release_pipe = dcn20_release_pipe, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn30_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn32_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn32_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn321_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, + .add_phantom_pipes = dcn32_add_phantom_pipes, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, +}; + 
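The destruct path above (dcn321_resource_destruct and dcn321_destroy_resource_pool) recovers each enclosing wrapper from an embedded base pointer before freeing it: TO_DCN321_RES_POOL(), defined in dcn321_resource.h further down as container_of(pool, struct dcn321_resource_pool, base), and the TO_DCN20_MPC/TO_DCN20_HUBBUB-style macros all follow the same pattern. A standalone sketch of that embedding, with invented demo_* types standing in for the real ones:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal container_of, equivalent to the kernel's for this use. */
#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_base_pool { int pipe_count; };

struct demo_dcn321_pool {
	struct demo_base_pool base;	/* callers only ever see &base */
	int private_state;
};

static void demo_destroy(struct demo_base_pool **pool)
{
	/* Step from the embedded base back to the full allocation,
	 * mirroring TO_DCN321_RES_POOL(*pool) in the destroy path.
	 */
	struct demo_dcn321_pool *wrapper =
		demo_container_of(*pool, struct demo_dcn321_pool, base);

	free(wrapper);
	*pool = NULL;
}

int main(void)
{
	struct demo_dcn321_pool *p = calloc(1, sizeof(*p));
	struct demo_base_pool *base;

	if (!p)
		return 1;
	base = &p->base;
	demo_destroy(&base);
	printf("pool destroyed, base is now %p\n", (void *)base);
	return 0;
}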
+static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = REG_READ(CC_DC_PIPE_DIS); + /* DCN321 supports max 4 pipes */ + value = value & 0xf; + return value; +} + + +static bool dcn321_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn321_resource_pool *pool) +{ + int i, j; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + struct ddc_service_init_data ddc_init_data = {0}; + uint32_t pipe_fuses = 0; + uint32_t num_pipes = 4; + +#undef REG_STRUCT +#define REG_STRUCT bios_regs + bios_regs_init(); + +#undef REG_STRUCT +#define REG_STRUCT clk_src_regs + clk_src_regs_init(0, A), + clk_src_regs_init(1, B), + clk_src_regs_init(2, C), + clk_src_regs_init(3, D), + clk_src_regs_init(4, E); + +#undef REG_STRUCT +#define REG_STRUCT abm_regs + abm_regs_init(0), + abm_regs_init(1), + abm_regs_init(2), + abm_regs_init(3); + +#undef REG_STRUCT +#define REG_STRUCT dccg_regs + dccg_regs_init(); + + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_dcn321; + /* max number of pipes for ASIC before checking for pipe fuses */ + num_pipes = pool->base.res_cap->num_timing_generator; + pipe_fuses = read_pipe_fuses(ctx); + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) + if (pipe_fuses & 1 << i) + num_pipes--; + + if (pipe_fuses & 1) + ASSERT(0); //Unexpected - Pipe 0 should always be fully functional! + + if (pipe_fuses & CC_DC_PIPE_DIS__DC_FULL_DIS_MASK) + ASSERT(0); //Entire DCN is harvested! + + /* within the dml lib, the initial value is hard coded; if an ASIC pipe is fused, + * the value will be changed, so update max_num_dpp and max_num_otg for dml. + */ + dcn3_21_ip.max_num_dpp = num_pipes; + dcn3_21_ip.max_num_otg = num_pipes; + + pool->base.funcs = &dcn321_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->base.timing_generator_count = num_pipes; + pool->base.pipe_count = num_pipes; + pool->base.mpcc_count = num_pipes; + dc->caps.max_downscale_ratio = 600; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/ + /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/ + dc->caps.max_cursor_size = 64; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + dc->caps.mall_size_per_mem_channel = 4; + dc->caps.mall_size_total = 0; + dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; + dc->caps.cache_line_size = 64; + dc->caps.cache_num_ways = 16; + + /* Calculate the available MALL space */ + dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( + dc, dc->ctx->dc_bios->vram_info.num_chans) * + dc->caps.mall_size_per_mem_channel * 1024 * 1024; + dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; + + dc->caps.subvp_fw_processing_delay_us = 15; + dc->caps.subvp_drr_max_vblank_margin_us = 40; + dc->caps.subvp_prefetch_end_to_mall_start_us = 15; + dc->caps.subvp_swath_height_margin_lines = 16; + dc->caps.subvp_pstate_allow_width_us = 20; + dc->caps.subvp_vertical_int_margin_us = 30; + dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin + dc->caps.max_slave_planes = 2; + dc->caps.max_slave_yuv_planes = 2; + dc->caps.max_slave_rgb_planes = 2; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.dp_hpo = true; + 
dc->caps.dp_hdmi21_pcon_support = true; + dc->caps.edp_dsc_support = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + dc->caps.max_v_total = (1 << 15) - 1; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; + dc->caps.color.dpp.dgam_rom_caps.pq = 1; + dc->caps.color.dpp.dgam_rom_caps.hlg = 1; + dc->caps.color.dpp.post_csc = 1; + dc->caps.color.dpp.gamma_corr = 1; + dc->caps.color.dpp.dgam_rom_for_yuv = 0; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN2 and later ASICs + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //4, configurable to be before or after BLND in MPCC + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + dc->config.dc_mode_clk_limit_support = true; + /* read VBIOS LTTPR caps */ + { + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + /* interop bit is implicit */ + { + dc->caps.vbios_lttpr_aware = true; + } + } + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock Sources for Pixel Clock*/ + pool->base.clock_sources[DCN321_CLK_SRC_PLL0] = + dcn321_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN321_CLK_SRC_PLL1] = + dcn321_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN321_CLK_SRC_PLL2] = + dcn321_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN321_CLK_SRC_PLL3] = + dcn321_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN321_CLK_SRC_PLL4] = + dcn321_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + + pool->base.clk_src_count = DCN321_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn321_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock 
sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* DCCG */ + pool->base.dccg = dccg32_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* DML */ + dml_init_instance(&dc->dml, &dcn3_21_soc, &dcn3_21_ip, DML_PROJECT_DCN32); + + /* IRQ Service */ + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn32_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* HUBBUB */ + pool->base.hubbub = dcn321_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs, TGs, ABMs */ + for (i = 0, j = 0; i < pool->base.res_cap->num_timing_generator; i++) { + + /* if pipe is disabled, skip instance of HW pipe, + * i.e, skip ASIC register instance + */ + if (pipe_fuses & 1 << i) + continue; + + pool->base.hubps[j] = dcn321_hubp_create(ctx, i); + if (pool->base.hubps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->base.dpps[j] = dcn321_dpp_create(ctx, i); + if (pool->base.dpps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + + pool->base.opps[j] = dcn321_opp_create(ctx, i); + if (pool->base.opps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + + pool->base.timing_generators[j] = dcn321_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + + pool->base.multiple_abms[j] = dmub_abm_create(ctx, + &abm_regs[i], + &abm_shift, + &abm_mask); + if (pool->base.multiple_abms[j] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* index for resource pool arrays for next valid pipe */ + j++; + } + + /* PSR */ + pool->base.psr = dmub_psr_create(ctx); + if (pool->base.psr == NULL) { + dm_error("DC: failed to create psr obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* MPCCs */ + pool->base.mpc = dcn321_mpc_create(ctx, pool->base.res_cap->num_timing_generator, pool->base.res_cap->num_mpc_3dlut); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + /* DSCs */ + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn321_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB */ + if (!dcn321_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + /* MMHUBBUB */ + if (!dcn321_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn321_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn321_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + 
goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* Audio, HWSeq, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + /* HW Sequencer init functions and Plane caps */ + dcn32_hw_sequencer_init_functions(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { + ddc_init_data.ctx = dc->ctx; + ddc_init_data.link = NULL; + ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + ddc_init_data.id.enum_id = 0; + ddc_init_data.id.type = OBJECT_TYPE_GENERIC; + pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data); + } else { + pool->base.oem_device = NULL; + } + + dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; + dc->dml2_options.use_native_pstate_optimization = false; + dc->dml2_options.use_native_soc_bb_construction = true; + dc->dml2_options.minimize_dispclk_using_odm = true; + + dc->dml2_options.callbacks.dc = dc; + dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; + dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; + dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; + dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; + dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; + + dc->dml2_options.svp_pstate.callbacks.dc = dc; + dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; + dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; + dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream; + dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; + dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane; + dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; + dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; + dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; + dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; + dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; + + dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; + dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us; + dc->dml2_options.svp_pstate.subvp_pstate_allow_width_us = 
dc->caps.subvp_pstate_allow_width_us; + dc->dml2_options.svp_pstate.subvp_swath_height_margin_lines = dc->caps.subvp_swath_height_margin_lines; + + dc->dml2_options.svp_pstate.force_disable_subvp = dc->debug.force_disable_subvp; + dc->dml2_options.svp_pstate.force_enable_subvp = dc->debug.force_subvp_mclk_switch; + + dc->dml2_options.mall_cfg.cache_line_size_bytes = dc->caps.cache_line_size; + dc->dml2_options.mall_cfg.cache_num_ways = dc->caps.cache_num_ways; + dc->dml2_options.mall_cfg.max_cab_allocation_bytes = dc->caps.max_cab_allocation_bytes; + dc->dml2_options.mall_cfg.mblk_height_4bpe_pixels = DCN3_2_MBLK_HEIGHT_4BPE; + dc->dml2_options.mall_cfg.mblk_height_8bpe_pixels = DCN3_2_MBLK_HEIGHT_8BPE; + dc->dml2_options.mall_cfg.mblk_size_bytes = DCN3_2_MALL_MBLK_SIZE_BYTES; + dc->dml2_options.mall_cfg.mblk_width_pixels = DCN3_2_MBLK_WIDTH; + + dc->dml2_options.max_segments_per_hubp = 18; + dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE; + + return true; + +create_fail: + + dcn321_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn321_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn321_resource_pool *pool = + kzalloc(sizeof(struct dcn321_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn321_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h new file mode 100644 index 0000000000..82cbf009f2 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h @@ -0,0 +1,45 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef _DCN321_RESOURCE_H_ +#define _DCN321_RESOURCE_H_ + +#include "core_types.h" + +#define TO_DCN321_RES_POOL(pool)\ + container_of(pool, struct dcn321_resource_pool, base) + +extern struct _vcs_dpi_ip_params_st dcn3_21_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc; + +struct dcn321_resource_pool { + struct resource_pool base; +}; + +struct resource_pool *dcn321_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +#endif /* _DCN321_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c new file mode 100644 index 0000000000..78c315541f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -0,0 +1,2180 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "dcn31/dcn31_init.h"
+#include "dcn35/dcn35_init.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn35_resource.h"
+#include "dml2/dml2_wrapper.h"
+
+#include "dcn20/dcn20_resource.h"
+#include "dcn30/dcn30_resource.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn32/dcn32_resource.h"
+
+#include "dcn10/dcn10_ipp.h"
+#include "dcn30/dcn30_hubbub.h"
+#include "dcn31/dcn31_hubbub.h"
+#include "dcn35/dcn35_hubbub.h"
+#include "dcn32/dcn32_mpc.h"
+#include "dcn35/dcn35_hubp.h"
+#include "irq/dcn35/irq_service_dcn35.h"
+#include "dcn35/dcn35_dpp.h"
+#include "dcn35/dcn35_optc.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dce110/dce110_hwseq.h"
+#include "dcn35/dcn35_opp.h"
+#include "dcn35/dcn35_dsc.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dcn30/dcn30_afmt.h"
+#include "dcn31/dcn31_dio_link_encoder.h"
+#include "dcn35/dcn35_dio_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_link_encoder.h"
+#include "dcn32/dcn32_hpo_dp_link_encoder.h"
+#include "link.h"
+#include "dcn31/dcn31_apg.h"
+#include "dcn32/dcn32_dio_link_encoder.h"
+#include "dcn31/dcn31_vpg.h"
+#include "dcn31/dcn31_afmt.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "clk_mgr.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dml/display_mode_vba.h"
+#include "dcn35/dcn35_dccg.h"
+#include "dcn35/dcn35_pg_cntl.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn31/dcn31_panel_cntl.h"
+#include "dcn35/dcn35_hwseq.h"
+#include "dcn35/dcn35_dio_link_encoder.h"
+#include "dml/dcn31/dcn31_fpu.h" /*todo*/
+#include "dml/dcn35/dcn35_fpu.h"
+#include "dcn35/dcn35_dwb.h"
+#include "dcn35/dcn35_mmhubbub.h"
+
+#include "dcn/dcn_3_5_0_offset.h"
+#include "dcn/dcn_3_5_0_sh_mask.h"
+#include "nbio/nbio_7_11_0_offset.h"
+#include "mmhub/mmhub_3_3_0_offset.h"
+#include "mmhub/mmhub_3_3_0_sh_mask.h"
+
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+
+#include "reg_helper.h"
+#include "dce/dmub_abm.h"
+#include "dce/dmub_psr.h"
+#include "dce/dmub_replay.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+#include "dml/dcn31/display_mode_vba_31.h" /*temp*/
+#include "vm_helper.h"
+#include "dcn20/dcn20_vmid.h"
+
+#include "dc_state_priv.h"
+
+#include "link_enc_cfg.h"
+#define DC_LOGGER_INIT(logger)
+
+enum dcn35_clk_src_array_id {
+	DCN35_CLK_SRC_PLL0,
+	DCN35_CLK_SRC_PLL1,
+	DCN35_CLK_SRC_PLL2,
+	DCN35_CLK_SRC_PLL3,
+	DCN35_CLK_SRC_PLL4,
+	DCN35_CLK_SRC_TOTAL
+};
+
+/* begin *********************
+ * macros to expand register list macros defined in HW object header files
+ */
+
+/* DCN */
+/* TODO awful hack.
fixup dcn20_dwb.h */ +#undef BASE_INNER +#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg] + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SR_ARR(reg_name, id) \ + REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name + +#define SR_ARR_INIT(reg_name, id, value) \ + REG_STRUCT[id].reg_name = value + +#define SRI(reg_name, block, id)\ + REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI_ARR(reg_name, block, id)\ + REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SR_ARR_I2C(reg_name, id) \ + REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name + +#define SRI_ARR_I2C(reg_name, block, id)\ + REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI_ARR_ALPHABET(reg_name, block, index, id)\ + REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRI2_ARR(reg_name, block, id)\ + REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \ + reg ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_ARR_2(reg_name, block, id, inst)\ + REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define SRII_DWB(reg_name, temp_name, block, id)\ + REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## temp_name + +#define SF_DWB2(reg_name, block, id, field_name, post_fix) \ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define DCCG_SRII(reg_name, block, id)\ + REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + reg ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + reg ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg] + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX2_ ## reg_name + +#define NBIO_SR_ARR(reg_name, id)\ + REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX2_ ## reg_name ## _BASE_IDX) + \ + regBIF_BX2_ ## reg_name + +#define bios_regs_init() \ + ( \ + NBIO_SR(BIOS_SCRATCH_3),\ + NBIO_SR(BIOS_SCRATCH_6)\ + ) + +static struct bios_registers bios_regs; + +#define clk_src_regs_init(index, pllid)\ + CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) + +static struct dce110_clk_src_regs clk_src_regs[5]; + +static const struct dce110_clk_src_shift cs_shift = { + 
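+	/*
+	 * The __SHIFT/_MASK table pairs in this file follow DC's usual X-macro
+	 * convention: a field list such as CS_COMMON_MASK_SH_LIST_DCN3_1_4(mask_sh)
+	 * expands to entries of the form
+	 *	.FIELD_NAME = REG_NAME ## __ ## FIELD_NAME ## mask_sh,
+	 * (compare SF_DWB2() above), so instantiating the same list once with
+	 * __SHIFT and once with _MASK yields two parallel tables that the
+	 * REG_GET()/REG_SET()-style helpers from reg_helper.h combine at run
+	 * time, roughly (illustrative sketch only):
+	 *	field_val = (reg_val & mask.FIELD_NAME) >> shift.FIELD_NAME;
+	 */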
CS_COMMON_MASK_SH_LIST_DCN3_1_4(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN3_1_4(_MASK) +}; + +#define abm_regs_init(id)\ + ABM_DCN32_REG_LIST_RI(id) + +static struct dce_abm_registers abm_regs[4]; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN35(_MASK) +}; + +#define audio_regs_init(id)\ + AUD_COMMON_REG_LIST_RI(id) + +static struct dce_audio_registers audio_regs[7]; + + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define vpg_regs_init(id)\ + VPG_DCN31_REG_LIST_RI(id) + +static struct dcn31_vpg_registers vpg_regs[10]; + +static const struct dcn31_vpg_shift vpg_shift = { + DCN31_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_vpg_mask vpg_mask = { + DCN31_VPG_MASK_SH_LIST(_MASK) +}; + +#define afmt_regs_init(id)\ + AFMT_DCN31_REG_LIST_RI(id) + +static struct dcn31_afmt_registers afmt_regs[6]; + +static const struct dcn31_afmt_shift afmt_shift = { + DCN31_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_afmt_mask afmt_mask = { + DCN31_AFMT_MASK_SH_LIST(_MASK) +}; + +#define apg_regs_init(id)\ + APG_DCN31_REG_LIST_RI(id) + +static struct dcn31_apg_registers apg_regs[4]; + +static const struct dcn31_apg_shift apg_shift = { + DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { + DCN31_APG_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs_init(id)\ + SE_DCN35_REG_LIST_RI(id) + +static struct dcn10_stream_enc_registers stream_enc_regs[5]; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN35(_MASK) +}; + +#define aux_regs_init(id)\ + DCN2_AUX_REG_LIST_RI(id) + +static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5]; + +#define hpd_regs_init(id)\ + HPD_REG_LIST_RI(id) + +static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5]; + + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +#define link_regs_init(id, phyid)\ + ( \ + LE_DCN35_REG_LIST_RI(id), \ + UNIPHY_DCN2_REG_LIST_RI(id, phyid)\ + ) + +static struct dcn10_link_enc_registers link_enc_regs[5]; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN35(__SHIFT), \ + //DPCS_DCN31_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN35(_MASK), \ + //DPCS_DCN31_MASK_SH_LIST(_MASK) +}; + +#define hpo_dp_stream_encoder_reg_init(id)\ + DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) + +static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4]; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + +#define 
hpo_dp_link_encoder_reg_init(id)\ + DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) + /*DCN3_1_RDPCSTX_REG_LIST(0),*/ + /*DCN3_1_RDPCSTX_REG_LIST(1),*/ + /*DCN3_1_RDPCSTX_REG_LIST(2),*/ + /*DCN3_1_RDPCSTX_REG_LIST(3),*/ + +static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2]; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { + DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { + DCN3_1_HPO_DP_LINK_ENC_COMMON_MASK_SH_LIST(_MASK) +}; + +#define dpp_regs_init(id)\ + DPP_REG_LIST_DCN35_RI(id) + +static struct dcn3_dpp_registers dpp_regs[4]; + +static const struct dcn35_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN35(__SHIFT) +}; + +static const struct dcn35_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN35(_MASK) +}; + +#define opp_regs_init(id)\ + OPP_REG_LIST_DCN35_RI(id) + +static struct dcn35_opp_registers opp_regs[4]; + +static const struct dcn35_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn35_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN35(_MASK) +}; + +#define aux_engine_regs_init(id)\ + ( \ + AUX_COMMON_REG_LIST0_RI(id), \ + SR_ARR_INIT(AUXN_IMPCAL, id, 0), \ + SR_ARR_INIT(AUXP_IMPCAL, id, 0), \ + SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK) \ + ) + +static struct dce110_aux_registers aux_engine_regs[5]; + +#define dwbc_regs_dcn3_init(id)\ + DWBC_COMMON_REG_LIST_DCN30_RI(id) + +static struct dcn30_dwbc_registers dwbc35_regs[1]; + +static const struct dcn35_dwbc_shift dwbc35_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn35_dwbc_mask dwbc35_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN35(_MASK) +}; + +#define mcif_wb_regs_dcn3_init(id)\ + MCIF_WB_COMMON_REG_LIST_DCN3_5_RI(id) + +static struct dcn35_mmhubbub_registers mcif_wb35_regs[1]; + +static const struct dcn35_mmhubbub_shift mcif_wb35_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT) +}; + +static const struct dcn35_mmhubbub_mask mcif_wb35_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN3_5(_MASK) +}; + +#define dsc_regsDCN35_init(id)\ + DSC_REG_LIST_DCN20_RI(id) + +static struct dcn20_dsc_registers dsc_regs[4]; + +static const struct dcn35_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN35(__SHIFT) +}; + +static const struct dcn35_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN35(_MASK) +}; + +static struct dcn30_mpc_registers mpc_regs; + +#define dcn_mpc_regs_init() \ + MPC_REG_LIST_DCN3_2_RI(0),\ + MPC_REG_LIST_DCN3_2_RI(1),\ + MPC_REG_LIST_DCN3_2_RI(2),\ + MPC_REG_LIST_DCN3_2_RI(3),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\ + MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\ + MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0) + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN32(_MASK) +}; + +#define optc_regs_init(id)\ + OPTC_COMMON_REG_LIST_DCN3_5_RI(id) + +static struct dcn_optc_registers optc_regs[4]; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN3_5(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN3_5(_MASK) +}; + +#define hubp_regs_init(id)\ + HUBP_REG_LIST_DCN30_RI(id) + +static struct dcn_hubp2_registers hubp_regs[4]; + + +static const struct dcn35_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn35_hubp2_mask 
hubp_mask = { + HUBP_MASK_SH_LIST_DCN35(_MASK) +}; + +static struct dcn_hubbub_registers hubbub_reg; + +#define hubbub_reg_init()\ + HUBBUB_REG_LIST_DCN35(0) + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN35(_MASK) +}; + +static struct dccg_registers dccg_regs; + +#define dccg_regs_init()\ + DCCG_REG_LIST_DCN35() + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN35(_MASK) +}; + +static struct pg_cntl_registers pg_cntl_regs; + +#define pg_cntl_dcn35_regs_init() \ + PG_CNTL_REG_LIST_DCN35() + +static const struct pg_cntl_shift pg_cntl_shift = { + PG_CNTL_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct pg_cntl_mask pg_cntl_mask = { + PG_CNTL_MASK_SH_LIST_DCN35(_MASK) +}; + +#define SRII2(reg_name_pre, reg_name_post, id)\ + .reg_name_pre ## _ ## reg_name_post[id] = BASE(reg ## reg_name_pre \ + ## id ## _ ## reg_name_post ## _BASE_IDX) + \ + reg ## reg_name_pre ## id ## _ ## reg_name_post + +static struct dce_hwseq_registers hwseq_reg; + +#define hwseq_reg_init()\ + HWSEQ_DCN35_REG_LIST() + +#define HWSEQ_DCN35_MASK_SH_LIST(mask_sh)\ + HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ + HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN16_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN17_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN18_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN19_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN22_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN23_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN24_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, mask_sh), \ + HWS_SF(, DOMAIN25_PG_CONFIG, DOMAIN_POWER_GATE, mask_sh), \ + HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + 
HWS_SF(, DOMAIN22_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN23_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN24_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DOMAIN25_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, mask_sh), \ + HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_G_GATE_DIS, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ + HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ + HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DISPCLK_R_DMU_GATE_DIS, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DISPCLK_G_RBBMIF_GATE_DIS, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, RBBMIF_FGCG_REP_DIS, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DPREFCLK_ALLOW_DS_CLKSTOP, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DISPCLK_ALLOW_DS_CLKSTOP, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DPPCLK_ALLOW_DS_CLKSTOP, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DTBCLK_ALLOW_DS_CLKSTOP, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DCFCLK_ALLOW_DS_CLKSTOP, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, DPIACLK_ALLOW_DS_CLKSTOP, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, LONO_FGCG_REP_DIS, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE, mask_sh),\ + HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_FE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \ + HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK0_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK1_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK2_GATE_DISABLE, mask_sh),\ + HWS_SF(, DCCG_GATE_DISABLE_CNTL4, 
DPIASYMCLK3_GATE_DISABLE, mask_sh)
+
+static const struct dce_hwseq_shift hwseq_shift = {
+		HWSEQ_DCN35_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+		HWSEQ_DCN35_MASK_SH_LIST(_MASK)
+};
+
+#define vmid_regs_init(id)\
+		DCN20_VMID_REG_LIST_RI(id)
+
+static struct dcn_vmid_registers vmid_regs[16];
+
+static const struct dcn20_vmid_shift vmid_shifts = {
+		DCN20_VMID_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn20_vmid_mask vmid_masks = {
+		DCN20_VMID_MASK_SH_LIST(_MASK)
+};
+
+static const struct resource_caps res_cap_dcn35 = {
+	.num_timing_generator = 4,
+	.num_opp = 4,
+	.num_video_plane = 4,
+	.num_audio = 5,
+	.num_stream_encoder = 5,
+	.num_dig_link_enc = 5,
+	.num_hpo_dp_stream_encoder = 4,
+	.num_hpo_dp_link_encoder = 2,
+	.num_pll = 4,/*1 c10 edp, 3xc20 combo PHY*/
+	.num_dwb = 1,
+	.num_ddc = 5,
+	.num_vmid = 16,
+	.num_mpc_3dlut = 2,
+	.num_dsc = 4,
+};
+
+static const struct dc_plane_cap plane_cap = {
+	.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+	.per_pixel_alpha = true,
+
+	.pixel_format_support = {
+			.argb8888 = true,
+			.nv12 = true,
+			.fp16 = true,
+			.p010 = true,
+			.ayuv = false,
+	},
+
+	.max_upscale_factor = {
+			.argb8888 = 16000,
+			.nv12 = 16000,
+			.fp16 = 16000
+	},
+
+	// 6:1 downscaling ratio: 1000/6 = 166.666
+	.max_downscale_factor = {
+			.argb8888 = 250,
+			.nv12 = 167,
+			.fp16 = 167
+	},
+	64,
+	64
+};
+
+static const struct dc_debug_options debug_defaults_drv = {
+	.disable_dmcu = true,
+	.force_abm_enable = false,
+	.timing_trace = false,
+	.clock_trace = true,
+	.disable_pplib_clock_request = false,
+	.pipe_split_policy = MPC_SPLIT_AVOID,
+	.force_single_disp_pipe_split = false,
+	.disable_dcc = DCC_ENABLE,
+	.disable_dpp_power_gate = true,
+	.disable_hubp_power_gate = true,
+	.disable_optc_power_gate = true, /*should be the same as the above two*/
+	.disable_hpo_power_gate = true, /*dmubfw force domain25 on*/
+	.disable_clock_gate = false,
+	.disable_dsc_power_gate = true,
+	.vsr_support = true,
+	.performance_trace = false,
+	.max_downscale_src_width = 4096,/*up to true 4K*/
+	.disable_pplib_wm_range = false,
+	.scl_reset_length10 = true,
+	.sanity_checks = false,
+	.underflow_assert_delay_us = 0xFFFFFFFF,
+	.dwb_fi_phase = -1, // -1 = disable,
+	.dmub_command_table = true,
+	.pstate_enabled = true,
+	.use_max_lb = true,
+	.enable_mem_low_power = {
+		.bits = {
+			.vga = false,
+			.i2c = true,
+			.dmcu = false, // This was previously known to cause hangs on S3 cycles if enabled
+			.dscl = true,
+			.cm = true,
+			.mpc = true,
+			.optc = true,
+			.vpg = true,
+			.afmt = true,
+		}
+	},
+	.root_clock_optimization = {
+		.bits = {
+			.dpp = true,
+			.dsc = true,/*dscclk and dsc pg*/
+			.hdmistream = true,
+			.hdmichar = true,
+			.dpstream = true,
+			.symclk32_se = true,
+			.symclk32_le = true,
+			.symclk_fe = true,
+			.physymclk = true,
+			.dpiasymclk = true,
+		}
+	},
+	.seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
+	.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disabling Z9 */
+	.using_dml2 = true,
+	.support_eDP1_5 = true,
+	.enable_hpo_pg_support = false,
+	.enable_legacy_fast_update = true,
+	.enable_single_display_2to1_odm_policy = false,
+	.disable_idle_power_optimizations = false,
+	.dmcub_emulation = false,
+	.disable_boot_optimizations = false,
+	.disable_unbounded_requesting = false,
+	.disable_mem_low_power = false,
+	//must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions
+	.enable_double_buffered_dsc_pg_support = true,
+	.enable_dp_dig_pixel_rate_div_policy = 1,
+	.disable_z10
= false, + .ignore_pg = true, + .psp_disabled_wa = true, + .ips2_eval_delay_us = 2000, + .ips2_entry_delay_us = 800, + .disable_dmub_reallow_idle = true, + .static_screen_wait_frames = 2, +}; + +static const struct dc_panel_config panel_config_defaults = { + .psr = { + .disable_psr = false, + .disallow_psrsu = false, + .disallow_replay = false, + }, + .ilr = { + .optimize_edp_link_rate = true, + }, +}; + +static void dcn35_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN20_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn35_dpp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + bool success = (dpp != NULL); + + if (!success) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT dpp_regs + dpp_regs_init(0), + dpp_regs_init(1), + dpp_regs_init(2), + dpp_regs_init(3); + + success = dpp35_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, + &tf_mask); + if (success) { + dpp35_set_fgcg( + dpp, + ctx->dc->debug.enable_fine_grain_clock_gating.bits.dpp); + return &dpp->base; + } + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +static struct output_pixel_processor *dcn35_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp = + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT opp_regs + opp_regs_init(0), + opp_regs_init(1), + opp_regs_init(2), + opp_regs_init(3); + + dcn35_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + + dcn35_opp_set_fgcg(opp, ctx->dc->debug.enable_fine_grain_clock_gating.bits.opp); + + return &opp->base; +} + +static struct dce_aux *dcn31_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT aux_engine_regs + aux_engine_regs_init(0), + aux_engine_regs_init(1), + aux_engine_regs_init(2), + aux_engine_regs_init(3), + aux_engine_regs_init(4); + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} + +#define i2c_inst_regs_init(id)\ + I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) + +static struct dce_i2c_registers i2c_hw_regs[5]; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN35(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN35(_MASK) +}; + +/* ========================================================== */ + +/* + * DPIA index | Preferred Encoder | Host Router + * 0 | C | 0 + * 1 | First Available | 0 + * 2 | D | 1 + * 3 | First Available | 1 + */ +/* ========================================================== */ +static const enum engine_id dpia_to_preferred_enc_id_table[] = { + ENGINE_ID_DIGC, + ENGINE_ID_DIGC, + ENGINE_ID_DIGD, + ENGINE_ID_DIGD +}; + +static enum engine_id dcn35_get_preferred_eng_id_dpia(unsigned int dpia_index) +{ + return dpia_to_preferred_enc_id_table[dpia_index]; +} + +static struct dce_i2c_hw *dcn31_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT i2c_hw_regs + i2c_inst_regs_init(1), + i2c_inst_regs_init(2), + 
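+	/*
+	 * Note: the I2C instances in this init list are 1-based; the
+	 * SR_ARR_I2C()/SRI_ARR_I2C() helpers above store at REG_STRUCT[id - 1],
+	 * so i2c_inst_regs_init(1)..(5) fill i2c_hw_regs[0]..[4], matching the
+	 * zero-based `inst` passed to dcn2_i2c_hw_construct() below.
+	 */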
i2c_inst_regs_init(3), + i2c_inst_regs_init(4), + i2c_inst_regs_init(5); + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} +static struct mpc *dcn35_mpc_create( + struct dc_context *ctx, + int num_mpcc, + int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); + + if (!mpc30) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT mpc_regs + dcn_mpc_regs_init(); + + dcn32_mpc_construct(mpc30, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + num_mpcc, + num_rmu); + + return &mpc30->base; +} + +static struct hubbub *dcn35_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), + GFP_KERNEL); + + if (!hubbub3) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT hubbub_reg + hubbub_reg_init(); + +#undef REG_STRUCT +#define REG_STRUCT vmid_regs + vmid_regs_init(0), + vmid_regs_init(1), + vmid_regs_init(2), + vmid_regs_init(3), + vmid_regs_init(4), + vmid_regs_init(5), + vmid_regs_init(6), + vmid_regs_init(7), + vmid_regs_init(8), + vmid_regs_init(9), + vmid_regs_init(10), + vmid_regs_init(11), + vmid_regs_init(12), + vmid_regs_init(13), + vmid_regs_init(14), + vmid_regs_init(15); + + hubbub35_construct(hubbub3, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask, + 384,/*ctx->dc->dml.ip.det_buffer_size_kbytes,*/ + 8, /*ctx->dc->dml.ip.pixel_chunk_size_kbytes,*/ + 1792 /*ctx->dc->dml.ip.config_return_buffer_size_in_kbytes*/); + + + for (i = 0; i < res_cap_dcn35.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub3->base; +} + +static struct timing_generator *dcn35_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT optc_regs + optc_regs_init(0), + optc_regs_init(1), + optc_regs_init(2), + optc_regs_init(3); + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn35_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +static struct link_encoder *dcn35_link_encoder_create( + struct dc_context *ctx, + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT link_enc_aux_regs + aux_regs_init(0), + aux_regs_init(1), + aux_regs_init(2), + aux_regs_init(3), + aux_regs_init(4); + +#undef REG_STRUCT +#define REG_STRUCT link_enc_hpd_regs + hpd_regs_init(0), + hpd_regs_init(1), + hpd_regs_init(2), + hpd_regs_init(3), + hpd_regs_init(4); + +#undef REG_STRUCT +#define REG_STRUCT link_enc_regs + link_regs_init(0, A), + link_regs_init(1, B), + link_regs_init(2, C), + link_regs_init(3, D), + link_regs_init(4, E); + + dcn35_link_encoder_construct(enc20, + enc_init_data, 
+ &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc20->enc10.base; +} + +/* Create a minimal link encoder object not associated with a particular + * physical connector. + * resource_funcs.link_enc_create_minimal + */ +static struct link_encoder *dcn31_link_enc_create_minimal( + struct dc_context *ctx, enum engine_id eng_id) +{ + struct dcn20_link_encoder *enc20; + + if ((eng_id - ENGINE_ID_DIGA) > ctx->dc->res_pool->res_cap->num_dig_link_enc) + return NULL; + + enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + if (!enc20) + return NULL; + + dcn31_link_encoder_construct_minimal( + enc20, + ctx, + &link_enc_feature, + &link_enc_regs[eng_id - ENGINE_ID_DIGA], + eng_id); + + return &enc20->enc10.base; +} + +static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dcn31_panel_cntl *panel_cntl = + kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dcn31_panel_cntl_construct(panel_cntl, init_data); + + return &panel_cntl->base; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); + +} + +static struct audio *dcn31_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + +#undef REG_STRUCT +#define REG_STRUCT audio_regs + audio_regs_init(0), + audio_regs_init(1), + audio_regs_init(2), + audio_regs_init(3), + audio_regs_init(4); + audio_regs_init(5); + audio_regs_init(6); + + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct vpg *dcn31_vpg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); + + if (!vpg31) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT vpg_regs + vpg_regs_init(0), + vpg_regs_init(1), + vpg_regs_init(2), + vpg_regs_init(3), + vpg_regs_init(4), + vpg_regs_init(5), + vpg_regs_init(6), + vpg_regs_init(7), + vpg_regs_init(8), + vpg_regs_init(9); + + vpg31_construct(vpg31, ctx, inst, + &vpg_regs[inst], + &vpg_shift, + &vpg_mask); + + return &vpg31->base; +} + +static struct afmt *dcn31_afmt_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); + + if (!afmt31) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT afmt_regs + afmt_regs_init(0), + afmt_regs_init(1), + afmt_regs_init(2), + afmt_regs_init(3), + afmt_regs_init(4), + afmt_regs_init(5); + + afmt31_construct(afmt31, ctx, inst, + &afmt_regs[inst], + &afmt_shift, + &afmt_mask); + + // Light sleep by default, no need to power down here + + return &afmt31->base; +} + +static struct apg *dcn31_apg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + + if (!apg31) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT apg_regs + apg_regs_init(0), + apg_regs_init(1), + apg_regs_init(2), + apg_regs_init(3); + + apg31_construct(apg31, ctx, inst, + &apg_regs[inst], + &apg_shift, + &apg_mask); + + return &apg31->base; +} + +static struct stream_encoder *dcn35_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + 
struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGF) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + afmt = dcn31_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT stream_enc_regs + stream_enc_regs_init(0), + stream_enc_regs_init(1), + stream_enc_regs_init(2), + stream_enc_regs_init(3), + stream_enc_regs_init(4); + + dcn35_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, + eng_id, vpg, afmt, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; + struct vpg *vpg; + struct apg *apg; + uint32_t hpo_dp_inst; + uint32_t vpg_inst; + uint32_t apg_inst; + + ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); + hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + + /* Mapping of VPG register blocks to HPO DP block instance: + * VPG[6] -> HPO_DP[0] + * VPG[7] -> HPO_DP[1] + * VPG[8] -> HPO_DP[2] + * VPG[9] -> HPO_DP[3] + */ + vpg_inst = hpo_dp_inst + 6; + + /* Mapping of APG register blocks to HPO DP block instance: + * APG[0] -> HPO_DP[0] + * APG[1] -> HPO_DP[1] + * APG[2] -> HPO_DP[2] + * APG[3] -> HPO_DP[3] + */ + apg_inst = hpo_dp_inst; + + /* allocate HPO stream encoder and create VPG sub-block */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + apg = dcn31_apg_create(ctx, apg_inst); + + if (!hpo_dp_enc31 || !vpg || !apg) { + kfree(hpo_dp_enc31); + kfree(vpg); + kfree(apg); + return NULL; + } + +#undef REG_STRUCT +#define REG_STRUCT hpo_dp_stream_enc_regs + hpo_dp_stream_encoder_reg_init(0), + hpo_dp_stream_encoder_reg_init(1), + hpo_dp_stream_encoder_reg_init(2), + hpo_dp_stream_encoder_reg_init(3); + + dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, + hpo_dp_inst, eng_id, vpg, apg, + &hpo_dp_stream_enc_regs[hpo_dp_inst], + &hpo_dp_se_shift, &hpo_dp_se_mask); + + return &hpo_dp_enc31->base; +} + +static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( + uint8_t inst, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; + + /* allocate HPO link encoder */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + +#undef REG_STRUCT +#define REG_STRUCT hpo_dp_link_enc_regs + hpo_dp_link_encoder_reg_init(0), + hpo_dp_link_encoder_reg_init(1); + + hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, + &hpo_dp_link_enc_regs[inst], + &hpo_dp_le_shift, &hpo_dp_le_mask); + + return &hpo_dp_enc31->base; +} + +static struct dce_hwseq *dcn35_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + +#undef REG_STRUCT +#define REG_STRUCT hwseq_reg + hwseq_reg_init(); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn31_create_audio, + .create_stream_encoder = dcn35_stream_encoder_create, + 
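+	/* this vtable is handed to resource_construct() later in
+	 * dcn35_resource_construct() to instantiate the audios and the DIO,
+	 * HPO and virtual stream encoders for the pool
+	 */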
.create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, + .create_hwseq = dcn35_hwseq_create, +}; + +static void dcn35_resource_destruct(struct dcn35_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + if (pool->base.stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); + pool->base.stream_enc[i]->vpg = NULL; + } + if (pool->base.stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); + pool->base.stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { + if (pool->base.hpo_dp_stream_enc[i] != NULL) { + if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); + pool->base.hpo_dp_stream_enc[i]->vpg = NULL; + } + if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { + kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); + pool->base.hpo_dp_stream_enc[i]->apg = NULL; + } + kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); + pool->base.hpo_dp_stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { + if (pool->base.hpo_dp_link_enc[i] != NULL) { + kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); + pool->base.hpo_dp_link_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + if (pool->base.dscs[i] != NULL) + dcn20_dsc_destroy(&pool->base.dscs[i]); + } + + if (pool->base.mpc != NULL) { + kfree(TO_DCN20_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn35_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); + if (pool->base.hw_i2cs[i] != NULL) { + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + } + if (pool->base.sw_i2cs[i] != NULL) { + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_dwb; i++) { + if (pool->base.dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); + pool->base.dwbc[i] = NULL; + } + if (pool->base.mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); + pool->base.mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 
0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn20_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { + if (pool->base.mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->base.mpc_lut[i]); + pool->base.mpc_lut[i] = NULL; + } + if (pool->base.mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->base.mpc_shaper[i]); + pool->base.mpc_shaper[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn20_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.multiple_abms[i] != NULL) + dce_abm_destroy(&pool->base.multiple_abms[i]); + } + + if (pool->base.psr != NULL) + dmub_psr_destroy(&pool->base.psr); + + if (pool->base.replay != NULL) + dmub_replay_destroy(&pool->base.replay); + + if (pool->base.pg_cntl != NULL) + dcn_pg_cntl_destroy(&pool->base.pg_cntl); + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); +} + +static struct hubp *dcn35_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn20_hubp *hubp2 = + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + +#undef REG_STRUCT +#define REG_STRUCT hubp_regs + hubp_regs_init(0), + hubp_regs_init(1), + hubp_regs_init(2), + hubp_regs_init(3); + + if (hubp35_construct(hubp2, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + kfree(hubp2); + return NULL; +} + +static void dcn35_dwbc_init(struct dcn30_dwbc *dwbc30, struct dc_context *ctx) +{ + dcn35_dwbc_set_fgcg( + dwbc30, ctx->dc->debug.enable_fine_grain_clock_gating.bits.dwb); +} + +static bool dcn35_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), + GFP_KERNEL); + + if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return false; + } + +#undef REG_STRUCT +#define REG_STRUCT dwbc35_regs + dwbc_regs_dcn3_init(0); + + dcn35_dwbc_construct(dwbc30, ctx, + &dwbc35_regs[i], + &dwbc35_shift, + &dwbc35_mask, + i); + + pool->dwbc[i] = &dwbc30->base; + + dcn35_dwbc_init(dwbc30, ctx); + } + return true; +} + +static void dcn35_mmhubbub_init(struct dcn30_mmhubbub *mcif_wb30, + struct dc_context *ctx) +{ + dcn35_mmhubbub_set_fgcg( + mcif_wb30, + ctx->dc->debug.enable_fine_grain_clock_gating.bits.mmhubbub); +} + +static bool dcn35_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), + GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + +#undef REG_STRUCT +#define REG_STRUCT mcif_wb35_regs + mcif_wb_regs_dcn3_init(0); + + dcn35_mmhubbub_construct(mcif_wb30, ctx, + &mcif_wb35_regs[i], + &mcif_wb35_shift, + &mcif_wb35_mask, + i); + + dcn35_mmhubbub_init(mcif_wb30, ctx); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +static struct display_stream_compressor *dcn35_dsc_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + +#undef 
REG_STRUCT +#define REG_STRUCT dsc_regs + dsc_regsDCN35_init(0), + dsc_regsDCN35_init(1), + dsc_regsDCN35_init(2), + dsc_regsDCN35_init(3); + + dsc35_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + dsc35_set_fgcg(dsc, + ctx->dc->debug.enable_fine_grain_clock_gating.bits.dsc); + return &dsc->base; +} + +static void dcn35_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn35_resource_pool *dcn35_pool = TO_DCN35_RES_POOL(*pool); + + dcn35_resource_destruct(dcn35_pool); + kfree(dcn35_pool); + *pool = NULL; +} + +static struct clock_source *dcn35_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn31_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + BREAK_TO_DEBUGGER(); + return NULL; +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config) +{ + *panel_config = panel_config_defaults; +} + + +static bool dcn35_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + out = dml2_validate(dc, context, fast_validate); + + if (fast_validate) + return out; + + DC_FP_START(); + dcn35_decide_zstate_support(dc, context); + DC_FP_END(); + + return out; +} + + +static struct resource_funcs dcn35_res_pool_funcs = { + .destroy = dcn35_destroy_resource_pool, + .link_enc_create = dcn35_link_encoder_create, + .link_enc_create_minimal = dcn31_link_enc_create_minimal, + .link_encs_assign = link_enc_cfg_link_encs_assign, + .link_enc_unassign = link_enc_cfg_link_enc_unassign, + .panel_cntl_create = dcn31_panel_cntl_create, + .validate_bandwidth = dcn35_validate_bandwidth, + .calculate_wm_and_dlg = NULL, + .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, + .populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .release_pipe = dcn20_release_pipe, + .add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn30_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn35_update_bw_bounding_box_fpu, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .get_panel_config_defaults = dcn35_get_panel_config_defaults, + .get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia, +}; + +static bool dcn35_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn35_resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + +#undef REG_STRUCT +#define REG_STRUCT bios_regs + bios_regs_init(); + +#undef REG_STRUCT +#define REG_STRUCT clk_src_regs + clk_src_regs_init(0, A), + clk_src_regs_init(1, B), + clk_src_regs_init(2, C), + clk_src_regs_init(3, D), + 
clk_src_regs_init(4, E);
+
+#undef REG_STRUCT
+#define REG_STRUCT abm_regs
+	abm_regs_init(0),
+	abm_regs_init(1),
+	abm_regs_init(2),
+	abm_regs_init(3);
+
+#undef REG_STRUCT
+#define REG_STRUCT dccg_regs
+	dccg_regs_init();
+
+	ctx->dc_bios->regs = &bios_regs;
+
+	pool->base.res_cap = &res_cap_dcn35;
+
+	pool->base.funcs = &dcn35_res_pool_funcs;
+
+	/*************************************************
+	 *  Resource + asic cap hardcoding               *
+	 *************************************************/
+	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+	pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
+	dc->caps.max_downscale_ratio = 600;
+	dc->caps.i2c_speed_in_khz = 100;
+	dc->caps.i2c_speed_in_khz_hdcp = 100;
+	dc->caps.max_cursor_size = 256;
+	dc->caps.min_horizontal_blanking_period = 80;
+	dc->caps.dmdata_alloc_size = 2048;
+	dc->caps.max_slave_planes = 2;
+	dc->caps.max_slave_yuv_planes = 2;
+	dc->caps.max_slave_rgb_planes = 2;
+	dc->caps.post_blend_color_processing = true;
+	dc->caps.force_dp_tps4_for_cp2520 = true;
+	if (dc->config.forceHBR2CP2520)
+		dc->caps.force_dp_tps4_for_cp2520 = false;
+	dc->caps.dp_hpo = true;
+	dc->caps.dp_hdmi21_pcon_support = true;
+
+	dc->caps.edp_dsc_support = true;
+	dc->caps.extended_aux_timeout_support = true;
+	dc->caps.dmcub_support = true;
+	dc->caps.is_apu = true;
+	dc->caps.seamless_odm = true;
+
+	dc->caps.zstate_support = true;
+	dc->caps.ips_support = true;
+	dc->caps.max_v_total = (1 << 15) - 1;
+
+	/* Color pipeline capabilities */
+	dc->caps.color.dpp.dcn_arch = 1;
+	dc->caps.color.dpp.input_lut_shared = 0;
+	dc->caps.color.dpp.icsc = 1;
+	dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
+	dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+	dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+	dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
+	dc->caps.color.dpp.dgam_rom_caps.pq = 1;
+	dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
+	dc->caps.color.dpp.post_csc = 1;
+	dc->caps.color.dpp.gamma_corr = 1;
+	dc->caps.color.dpp.dgam_rom_for_yuv = 0;
+
+	dc->caps.color.dpp.hw_3d_lut = 1;
+	dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1
+	// no OGAM ROM on DCN301
+	dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+	dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+	dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+	dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+	dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+	dc->caps.color.dpp.ocsc = 0;
+
+	dc->caps.color.mpc.gamut_remap = 1;
+	dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //2
+	dc->caps.color.mpc.ogam_ram = 1;
+	dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+	dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+	dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+	dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+	dc->caps.color.mpc.ocsc = 1;
+
+	/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
+	 * to provide some margin.
+	 * It's expected for future ASICs to have an equal or higher value, in order to
+	 * have deterministic power improvement from generation to generation.
+ * (i.e., we should not expect new ASIC generation with lower vmin rate) + */ + dc->caps.max_disp_clock_khz_at_vmin = 650000; + + /* Use pipe context based otg sync logic */ + dc->config.use_pipe_ctx_sync_logic = true; + + /* read VBIOS LTTPR caps */ + { + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + /* interop bit is implicit */ + { + dc->caps.vbios_lttpr_aware = true; + } + } + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock Sources for Pixel Clock*/ + pool->base.clock_sources[DCN35_CLK_SRC_PLL0] = + dcn35_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN35_CLK_SRC_PLL1] = + dcn35_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCN35_CLK_SRC_PLL2] = + dcn35_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCN35_CLK_SRC_PLL3] = + dcn35_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN35_CLK_SRC_PLL4] = + dcn35_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + + pool->base.clk_src_count = DCN35_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn35_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + /*temp till dml2 fully work without dml1*/ + dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip, DML_PROJECT_DCN31); + + /* TODO: DCCG */ + pool->base.dccg = dccg35_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + +#undef REG_STRUCT +#define REG_STRUCT pg_cntl_regs + pg_cntl_dcn35_regs_init(); + + pool->base.pg_cntl = pg_cntl35_create(ctx, &pg_cntl_regs, &pg_cntl_shift, &pg_cntl_mask); + if (pool->base.pg_cntl == NULL) { + dm_error("DC: failed to create power gate control!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* TODO: IRQ */ + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn35_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + + /* HUBBUB */ + pool->base.hubbub = dcn35_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.hubps[i] = dcn35_hubp_create(ctx, i); + if (pool->base.hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->base.dpps[i] = dcn35_dpp_create(ctx, i); + if (pool->base.dpps[i] == 
NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + pool->base.opps[i] = dcn35_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.timing_generators[i] = dcn35_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + pool->base.timing_generator_count = i; + + /* PSR */ + pool->base.psr = dmub_psr_create(ctx); + if (pool->base.psr == NULL) { + dm_error("DC: failed to create psr obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* Replay */ + pool->base.replay = dmub_replay_create(ctx); + if (pool->base.replay == NULL) { + dm_error("DC: failed to create replay obj!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* ABM */ + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.multiple_abms[i] = dmub_abm_create(ctx, + &abm_regs[i], + &abm_shift, + &abm_mask); + if (pool->base.multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* MPC and DSC */ + pool->base.mpc = dcn35_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut); + if (pool->base.mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { + pool->base.dscs[i] = dcn35_dsc_create(ctx, i); + if (pool->base.dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn35_dwbc_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn35_mmhubbub_create(ctx, &pool->base)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn31_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn31_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + /* DCN3.5 has 6 DPIA */ + pool->base.usb4_dpia_count = 4; + if (dc->debug.dpia_debug.bits.disable_dpia) + pool->base.usb4_dpia_count = 0; + + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn35_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + dc->dcn_ip->max_num_dpp = pool->base.pipe_count; + + dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; + dc->dml2_options.use_native_pstate_optimization = true; + dc->dml2_options.use_native_soc_bb_construction = true; + dc->dml2_options.minimize_dispclk_using_odm = 
false; + if (dc->config.EnableMinDispClkODM) + dc->dml2_options.minimize_dispclk_using_odm = true; + dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; + + dc->dml2_options.callbacks.dc = dc; + dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; + dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; + dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; + dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; + dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; + dc->dml2_options.max_segments_per_hubp = 24; + + dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ + + if (dc->config.sdpif_request_limit_words_per_umc == 0) + dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/ + + return true; + +create_fail: + + dcn35_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn35_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn35_resource_pool *pool = + kzalloc(sizeof(struct dcn35_resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn35_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h new file mode 100644 index 0000000000..a51c4a9eaa --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef _DCN35_RESOURCE_H_ +#define _DCN35_RESOURCE_H_ + +#include "core_types.h" + +#define DCN3_5_VMIN_DISPCLK_HZ 717000000 +#define TO_DCN35_RES_POOL(pool)\ + container_of(pool, struct dcn35_resource_pool, base) + +extern struct _vcs_dpi_ip_params_st dcn3_5_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc; + +struct dcn35_resource_pool { + struct resource_pool base; +}; + +struct resource_pool *dcn35_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +/* Defs for runtime init of registers */ + +#define OPP_REG_LIST_DCN20_RI(id) \ + OPP_REG_LIST_DCN10_RI(id), \ + OPP_DPG_REG_LIST_RI(id), \ + SRI_ARR(FMT_422_CONTROL, FMT, id), \ + SRI_ARR(OPPBUF_CONTROL1, OPPBUF, id) + +#define OPP_REG_LIST_DCN35_RI(id) \ + OPP_REG_LIST_DCN20_RI(id), \ + SRI2_ARR(OPP_TOP_CLK_CONTROL, OPP, id) + +#define VPG_DCN31_REG_LIST_RI(id) \ + SRI_ARR(VPG_GENERIC_STATUS, VPG, id), \ + SRI_ARR(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \ + SRI_ARR(VPG_GENERIC_PACKET_DATA, VPG, id), \ + SRI_ARR(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \ + SRI_ARR(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id), \ + SRI_ARR(VPG_MEM_PWR, VPG, id) + +#define AFMT_DCN31_REG_LIST_RI(id) \ + SRI_ARR(AFMT_INFOFRAME_CONTROL0, AFMT, id), \ + SRI_ARR(AFMT_VBI_PACKET_CONTROL, AFMT, id), \ + SRI_ARR(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \ + SRI_ARR(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \ + SRI_ARR(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \ + SRI_ARR(AFMT_60958_0, AFMT, id), \ + SRI_ARR(AFMT_60958_1, AFMT, id), \ + SRI_ARR(AFMT_60958_2, AFMT, id), \ + SRI_ARR(AFMT_MEM_PWR, AFMT, id) + +/* Stream encoder */ +#define SE_DCN35_REG_LIST_RI(id) \ + SRI_ARR(AFMT_CNTL, DIG, id), \ + SRI_ARR(DIG_FE_CNTL, DIG, id), \ + SRI_ARR(HDMI_CONTROL, DIG, id), \ + SRI_ARR(HDMI_DB_CONTROL, DIG, id), \ + SRI_ARR(HDMI_GC, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \ + SRI_ARR(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \ + SRI_ARR(HDMI_INFOFRAME_CONTROL0, DIG, id), \ + SRI_ARR(HDMI_INFOFRAME_CONTROL1, DIG, id), \ + SRI_ARR(HDMI_VBI_PACKET_CONTROL, DIG, id), \ + SRI_ARR(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\ + SRI_ARR(HDMI_ACR_PACKET_CONTROL, DIG, id),\ + SRI_ARR(HDMI_ACR_32_0, DIG, id),\ + SRI_ARR(HDMI_ACR_32_1, DIG, id),\ + SRI_ARR(HDMI_ACR_44_0, DIG, id),\ + SRI_ARR(HDMI_ACR_44_1, DIG, id),\ + SRI_ARR(HDMI_ACR_48_0, DIG, id),\ + SRI_ARR(HDMI_ACR_48_1, DIG, id),\ + SRI_ARR(DP_DB_CNTL, DP, id), \ + SRI_ARR(DP_MSA_MISC, DP, id), \ + SRI_ARR(DP_MSA_VBID_MISC, DP, id), \ + SRI_ARR(DP_MSA_COLORIMETRY, DP, id), \ + SRI_ARR(DP_MSA_TIMING_PARAM1, DP, id), \ + SRI_ARR(DP_MSA_TIMING_PARAM2, DP, id), \ + SRI_ARR(DP_MSA_TIMING_PARAM3, DP, id), \ + SRI_ARR(DP_MSA_TIMING_PARAM4, DP, id), \ + SRI_ARR(DP_MSE_RATE_CNTL, DP, id), \ + SRI_ARR(DP_MSE_RATE_UPDATE, DP, id), \ + SRI_ARR(DP_PIXEL_FORMAT, DP, id), \ + SRI_ARR(DP_SEC_CNTL, DP, id), \ + SRI_ARR(DP_SEC_CNTL1, DP, id), \ + SRI_ARR(DP_SEC_CNTL2, DP, id), \ + SRI_ARR(DP_SEC_CNTL5, DP, id), \ + SRI_ARR(DP_SEC_CNTL6, DP, id), \ + SRI_ARR(DP_STEER_FIFO, DP, id), \ + SRI_ARR(DP_VID_M, DP, id), \ + 
SRI_ARR(DP_VID_N, DP, id), \ + SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \ + SRI_ARR(DP_VID_TIMING, DP, id), \ + SRI_ARR(DP_SEC_AUD_N, DP, id), \ + SRI_ARR(DP_SEC_TIMESTAMP, DP, id), \ + SRI_ARR(DP_DSC_CNTL, DP, id), \ + SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \ + SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ + SRI_ARR(DP_SEC_FRAMING4, DP, id), \ + SRI_ARR(DP_GSP11_CNTL, DP, id), \ + SRI_ARR(DME_CONTROL, DME, id),\ + SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \ + SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ + SRI_ARR(DIG_FE_CNTL, DIG, id), \ + SRI_ARR(DIG_FE_EN_CNTL, DIG, id), \ + SRI_ARR(DIG_FE_CLK_CNTL, DIG, id), \ + SRI_ARR(DIG_CLOCK_PATTERN, DIG, id), \ + SRI_ARR(DIG_FIFO_CTRL0, DIG, id), \ + SRI_ARR(STREAM_MAPPER_CONTROL, DIG, id) + +#define LE_DCN35_REG_LIST_RI(id)\ + LE_DCN3_REG_LIST_RI(id),\ + SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \ + SR_ARR(DIO_LINKA_CNTL, id), \ + SR_ARR(DIO_LINKB_CNTL, id), \ + SR_ARR(DIO_LINKC_CNTL, id), \ + SR_ARR(DIO_LINKD_CNTL, id), \ + SR_ARR(DIO_LINKE_CNTL, id), \ + SR_ARR(DIO_LINKF_CNTL, id),\ + SRI_ARR(DIG_BE_CLK_CNTL, DIG, id),\ + SR_ARR(DIO_CLK_CNTL, id) + +#define MCIF_WB_COMMON_REG_LIST_DCN3_5_RI(inst) \ + MCIF_WB_COMMON_REG_LIST_DCN32_RI(inst), \ + SRI2_ARR(MMHUBBUB_CLOCK_CNTL, MMHUBBUB, inst) + +#define HWSEQ_DCN35_REG_LIST()\ + SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ + SR(DIO_MEM_PWR_CTRL), \ + SR(ODM_MEM_PWR_CTRL3), \ + SR(MMHUBBUB_MEM_PWR_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL), \ + SR(DCCG_GATE_DISABLE_CNTL2), \ + SR(DCCG_GATE_DISABLE_CNTL4), \ + SR(DCCG_GATE_DISABLE_CNTL5), \ + SR(DCFCLK_CNTL),\ + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ + SRII(PIXEL_RATE_CNTL, OTG, 0), \ + SRII(PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PIXEL_RATE_CNTL, OTG, 3),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 0),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 1),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 2),\ + SRII(PHYPLL_PIXEL_RATE_CNTL, OTG, 3),\ + SR(MICROSECOND_TIME_BASE_DIV), \ + SR(MILLISECOND_TIME_BASE_DIV), \ + SR(DISPCLK_FREQ_CHANGE_CNTL), \ + SR(RBBMIF_TIMEOUT_DIS), \ + SR(RBBMIF_TIMEOUT_DIS_2), \ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ + SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ + SR(MPC_CRC_CTRL), \ + SR(MPC_CRC_RESULT_GB), \ + SR(MPC_CRC_RESULT_C), \ + SR(MPC_CRC_RESULT_AR), \ + SR(DOMAIN0_PG_CONFIG), \ + SR(DOMAIN1_PG_CONFIG), \ + SR(DOMAIN2_PG_CONFIG), \ + SR(DOMAIN3_PG_CONFIG), \ + SR(DOMAIN16_PG_CONFIG), \ + SR(DOMAIN17_PG_CONFIG), \ + SR(DOMAIN18_PG_CONFIG), \ + SR(DOMAIN19_PG_CONFIG), \ + SR(DOMAIN0_PG_STATUS), \ + SR(DOMAIN1_PG_STATUS), \ + SR(DOMAIN2_PG_STATUS), \ + SR(DOMAIN3_PG_STATUS), \ + SR(DOMAIN16_PG_STATUS), \ + SR(DOMAIN17_PG_STATUS), \ + SR(DOMAIN18_PG_STATUS), \ + SR(DOMAIN19_PG_STATUS), \ + SR(DC_IP_REQUEST_CNTL), \ + SR(AZALIA_AUDIO_DTO), \ + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_HW_CONTROL),\ + SR(DMU_CLK_CNTL) + +/* OPTC */ +#define OPTC_COMMON_REG_LIST_DCN3_5_RI(inst) \ + SRI_ARR(OTG_VSTARTUP_PARAM, OTG, inst),\ + SRI_ARR(OTG_VUPDATE_PARAM, OTG, inst),\ + SRI_ARR(OTG_VREADY_PARAM, OTG, inst),\ + SRI_ARR(OTG_MASTER_UPDATE_LOCK, OTG, inst),\ + SRI_ARR(OTG_GLOBAL_CONTROL0, OTG, inst),\ + SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst),\ + SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst),\ + SRI_ARR(OTG_GLOBAL_CONTROL4, OTG, inst),\ + SRI_ARR(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\ + SRI_ARR(OTG_H_TOTAL, OTG, inst),\ + SRI_ARR(OTG_H_BLANK_START_END, OTG, inst),\ + SRI_ARR(OTG_H_SYNC_A, OTG, inst),\ + SRI_ARR(OTG_H_SYNC_A_CNTL, OTG, inst),\ + 
SRI_ARR(OTG_H_TIMING_CNTL, OTG, inst),\ + SRI_ARR(OTG_V_TOTAL, OTG, inst),\ + SRI_ARR(OTG_V_BLANK_START_END, OTG, inst),\ + SRI_ARR(OTG_V_SYNC_A, OTG, inst),\ + SRI_ARR(OTG_V_SYNC_A_CNTL, OTG, inst),\ + SRI_ARR(OTG_CONTROL, OTG, inst),\ + SRI_ARR(OTG_STEREO_CONTROL, OTG, inst),\ + SRI_ARR(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\ + SRI_ARR(OTG_STEREO_STATUS, OTG, inst),\ + SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst),\ + SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst),\ + SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst),\ + SRI_ARR(OTG_TRIGA_CNTL, OTG, inst),\ + SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ + SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ + SRI_ARR(OTG_STATUS_FRAME_COUNT, OTG, inst),\ + SRI_ARR(OTG_STATUS, OTG, inst),\ + SRI_ARR(OTG_STATUS_POSITION, OTG, inst),\ + SRI_ARR(OTG_NOM_VERT_POSITION, OTG, inst),\ + SRI_ARR(OTG_M_CONST_DTO0, OTG, inst),\ + SRI_ARR(OTG_M_CONST_DTO1, OTG, inst),\ + SRI_ARR(OTG_CLOCK_CONTROL, OTG, inst),\ + SRI_ARR(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ + SRI_ARR(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ + SRI_ARR(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\ + SRI_ARR(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\ + SRI_ARR(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ + SRI_ARR(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ + SRI_ARR(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ + SRI_ARR(OPTC_DATA_SOURCE_SELECT, ODM, inst),\ + SRI_ARR(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\ + SRI_ARR(CONTROL, VTG, inst),\ + SRI_ARR(OTG_VERT_SYNC_CONTROL, OTG, inst),\ + SRI_ARR(OTG_GSL_CONTROL, OTG, inst),\ + SRI_ARR(OTG_CRC_CNTL, OTG, inst),\ + SRI_ARR(OTG_CRC0_DATA_RG, OTG, inst),\ + SRI_ARR(OTG_CRC0_DATA_B, OTG, inst),\ + SRI_ARR(OTG_CRC1_DATA_RG, OTG, inst),\ + SRI_ARR(OTG_CRC1_DATA_B, OTG, inst),\ + SRI_ARR(OTG_CRC2_DATA_RG, OTG, inst),\ + SRI_ARR(OTG_CRC2_DATA_B, OTG, inst),\ + SRI_ARR(OTG_CRC3_DATA_RG, OTG, inst),\ + SRI_ARR(OTG_CRC3_DATA_B, OTG, inst),\ + SRI_ARR(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\ + SRI_ARR(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\ + SRI_ARR(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\ + SRI_ARR(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst),\ + SRI_ARR(OTG_CRC1_WINDOWA_X_CONTROL, OTG, inst),\ + SRI_ARR(OTG_CRC1_WINDOWA_Y_CONTROL, OTG, inst),\ + SRI_ARR(OTG_CRC1_WINDOWB_X_CONTROL, OTG, inst),\ + SRI_ARR(OTG_CRC1_WINDOWB_Y_CONTROL, OTG, inst),\ + SRI_ARR(OTG_CRC0_WINDOWA_X_CONTROL_READBACK, OTG, inst),\ + SRI_ARR(OTG_CRC0_WINDOWA_Y_CONTROL_READBACK, OTG, inst),\ + SRI_ARR(OTG_CRC0_WINDOWB_X_CONTROL_READBACK, OTG, inst),\ + SRI_ARR(OTG_CRC0_WINDOWB_Y_CONTROL_READBACK, OTG, inst),\ + SRI_ARR(OTG_CRC1_WINDOWA_X_CONTROL_READBACK, OTG, inst),\ + SRI_ARR(OTG_CRC1_WINDOWA_Y_CONTROL_READBACK, OTG, inst),\ + SRI_ARR(OTG_CRC1_WINDOWB_X_CONTROL_READBACK, OTG, inst),\ + SRI_ARR(OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG, inst),\ + SR_ARR(GSL_SOURCE_SELECT, inst),\ + SRI_ARR(OTG_TRIGA_MANUAL_TRIG, OTG, inst),\ + SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst),\ + SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst),\ + SRI_ARR(OTG_GSL_WINDOW_X, OTG, inst),\ + SRI_ARR(OTG_GSL_WINDOW_Y, OTG, inst),\ + SRI_ARR(OTG_VUPDATE_KEEPOUT, OTG, inst),\ + SRI_ARR(OTG_DSC_START_POSITION, OTG, inst),\ + SRI_ARR(OTG_DRR_TRIGGER_WINDOW, OTG, inst),\ + SRI_ARR(OTG_DRR_V_TOTAL_CHANGE, OTG, inst),\ + SRI_ARR(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ + SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst),\ + SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst),\ + SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst),\ + SRI_ARR(OTG_DRR_CONTROL, OTG, inst),\ + SRI2_ARR(OPTC_CLOCK_CONTROL, OPTC, inst) + +/* DPP */ +#define DPP_REG_LIST_DCN35_RI(id)\ + 
DPP_REG_LIST_DCN30_COMMON_RI(id) + +#endif /* _DCN35_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index d1a4ed6f59..c78c9224ab 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -86,6 +86,7 @@ enum dmub_status { DMUB_STATUS_TIMEOUT, DMUB_STATUS_INVALID, DMUB_STATUS_HW_FAILURE, + DMUB_STATUS_POWER_STATE_D3 }; /* enum dmub_asic - dmub asic identifier */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 3cea96a364..bb1f69a54c 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -185,8 +185,7 @@ union abm_flags { unsigned int disable_abm_requested : 1; /** - * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled - * immediately. + * @disable_abm_immediately: Indicates if driver has requested ABM to be disabled immediately. */ unsigned int disable_abm_immediately : 1; @@ -654,7 +653,7 @@ union dmub_fw_boot_options { uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/ uint32_t usb4_cm_version: 1; /**< 1 CM support */ uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */ - uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */ + uint32_t reserved0: 1; uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/ uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */ uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/ @@ -818,18 +817,61 @@ enum dmub_gpint_command { * RETURN: Lower 32-bit mask. */ DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK = 101, + /** - * DESC: Updates the trace buffer lower 32-bit mask. + * DESC: Updates the trace buffer mask bit0~bit15. * ARGS: The new mask * RETURN: Lower 32-bit mask. */ DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD0 = 102, + /** - * DESC: Updates the trace buffer mask bi0~bit15. + * DESC: Updates the trace buffer mask bit16~bit31. * ARGS: The new mask * RETURN: Lower 32-bit mask. */ DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1 = 103, + + /** + * DESC: Updates the trace buffer mask bit32~bit47. + * ARGS: The new mask + * RETURN: Lower 32-bit mask. + */ + DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD2 = 114, + + /** + * DESC: Updates the trace buffer mask bit48~bit63. + * ARGS: The new mask + * RETURN: Lower 32-bit mask. + */ + DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD3 = 115, + + /** + * DESC: Read the trace buffer mask bit0~bit15. + */ + DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD0 = 116, + + /** + * DESC: Read the trace buffer mask bit16~bit31. + */ + DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD1 = 117, + + /** + * DESC: Read the trace buffer mask bit32~bit47. + */ + DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD2 = 118, + + /** + * DESC: Read the trace buffer mask bit48~bit63. + */ + DMUB_GPINT__GET_TRACE_BUFFER_MASK_WORD3 = 119, + + /** + * DESC: Enable measurements for various task durations + * ARGS: 0 - Disable measurement + * 1 - Enable measurement + */ + DMUB_GPINT__TRACE_DMUB_WAKE_ACTIVITY = 123, }; /** @@ -1303,6 +1345,10 @@ enum dmub_cmd_cab_type { * Fit surfaces in CAB (i.e. CAB enable) */ DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB = 2, + /** + * Do not fit surfaces in CAB (i.e.
no CAB) + */ + DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB = 3, }; /** @@ -2786,6 +2832,7 @@ struct dmub_rb_cmd_psr_set_power_opt { #define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) # define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT) # define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) +# define REPLAY_RESIDENCY_MODE_IPS 0x10 #define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) # define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT) @@ -2840,6 +2887,18 @@ enum dmub_cmd_replay_type { * Set power opt and coasting vtotal. */ DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL = 4, + /** + * Set disabled timing sync. + */ + DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED = 5, + /** + * Set Residency Frameupdate Timer. + */ + DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER = 6, + /** + * Set pseudo vtotal. + */ + DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7, }; /** @@ -3002,6 +3061,46 @@ struct dmub_cmd_replay_set_power_opt_data { uint32_t power_opt; }; +/** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command. + */ +struct dmub_cmd_replay_set_timing_sync_data { + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * REPLAY set_timing_sync + */ + uint8_t timing_sync_supported; + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad[2]; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. + */ +struct dmub_cmd_replay_set_pseudo_vtotal { + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Source vtotal used as pseudo vtotal for Replay + IPS + ABM full screen video + */ + uint16_t vtotal; + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad; +}; + /** * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command. */ @@ -3034,6 +3133,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data { * Currently the support is only for 0 or 1 */ uint8_t panel_inst; + /** + * 16-bit value dictated by driver that indicates the coasting vtotal high byte part. + */ + uint16_t coasting_vtotal_high; + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad[2]; }; /** @@ -3068,6 +3175,91 @@ struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal { struct dmub_cmd_replay_set_coasting_vtotal_data replay_set_coasting_vtotal_data; }; +/** + * Definition of a DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command. + */ +struct dmub_rb_cmd_replay_set_timing_sync { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Definition of DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command. + */ + struct dmub_cmd_replay_set_timing_sync_data replay_set_timing_sync_data; +}; + +/** + * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. + */ +struct dmub_rb_cmd_replay_set_pseudo_vtotal { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. + */ + struct dmub_cmd_replay_set_pseudo_vtotal data; +}; + +/** + * Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command. + */ +struct dmub_cmd_replay_frameupdate_timer_data { + /** + * Panel Instance.
+ * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Replay Frameupdate Timer Enable or not + */ + uint8_t enable; + /** + * REPLAY force reflash frame update number + */ + uint16_t frameupdate_count; +}; +/** + * Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER + */ +struct dmub_rb_cmd_replay_set_frameupdate_timer { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command data. + */ + struct dmub_cmd_replay_frameupdate_timer_data data; +}; + +/** + * Definition union of replay command set + */ +union dmub_replay_cmd_set { + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Definition of DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED command data. + */ + struct dmub_cmd_replay_set_timing_sync_data sync_data; + /** + * Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command data. + */ + struct dmub_cmd_replay_frameupdate_timer_data timer_data; + /** + * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data. + */ + struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data; +}; + /** * Set of HW components that can be locked. * @@ -4211,6 +4403,16 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL command. */ struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal replay_set_power_opt_and_coasting_vtotal; + + struct dmub_rb_cmd_replay_set_timing_sync replay_set_timing_sync; + /** + * Definition of a DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command. + */ + struct dmub_rb_cmd_replay_set_frameupdate_timer replay_set_frameupdate_timer; + /** + * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. + */ + struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal; }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 59d4e64845..9ad7388053 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -64,7 +64,7 @@ /* Default scratch mem size. */ -#define DMUB_SCRATCH_MEM_SIZE (256) +#define DMUB_SCRATCH_MEM_SIZE (1024) /* Number of windows in use.
*/ #define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL) @@ -768,7 +768,7 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, return DMUB_STATUS_INVALID; if (dmub->power_state != DMUB_POWER_STATE_D0) - return DMUB_STATUS_INVALID; + return DMUB_STATUS_POWER_STATE_D3; if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity || dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) { @@ -789,7 +789,7 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) return DMUB_STATUS_INVALID; if (dmub->power_state != DMUB_POWER_STATE_D0) - return DMUB_STATUS_INVALID; + return DMUB_STATUS_POWER_STATE_D3; /** * Read back all the queued commands to ensure that they've diff --git a/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h index 42229b4eff..eced9ad91f 100644 --- a/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h +++ b/drivers/gpu/drm/amd/display/include/hdcp_msg_types.h @@ -69,6 +69,11 @@ enum hdcp_message_id { HDCP_MESSAGE_ID_READ_RXSTATUS, HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, + /* PS175 chip */ + + HDCP_MESSAGE_ID_WRITE_PS175_CMD, + HDCP_MESSAGE_ID_READ_PS175_RSP, + HDCP_MESSAGE_ID_MAX }; diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index ccecddafeb..3955b7e4b2 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -81,6 +81,7 @@ fail_alloc_context: void mod_freesync_destroy(struct mod_freesync *mod_freesync) { struct core_freesync *core_freesync = NULL; + if (mod_freesync == NULL) return; core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); @@ -278,9 +279,8 @@ static void apply_below_the_range(struct core_freesync *core_freesync, } } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) { /* Enter Below the Range */ - if (!in_out_vrr->btr.btr_active) { + if (!in_out_vrr->btr.btr_active) in_out_vrr->btr.btr_active = true; - } } /* BTR set to "not active" so disengage */ @@ -693,10 +693,12 @@ static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf, if (app_tf != TRANSFER_FUNC_UNKNOWN) { infopacket->valid = true; - if (app_tf != TRANSFER_FUNC_PQ2084) { + if (app_tf == TRANSFER_FUNC_PQ2084) + infopacket->sb[9] |= 0x20; // PB9 = [Bit 5 = PQ EOTF Active] + else { infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active] if (app_tf == TRANSFER_FUNC_GAMMA_22) - infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active] + infopacket->sb[9] |= 0x04; // PB9 = [Bit 2 = Gamma 2.2 EOTF Active] } } } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 1ddb4f5eac..182e7532dd 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -63,6 +63,7 @@ static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp) static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; + if (is_dp_hdcp(hdcp)) { status = (hdcp->auth.msg.hdcp1.bstatus & DP_BSTATUS_R0_PRIME_READY) ? 
@@ -131,9 +132,8 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { /* Avoid device count == 0 to do authentication */ - if (0 == get_device_count(hdcp)) { + if (get_device_count(hdcp) == 0) return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE; - } /* Some MST display may choose to report the internal panel as an HDCP RX. * To update this condition with 1(because the immediate repeater's internal diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index 91c22b96eb..733f22bed0 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -208,9 +208,8 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { /* Avoid device count == 0 to do authentication */ - if (0 == get_device_count(hdcp)) { + if (get_device_count(hdcp) == 0) return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE; - } /* Some MST display may choose to report the internal panel as an HDCP RX. */ /* To update this condition with 1(because the immediate repeater's internal */ @@ -689,9 +688,8 @@ static enum mod_hdcp_status validate_stream_ready(struct mod_hdcp *hdcp, if (is_hdmi_dvi_sl_hdcp(hdcp)) { if (!process_rxstatus(hdcp, event_ctx, input, &status)) goto out; - if (event_ctx->rx_id_list_ready) { + if (event_ctx->rx_id_list_ready) goto out; - } } if (is_hdmi_dvi_sl_hdcp(hdcp)) if (!mod_hdcp_execute_and_set(check_stream_ready_available, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index c62df3bcc7..1d83c1b9da 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -86,10 +86,12 @@ #define HDCP_CPIRQ_TRACE(hdcp) \ HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index) #define HDCP_EVENT_TRACE(hdcp, event) \ - if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \ - HDCP_TIMEOUT_TRACE(hdcp); \ - else if (event == MOD_HDCP_EVENT_CPIRQ) \ - HDCP_CPIRQ_TRACE(hdcp) + do { \ + if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \ + HDCP_TIMEOUT_TRACE(hdcp); \ + else if (event == MOD_HDCP_EVENT_CPIRQ) \ + HDCP_CPIRQ_TRACE(hdcp); \ + } while (0) /* TODO: find some way to tell if logging is off to save time */ #define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index ff930a71e4..7c9805705f 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -443,7 +443,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE) - continue; + continue; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -929,7 +929,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE) - continue; + continue; hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index; 
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h index 5b71bc96b9..7844ea9165 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h @@ -98,9 +98,9 @@ enum ta_dtm_encoder_type { * This enum defines software value for dio_output_type */ typedef enum { - TA_DTM_DIO_OUTPUT_TYPE__INVALID, - TA_DTM_DIO_OUTPUT_TYPE__DIRECT, - TA_DTM_DIO_OUTPUT_TYPE__DPIA + TA_DTM_DIO_OUTPUT_TYPE__INVALID, + TA_DTM_DIO_OUTPUT_TYPE__DIRECT, + TA_DTM_DIO_OUTPUT_TYPE__DPIA } ta_dtm_dio_output_type; struct ta_dtm_topology_update_input_v3 { @@ -237,11 +237,11 @@ enum ta_hdcp2_hdcp2_msg_id_max_size { #define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127 #define TA_HDCP__HDCP1_V_PRIME_SIZE 20 #define TA_HDCP__HDCP2_TX_BUF_MAX_SIZE \ - TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6 + (TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_NO_STORED_KM + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_STORED_KM + 6) // 64 bits boundaries #define TA_HDCP__HDCP2_RX_BUF_MAX_SIZE \ - TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4 + (TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT + TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_RECEIVER_INFO + 4) enum ta_hdcp_status { TA_HDCP_STATUS__SUCCESS = 0x00, diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index afe1f6cce5..cc3dc9b589 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -1,31 +1,3 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - - - - /* * Copyright 2016 Advanced Micro Devices, Inc. 
* diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h index 5960dd760e..8ce6c22e5d 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h @@ -57,10 +57,10 @@ void mod_stats_update_event(struct mod_stats *mod_stats, unsigned int length); void mod_stats_update_flip(struct mod_stats *mod_stats, - unsigned long timestamp_in_ns); + unsigned long long timestamp_in_ns); void mod_stats_update_vupdate(struct mod_stats *mod_stats, - unsigned long timestamp_in_ns); + unsigned long long timestamp_in_ns); void mod_stats_update_freesync(struct mod_stats *mod_stats, unsigned int v_total_min, diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 1675314a3f..2a3698fd2d 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -31,7 +31,7 @@ #define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b)) #define bswap16_based_on_endian(big_endian, value) \ - (big_endian) ? cpu_to_be16(value) : cpu_to_le16(value) + ((big_endian) ? cpu_to_be16(value) : cpu_to_le16(value)) /* Possible Min Reduction config from least aggressive to most aggressive * 0 1 2 3 4 5 6 7 8 9 10 11 12 @@ -973,6 +973,39 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, return true; } +void set_replay_coasting_vtotal(struct dc_link *link, + enum replay_coasting_vtotal_type type, + uint32_t vtotal) +{ + link->replay_settings.coasting_vtotal_table[type] = vtotal; +} + +void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal) +{ + link->replay_settings.abm_with_ips_on_full_screen_video_pseudo_vtotal = vtotal; +} + +void calculate_replay_link_off_frame_count(struct dc_link *link, + uint16_t vtotal, uint16_t htotal) +{ + uint8_t max_link_off_frame_count = 0; + uint16_t max_deviation_line = 0, pixel_deviation_per_line = 0; + + max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line; + pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line; + + if (htotal != 0 && vtotal != 0) + max_link_off_frame_count = htotal * max_deviation_line / (pixel_deviation_per_line * vtotal); + else + ASSERT(0); + + link->replay_settings.link_off_frame_count_level = + max_link_off_frame_count >= PR_LINK_OFF_FRAME_COUNT_BEST ? PR_LINK_OFF_FRAME_COUNT_BEST : + max_link_off_frame_count >= PR_LINK_OFF_FRAME_COUNT_GOOD ? 
PR_LINK_OFF_FRAME_COUNT_GOOD : + PR_LINK_OFF_FRAME_COUNT_FAIL; + +} + bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps) { unsigned int data_points_size; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index d9e0d67d67..ff7e6f3cd6 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -54,6 +54,12 @@ bool dmub_init_abm_config(struct resource_pool *res_pool, unsigned int inst); void init_replay_config(struct dc_link *link, struct replay_config *pr_config); +void set_replay_coasting_vtotal(struct dc_link *link, + enum replay_coasting_vtotal_type type, + uint32_t vtotal); +void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal); +void calculate_replay_link_off_frame_count(struct dc_link *link, + uint16_t vtotal, uint16_t htotal); bool is_psr_su_specific_panel(struct dc_link *link); void mod_power_calc_psr_configs(struct psr_config *psr_config, diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 579977f6ad..df2c7ffe19 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -255,6 +255,10 @@ enum DC_DEBUG_MASK { DC_FORCE_SUBVP_MCLK_SWITCH = 0x20, DC_DISABLE_MPO = 0x40, DC_ENABLE_DPIA_TRACE = 0x80, + DC_ENABLE_DML2 = 0x100, + DC_DISABLE_PSR_SU = 0x200, + DC_DISABLE_REPLAY = 0x400, + DC_DISABLE_IPS = 0x800, }; enum amd_dpm_forced_level; diff --git a/drivers/gpu/drm/amd/include/amdgpu_reg_state.h b/drivers/gpu/drm/amd/include/amdgpu_reg_state.h new file mode 100644 index 0000000000..335980e2af --- /dev/null +++ b/drivers/gpu/drm/amd/include/amdgpu_reg_state.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_REG_STATE_H__ +#define __AMDGPU_REG_STATE_H__ + +enum amdgpu_reg_state { + AMDGPU_REG_STATE_TYPE_INVALID = 0, + AMDGPU_REG_STATE_TYPE_XGMI = 1, + AMDGPU_REG_STATE_TYPE_WAFL = 2, + AMDGPU_REG_STATE_TYPE_PCIE = 3, + AMDGPU_REG_STATE_TYPE_USR = 4, + AMDGPU_REG_STATE_TYPE_USR_1 = 5 +}; + +enum amdgpu_sysfs_reg_offset { + AMDGPU_SYS_REG_STATE_XGMI = 0x0000, + AMDGPU_SYS_REG_STATE_WAFL = 0x1000, + AMDGPU_SYS_REG_STATE_PCIE = 0x2000, + AMDGPU_SYS_REG_STATE_USR = 0x3000, + AMDGPU_SYS_REG_STATE_USR_1 = 0x4000, + AMDGPU_SYS_REG_STATE_END = 0x5000, +}; + +struct amdgpu_reg_state_header { + uint16_t structure_size; + uint8_t format_revision; + uint8_t content_revision; + uint8_t state_type; + uint8_t num_instances; + uint16_t pad; +}; + +enum amdgpu_reg_inst_state { + AMDGPU_INST_S_OK, + AMDGPU_INST_S_EDISABLED, + AMDGPU_INST_S_EACCESS, +}; + +struct amdgpu_smn_reg_data { + uint64_t addr; + uint32_t value; + uint32_t pad; +}; + +struct amdgpu_reg_inst_header { + uint16_t instance; + uint16_t state; + uint16_t num_smn_regs; + uint16_t pad; +}; + + +struct amdgpu_regs_xgmi_v1_0 { + struct amdgpu_reg_inst_header inst_header; + + struct amdgpu_smn_reg_data smn_reg_values[]; +}; + +struct amdgpu_reg_state_xgmi_v1_0 { + /* common_header.state_type must be AMDGPU_REG_STATE_TYPE_XGMI */ + struct amdgpu_reg_state_header common_header; + + struct amdgpu_regs_xgmi_v1_0 xgmi_state_regs[]; +}; + +struct amdgpu_regs_wafl_v1_0 { + struct amdgpu_reg_inst_header inst_header; + + struct amdgpu_smn_reg_data smn_reg_values[]; +}; + +struct amdgpu_reg_state_wafl_v1_0 { + /* common_header.state_type must be AMDGPU_REG_STATE_TYPE_WAFL */ + struct amdgpu_reg_state_header common_header; + + struct amdgpu_regs_wafl_v1_0 wafl_state_regs[]; +}; + +struct amdgpu_regs_pcie_v1_0 { + struct amdgpu_reg_inst_header inst_header; + + uint16_t device_status; + uint16_t link_status; + uint32_t sub_bus_number_latency; + uint32_t pcie_corr_err_status; + uint32_t pcie_uncorr_err_status; + + struct amdgpu_smn_reg_data smn_reg_values[]; +}; + +struct amdgpu_reg_state_pcie_v1_0 { + /* common_header.state_type must be AMDGPU_REG_STATE_TYPE_PCIE */ + struct amdgpu_reg_state_header common_header; + + struct amdgpu_regs_pcie_v1_0 pci_state_regs[]; +}; + +struct amdgpu_regs_usr_v1_0 { + struct amdgpu_reg_inst_header inst_header; + + struct amdgpu_smn_reg_data smn_reg_values[]; +}; + +struct amdgpu_reg_state_usr_v1_0 { + /* common_header.state_type must be AMDGPU_REG_STATE_TYPE_USR */ + struct amdgpu_reg_state_header common_header; + + struct amdgpu_regs_usr_v1_0 usr_state_regs[]; +}; + +static inline size_t amdgpu_reginst_size(uint16_t num_inst, size_t inst_size, + uint16_t num_regs) +{ + return num_inst * + (inst_size + num_regs * sizeof(struct amdgpu_smn_reg_data)); +} + +#define amdgpu_asic_get_reg_state_supported(adev) \ + (((adev)->asic_funcs && (adev)->asic_funcs->get_reg_state) ? 1 : 0) + +#define amdgpu_asic_get_reg_state(adev, state, buf, size) \ + ((adev)->asic_funcs->get_reg_state ? 
\ + (adev)->asic_funcs->get_reg_state((adev), (state), (buf), \ + (size)) : \ + 0) + + +int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev); +void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h index b646648792..fca72e2ec9 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h @@ -6220,12 +6220,20 @@ #define DCCG_GATE_DISABLE_CNTL4__PHYD_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x3 #define DCCG_GATE_DISABLE_CNTL4__PHYE_REFCLK_ROOT_GATE_DISABLE__SHIFT 0x4 #define DCCG_GATE_DISABLE_CNTL4__HDMICHARCLK0_ROOT_GATE_DISABLE__SHIFT 0x11 +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK0_GATE_DISABLE__SHIFT 0x17 +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK1_GATE_DISABLE__SHIFT 0x18 +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK2_GATE_DISABLE__SHIFT 0x19 +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK3_GATE_DISABLE__SHIFT 0x1a #define DCCG_GATE_DISABLE_CNTL4__PHYA_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000001L #define DCCG_GATE_DISABLE_CNTL4__PHYB_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000002L #define DCCG_GATE_DISABLE_CNTL4__PHYC_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000004L #define DCCG_GATE_DISABLE_CNTL4__PHYD_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000008L #define DCCG_GATE_DISABLE_CNTL4__PHYE_REFCLK_ROOT_GATE_DISABLE_MASK 0x00000010L #define DCCG_GATE_DISABLE_CNTL4__HDMICHARCLK0_ROOT_GATE_DISABLE_MASK 0x00020000L +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK0_GATE_DISABLE_MASK 0x00800000L +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK1_GATE_DISABLE_MASK 0x01000000L +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK2_GATE_DISABLE_MASK 0x02000000L +#define DCCG_GATE_DISABLE_CNTL4__DPIASYMCLK3_GATE_DISABLE_MASK 0x04000000L #define DPSTREAMCLK_CNTL__DPSTREAMCLK0_SRC_SEL__SHIFT 0x0 #define DPSTREAMCLK_CNTL__DPSTREAMCLK0_EN__SHIFT 0x3 #define DPSTREAMCLK_CNTL__DPSTREAMCLK1_SRC_SEL__SHIFT 0x4 diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h index 7ee3d29112..6f80bfa7e4 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h @@ -8707,10 +8707,10 @@ #define regBIF_BX1_MM_CFGREGS_CNTL_BASE_IDX 2 #define regBIF_BX1_BX_RESET_CNTL 0x00f0 #define regBIF_BX1_BX_RESET_CNTL_BASE_IDX 2 -#define regBIF_BX1_INTERRUPT_CNTL 0x8e11 -#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX 5 -#define regBIF_BX1_INTERRUPT_CNTL2 0x8e12 -#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX 5 +#define regBIF_BX1_INTERRUPT_CNTL 0x00f1 +#define regBIF_BX1_INTERRUPT_CNTL_BASE_IDX 2 +#define regBIF_BX1_INTERRUPT_CNTL2 0x00f2 +#define regBIF_BX1_INTERRUPT_CNTL2_BASE_IDX 2 #define regBIF_BX1_CLKREQB_PAD_CNTL 0x00f8 #define regBIF_BX1_CLKREQB_PAD_CNTL_BASE_IDX 2 #define regBIF_BX1_BIF_FEATURES_CONTROL_MISC 0x00fb diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h new file mode 100644 index 0000000000..a4dd372c05 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _smuio_10_0_2_OFFSET_HEADER + +// addressBlock: smuio_smuio_misc_SmuSmuioDec +// base address: 0x5a000 +#define mmSMUIO_MCM_CONFIG 0x0023 +#define mmSMUIO_MCM_CONFIG_BASE_IDX 0 +#define mmIP_DISCOVERY_VERSION 0x0000 +#define mmIP_DISCOVERY_VERSION_BASE_IDX 1 +#define mmIO_SMUIO_PINSTRAP 0x01b1 +#define mmIO_SMUIO_PINSTRAP_BASE_IDX 1 +#define mmSCRATCH_REGISTER0 0x01b2 +#define mmSCRATCH_REGISTER0_BASE_IDX 1 +#define mmSCRATCH_REGISTER1 0x01b3 +#define mmSCRATCH_REGISTER1_BASE_IDX 1 +#define mmSCRATCH_REGISTER2 0x01b4 +#define mmSCRATCH_REGISTER2_BASE_IDX 1 +#define mmSCRATCH_REGISTER3 0x01b5 +#define mmSCRATCH_REGISTER3_BASE_IDX 1 +#define mmSCRATCH_REGISTER4 0x01b6 +#define mmSCRATCH_REGISTER4_BASE_IDX 1 +#define mmSCRATCH_REGISTER5 0x01b7 +#define mmSCRATCH_REGISTER5_BASE_IDX 1 +#define mmSCRATCH_REGISTER6 0x01b8 +#define mmSCRATCH_REGISTER6_BASE_IDX 1 +#define mmSCRATCH_REGISTER7 0x01b9 +#define mmSCRATCH_REGISTER7_BASE_IDX 1 + + +// addressBlock: smuio_smuio_reset_SmuSmuioDec +// base address: 0x5a300 +#define mmSMUIO_MP_RESET_INTR 0x00c1 +#define mmSMUIO_MP_RESET_INTR_BASE_IDX 0 +#define mmSMUIO_SOC_HALT 0x00c2 +#define mmSMUIO_SOC_HALT_BASE_IDX 0 +#define mmSMUIO_GFX_MISC_CNTL 0x00c8 +#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0 + + +// addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec +// base address: 0x5a000 +#define mmPWROK_REFCLK_GAP_CYCLES 0x0001 +#define mmPWROK_REFCLK_GAP_CYCLES_BASE_IDX 1 +#define mmGOLDEN_TSC_INCREMENT_UPPER 0x0004 +#define mmGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX 1 +#define mmGOLDEN_TSC_INCREMENT_LOWER 0x0005 +#define mmGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_UPPER 0x0025 +#define mmGOLDEN_TSC_COUNT_UPPER_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_LOWER 0x0026 +#define mmGOLDEN_TSC_COUNT_LOWER_BASE_IDX 1 +#define mmGFX_GOLDEN_TSC_SHADOW_UPPER 0x0029 +#define mmGFX_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1 +#define mmGFX_GOLDEN_TSC_SHADOW_LOWER 0x002a +#define mmGFX_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1 +#define mmSOC_GOLDEN_TSC_SHADOW_UPPER 0x002b +#define mmSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1 +#define mmSOC_GOLDEN_TSC_SHADOW_LOWER 0x002c +#define mmSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1 +#define mmSOC_GAP_PWROK 0x002d +#define mmSOC_GAP_PWROK_BASE_IDX 1 + +// addressBlock: smuio_smuio_swtimer_SmuSmuioDec +// base address: 0x5ac40 +#define mmPWR_VIRT_RESET_REQ 0x0110 +#define mmPWR_VIRT_RESET_REQ_BASE_IDX 1 +#define mmPWR_DISP_TIMER_CONTROL 0x0111 +#define 
mmPWR_DISP_TIMER_CONTROL_BASE_IDX 1 +#define mmPWR_DISP_TIMER2_CONTROL 0x0113 +#define mmPWR_DISP_TIMER2_CONTROL_BASE_IDX 1 +#define mmPWR_DISP_TIMER_GLOBAL_CONTROL 0x0115 +#define mmPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 1 +#define mmPWR_IH_CONTROL 0x0116 +#define mmPWR_IH_CONTROL_BASE_IDX 1 + +// addressBlock: smuio_smuio_svi0_SmuSmuioDec +// base address: 0x6f000 +#define mmSMUSVI0_TEL_PLANE0 0x520e +#define mmSMUSVI0_TEL_PLANE0_BASE_IDX 1 +#define mmSMUSVI0_PLANE0_CURRENTVID 0x5217 +#define mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX 1 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h new file mode 100644 index 0000000000..d10ae61c34 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _smuio_10_0_2_SH_MASK_HEADER + +// addressBlock: smuio_smuio_misc_SmuSmuioDec +//SMUIO_MCM_CONFIG +#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0 +#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2 +#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x5 +#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0x6 +#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10 +#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11 +#define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L +#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000001CL +#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000020L +#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x000000C0L +#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L +#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L +//IP_DISCOVERY_VERSION +#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0 +#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL +//IO_SMUIO_PINSTRAP +#define IO_SMUIO_PINSTRAP__AUD_PORT_CONN__SHIFT 0x0 +#define IO_SMUIO_PINSTRAP__AUD__SHIFT 0x3 +#define IO_SMUIO_PINSTRAP__AUD_PORT_CONN_MASK 0x00000007L +#define IO_SMUIO_PINSTRAP__AUD_MASK 0x00000018L +//SCRATCH_REGISTER0 +#define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0 +#define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER1 +#define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0 +#define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER2 +#define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0 +#define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER3 +#define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0 +#define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER4 +#define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0 +#define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER5 +#define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0 +#define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER6 +#define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0 +#define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER7 +#define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0 +#define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL + +// addressBlock: smuio_smuio_reset_SmuSmuioDec +//SMUIO_MP_RESET_INTR +#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0 +#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L +//SMUIO_SOC_HALT +#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN__SHIFT 0x2 +#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN__SHIFT 0x3 +#define SMUIO_SOC_HALT__WDT_FORCE_PWROK_EN_MASK 0x00000004L +#define SMUIO_SOC_HALT__WDT_FORCE_RESETn_EN_MASK 0x00000008L +//SMUIO_GFX_MISC_CNTL +#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0 +#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 +#define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH__SHIFT 0x3 +#define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN__SHIFT 0x4 +#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L +#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L +#define SMUIO_GFX_MISC_CNTL__PWR_GFX_DLDO_CLK_SWITCH_MASK 0x00000008L +#define SMUIO_GFX_MISC_CNTL__PWR_GFX_RLC_CGPG_EN_MASK 0x00000010L + +// addressBlock: smuio_smuio_ccxctrl_SmuSmuioDec +//PWROK_REFCLK_GAP_CYCLES +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0 +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8 +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L 
+//GOLDEN_TSC_INCREMENT_UPPER +#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0 +#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL +//GOLDEN_TSC_INCREMENT_LOWER +#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0 +#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL +//GOLDEN_TSC_COUNT_UPPER +#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0 +#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL +//GOLDEN_TSC_COUNT_LOWER +#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0 +#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL +//GFX_GOLDEN_TSC_SHADOW_UPPER +#define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper__SHIFT 0x0 +#define GFX_GOLDEN_TSC_SHADOW_UPPER__GfxGoldenTscShadowUpper_MASK 0x00FFFFFFL +//GFX_GOLDEN_TSC_SHADOW_LOWER +#define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower__SHIFT 0x0 +#define GFX_GOLDEN_TSC_SHADOW_LOWER__GfxGoldenTscShadowLower_MASK 0xFFFFFFFFL +//SOC_GOLDEN_TSC_SHADOW_UPPER +#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0 +#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL +//SOC_GOLDEN_TSC_SHADOW_LOWER +#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0 +#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL +//SOC_GAP_PWROK +#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0 +#define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L + +// addressBlock: smuio_smuio_swtimer_SmuSmuioDec +//PWR_VIRT_RESET_REQ +#define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0 +#define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT 0x1f +#define PWR_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL +#define PWR_VIRT_RESET_REQ__PF_FLR_MASK 0x80000000L +//PWR_DISP_TIMER_CONTROL +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L +//PWR_DISP_TIMER2_CONTROL +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L +#define 
PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L +//PWR_DISP_TIMER_GLOBAL_CONTROL +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0 +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L +//PWR_IH_CONTROL +#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0 +#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5 +#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6 +#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f +#define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL +#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L +#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L +#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L + +// addressBlock: smuio_smuio_svi0_SmuSmuioDec +//SMUSVI0_TEL_PLANE0 +#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR__SHIFT 0x0 +#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT 0x10 +#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_IDDCOR_MASK 0x000000FFL +#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK 0x01FF0000L +//SMUSVI0_PLANE0_CURRENTVID +#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT 0x18 +#define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK 0xFF000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index c2ccf3724e..edcb85560c 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -422,7 +422,7 @@ struct amd_pm_funcs { int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock); int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock); int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock); - int (*get_asic_baco_capability)(void *handle, bool *cap); + bool (*get_asic_baco_capability)(void *handle); int (*get_asic_baco_state)(void *handle, int *state); int (*set_asic_baco_state)(void *handle, int state); int (*get_ppfeature_status)(void *handle, char *buf); @@ -432,6 +432,7 @@ struct amd_pm_funcs { int (*set_df_cstate)(void *handle, enum pp_df_cstate state); int (*set_xgmi_pstate)(void *handle, uint32_t pstate); ssize_t (*get_gpu_metrics)(void *handle, void **table); + ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size); int (*set_watermarks_for_clock_ranges)(void *handle, struct pp_smu_wm_range_sets *ranges); int (*display_disable_memory_clock_switch)(void *handle, @@ -1225,4 +1226,19 @@ struct gpu_metrics_v3_0 { /* Metrics table alpha filter time constant [us] */ uint32_t time_filter_alphavalue; }; + +struct amdgpu_pmmetrics_header { + uint16_t structure_size; + uint16_t pad; + uint32_t mp1_ip_discovery_version; + uint32_t pmfw_version; + uint32_t pmmetrics_version; +}; + +struct amdgpu_pm_metrics { + struct amdgpu_pmmetrics_header common_header; + + uint8_t data[]; +}; + #endif diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index e07e93167a..ec5b9ab67c 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -232,6 +232,7 @@ union MESAPI_SET_HW_RESOURCES { }; uint32_t 
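The new amdgpu_pm_metrics blob is self-describing: common_header.structure_size covers the header plus the ASIC-specific table that follows in data[]. A minimal consumer-side sketch of validating such a blob (standalone; the struct layout is copied from the header above, the helper name is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct amdgpu_pmmetrics_header {
	uint16_t structure_size;
	uint16_t pad;
	uint32_t mp1_ip_discovery_version;
	uint32_t pmfw_version;
	uint32_t pmmetrics_version;
};

/* Returns the payload (metrics table) size, or -1 for a malformed blob. */
static long pm_metrics_payload_size(const void *blob, size_t blob_len)
{
	struct amdgpu_pmmetrics_header hdr;

	if (blob_len < sizeof(hdr))
		return -1;
	memcpy(&hdr, blob, sizeof(hdr));
	/* structure_size = header + table; it may not exceed what was read */
	if (hdr.structure_size < sizeof(hdr) || hdr.structure_size > blob_len)
		return -1;
	return (long)(hdr.structure_size - sizeof(hdr));
}

int main(void)
{
	return pm_metrics_payload_size("", 0) == -1 ? 0 : 1;
}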
oversubscription_timer; uint64_t doorbell_info; + uint64_t event_intr_history_gpu_mc_ptr; }; uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 8ec11da031..6627ee07d5 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -203,8 +203,7 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; void *pp_handle = adev->powerplay.pp_handle; - bool baco_cap; - int ret = 0; + bool ret; if (!pp_funcs || !pp_funcs->get_asic_baco_capability) return false; @@ -222,12 +221,11 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) mutex_lock(&adev->pm.mutex); - ret = pp_funcs->get_asic_baco_capability(pp_handle, - &baco_cap); + ret = pp_funcs->get_asic_baco_capability(pp_handle); mutex_unlock(&adev->pm.mutex); - return ret ? false : baco_cap; + return ret; } int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) @@ -618,6 +616,16 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) enable ? "enable" : "disable", ret); } +void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable) +{ + int ret = 0; + + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable); + if (ret) + DRM_ERROR("Dpm %s vpe failed, ret = %d.\n", + enable ? "enable" : "disable", ret); +} + int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -1319,6 +1327,23 @@ int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) return ret; } +ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics, + size_t size) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; + + if (!pp_funcs->get_pm_metrics) + return -EOPNOTSUPP; + + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics, + size); + mutex_unlock(&adev->pm.mutex); + + return ret; +} + int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, uint32_t *fan_mode) { diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index fee86be55c..b4698f9856 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -1799,6 +1799,44 @@ static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev, return count; } +static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev, + struct amdgpu_device_attr *attr, + uint32_t mask, + enum amdgpu_device_attr_states *states) +{ + if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP) + *states = ATTR_STATE_UNSUPPORTED; + + return 0; +} + +static ssize_t amdgpu_get_pm_metrics(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + ssize_t size = 0; + int ret; + + if (amdgpu_in_reset(adev)) + return -EPERM; + if (adev->in_suspend && !adev->in_runpm) + return -EPERM; + + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) { + pm_runtime_put_autosuspend(ddev->dev); + return ret; + } + + size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + return size; +} + /** * DOC: gpu_metrics * @@ -2096,6 +2134,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RW(smartshift_bias, 
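On the userspace side the new attribute is a plain binary read; a minimal sketch (the card0 path is an assumption — the index, and whether the attribute is exposed at all, depend on the GPU):

#include <stdio.h>

int main(void)
{
	/* assumed path; adjust the cardN index for the target device */
	FILE *f = fopen("/sys/class/drm/card0/device/pm_metrics", "rb");
	unsigned char buf[4096];	/* one sample is at most 4096 bytes per the API doc */
	size_t n;

	if (!f) {
		perror("pm_metrics");
		return 1;
	}
	n = fread(buf, 1, sizeof(buf), f);
	printf("read %zu bytes of pm metrics\n", n);
	fclose(f);
	return 0;
}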
ATTR_FLAG_BASIC, .attr_update = ss_bias_attr_update), AMDGPU_DEVICE_ATTR_RW(xgmi_plpd_policy, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RO(pm_metrics, ATTR_FLAG_BASIC, + .attr_update = amdgpu_pm_metrics_attr_update), }; static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, @@ -4177,6 +4217,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev) } } + /* + * If gpu_od is the only member in the list, that means gpu_od is an + * empty directory, so remove it. + */ + if (list_is_singular(&adev->pm.od_kobj_list)) + goto err_out; + return 0; err_out: @@ -4319,11 +4366,19 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) seq_printf(m, "\t%u mV (VDDNB)\n", value); size = sizeof(uint32_t); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) - seq_printf(m, "\t%u.%02u W (average GPU)\n", query >> 8, query & 0xff); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) { + if (adev->flags & AMD_IS_APU) + seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff); + else + seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff); + } size = sizeof(uint32_t); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) - seq_printf(m, "\t%u.%02u W (current GPU)\n", query >> 8, query & 0xff); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) { + if (adev->flags & AMD_IS_APU) + seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff); + else + seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff); + } size = sizeof(value); seq_printf(m, "\n"); @@ -4349,9 +4404,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a /* VCN clocks */ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { if (!value) { - seq_printf(m, "VCN: Disabled\n"); + seq_printf(m, "VCN: Powered down\n"); } else { - seq_printf(m, "VCN: Enabled\n"); + seq_printf(m, "VCN: Powered up\n"); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) seq_printf(m, "\t%u MHz (DCLK)\n", value/100); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) @@ -4363,9 +4418,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a /* UVD clocks */ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { if (!value) { - seq_printf(m, "UVD: Disabled\n"); + seq_printf(m, "UVD: Powered down\n"); } else { - seq_printf(m, "UVD: Enabled\n"); + seq_printf(m, "UVD: Powered up\n"); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) seq_printf(m, "\t%u MHz (DCLK)\n", value/100); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) @@ -4377,9 +4432,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a /* VCE clocks */ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { if (!value) { - seq_printf(m, "VCE: Disabled\n"); + seq_printf(m, "VCE: Powered down\n"); } else { - seq_printf(m, "VCE: Enabled\n"); + seq_printf(m, "VCE: Powered up\n"); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) seq_printf(m, "\t%u MHz (ECCLK)\n", 
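The power sensors report watts in 8.8 fixed point, which is why the debugfs code prints query >> 8 and query & 0xff. Note the low byte counts 1/256 W steps even though %02u makes it look like centiwatts; a standalone decode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t query = 0x1A40;	/* hypothetical reading: 26 + 64/256 W */

	unsigned int whole = query >> 8;	/* integer watts */
	unsigned int frac = query & 0xff;	/* 1/256 W steps */

	printf("%u.%02u W (as debugfs prints it)  %.4f W (exact)\n",
	       whole, frac, whole + frac / 256.0);
	return 0;
}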
value/100); } diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 482ea30147..3047ffe7f2 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -445,6 +445,7 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev); void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable); +void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable); int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version); int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable); int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size); @@ -513,6 +514,18 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, long *input, uint32_t size); int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table); + +/** + * @get_pm_metrics: Get one snapshot of power management metrics from PMFW. The + * sample is copied to pm_metrics buffer. It's expected to be allocated by the + * caller and size of the allocated buffer is passed. Max size expected for a + * metrics sample is 4096 bytes. + * + * Return: Actual size of the metrics sample + */ +ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics, + size_t size); + int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, uint32_t *fan_mode); int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 914c153871..aed0e2cefb 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1371,21 +1371,18 @@ static int pp_set_active_display_count(void *handle, uint32_t count) return phm_set_active_display_count(hwmgr, count); } -static int pp_get_asic_baco_capability(void *handle, bool *cap) +static bool pp_get_asic_baco_capability(void *handle) { struct pp_hwmgr *hwmgr = handle; - *cap = false; if (!hwmgr) - return -EINVAL; + return false; if (!(hwmgr->not_vf && amdgpu_dpm) || !hwmgr->hwmgr_func->get_asic_baco_capability) - return 0; + return false; - hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap); - - return 0; + return hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr); } static int pp_get_asic_baco_state(void *handle, int *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c index 044cda005a..e8a9471c18 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c @@ -33,21 +33,20 @@ #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" -int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) +bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg; - *cap = false; if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) return 0; reg = RREG32(mmCC_BIF_BX_FUSESTRAP0); if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK) - *cap = true; + return true; - return 0; + return false; } int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git 
a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h index be0d98abb5..73a773f4ce 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap); +extern bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr); extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c index de0a37f7c6..c66ef97415 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c @@ -28,14 +28,13 @@ #include "vega10_inc.h" #include "smu9_baco.h" -int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) +bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg, data; - *cap = false; if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) - return 0; + return false; WREG32(0x12074, 0xFFF0003B); data = RREG32(0x12075); @@ -44,10 +43,10 @@ int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0); if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) - *cap = true; + return true; } - return 0; + return false; } int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h index 84e90f801a..9ff7c2ea1b 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap); +extern bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr); extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); #endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c index 994c0d374b..dad4c80aee 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c @@ -36,23 +36,22 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] = { {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0}, }; -int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) +bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg; - *cap = false; if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) - return 0; + return false; if (((RREG32(0x17569) & 0x20000000) >> 29) == 0x1) { reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0); if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) - *cap = true; + return true; } - return 0; + return false; } int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h index f06471e712..bdad9c9156 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h @@ -25,7 +25,7 @@ 
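The pattern across these hunks is uniform: get_asic_baco_capability drops the int status plus bool out-parameter in favor of a plain bool return, folding every failure path into "not capable". A schematic sketch of the caller-side simplification (names are illustrative, not the driver's):

#include <stdbool.h>
#include <stddef.h>

typedef bool (*get_cap_fn)(void *handle);

/* old shape for contrast: int (*get_cap)(void *handle, bool *cap) forced
 * callers to check a status AND an out-parameter; now one value suffices */
static bool is_baco_supported(get_cap_fn get_cap, void *handle)
{
	if (!handle || !get_cap)
		return false;	/* errors and "unsupported" give the same answer */
	return get_cap(handle);
}

static bool always_capable(void *handle)
{
	(void)handle;
	return true;
}

int main(void)
{
	int dummy;

	return is_baco_supported(always_capable, &dummy) ? 0 : 1;
}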
#include "hwmgr.h" #include "common_baco.h" -extern int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap); +extern bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr); extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 81650727a5..6f536159df 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -351,7 +351,7 @@ struct pp_hwmgr_func { int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); - int (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr, bool *cap); + bool (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr); int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state); int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf); diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c index 9e4228232f..ad1fd3150d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c @@ -2298,6 +2298,7 @@ static uint32_t ci_get_mac_definition(uint32_t value) case SMU_MAX_ENTRIES_SMIO: return SMU7_MAX_ENTRIES_SMIO; case SMU_MAX_LEVELS_VDDC: + case SMU_MAX_LEVELS_VDDGFX: return SMU7_MAX_LEVELS_VDDC; case SMU_MAX_LEVELS_VDDCI: return SMU7_MAX_LEVELS_VDDCI; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c index 97d9802fe6..17d2f5bff4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c @@ -2263,6 +2263,7 @@ static uint32_t iceland_get_mac_definition(uint32_t value) case SMU_MAX_ENTRIES_SMIO: return SMU71_MAX_ENTRIES_SMIO; case SMU_MAX_LEVELS_VDDC: + case SMU_MAX_LEVELS_VDDGFX: return SMU71_MAX_LEVELS_VDDC; case SMU_MAX_LEVELS_VDDCI: return SMU71_MAX_LEVELS_VDDCI; diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 60dce148b2..0ad947df77 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1315,6 +1315,187 @@ static int smu_get_thermal_temperature_range(struct smu_context *smu) return ret; } +/** + * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges + * + * @smu: smu_context pointer + * + * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling. + * Returns 0 on success, error on failure. 
+ */ +static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu) +{ + struct wbrf_ranges_in_out wbrf_exclusion = {0}; + struct freq_band_range *wifi_bands = wbrf_exclusion.band_list; + struct amdgpu_device *adev = smu->adev; + uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES; + uint64_t start, end; + int ret, i, j; + + ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion); + if (ret) { + dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n"); + return ret; + } + + /* + * The exclusion ranges array we got might be filled with holes and duplicate + * entries. For example: + * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...} + * We need to do some sorting to eliminate those holes and duplicate entries. + * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...} + */ + for (i = 0; i < num_of_wbrf_ranges; i++) { + start = wifi_bands[i].start; + end = wifi_bands[i].end; + + /* get the last valid entry to fill the intermediate hole */ + if (!start && !end) { + for (j = num_of_wbrf_ranges - 1; j > i; j--) + if (wifi_bands[j].start && wifi_bands[j].end) + break; + + /* no valid entry left */ + if (j <= i) + break; + + start = wifi_bands[i].start = wifi_bands[j].start; + end = wifi_bands[i].end = wifi_bands[j].end; + wifi_bands[j].start = 0; + wifi_bands[j].end = 0; + num_of_wbrf_ranges = j; + } + + /* eliminate duplicate entries */ + for (j = i + 1; j < num_of_wbrf_ranges; j++) { + if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) { + wifi_bands[j].start = 0; + wifi_bands[j].end = 0; + } + } + } + + /* Send the sorted wifi_bands to PMFW */ + ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands); + /* Try to set the wifi_bands again */ + if (unlikely(ret == -EBUSY)) { + mdelay(5); + ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands); + } + + return ret; +} + +/** + * smu_wbrf_event_handler - handle notify events + * + * @nb: notifier block + * @action: event type + * @_arg: event data + * + * Calls the relevant amdgpu function in response to a wbrf event + * notification from the kernel. + */ +static int smu_wbrf_event_handler(struct notifier_block *nb, + unsigned long action, void *_arg) +{ + struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier); + + switch (action) { + case WBRF_CHANGED: + schedule_delayed_work(&smu->wbrf_delayed_work, + msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE)); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +/** + * smu_wbrf_delayed_work_handler - callback on delayed work timer expired + * + * @work: struct work_struct pointer + * + * The event flood is over and the driver will consume the latest exclusion ranges. + */ +static void smu_wbrf_delayed_work_handler(struct work_struct *work) +{ + struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work); + + smu_wbrf_handle_exclusion_ranges(smu); +} + +/** + * smu_wbrf_support_check - check wbrf support + * + * @smu: smu_context pointer + * + * Verifies via the ACPI interface whether wbrf is supported.
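The compaction loop above can be exercised outside the driver; a standalone version with the exact example from the comment (illustrative types, same logic):

#include <stdint.h>
#include <stdio.h>

struct band { uint64_t start, end; };

/* Same scheme as smu_wbrf_handle_exclusion_ranges(): pull the last valid
 * entry forward into each hole, zero duplicates, and shrink the live area. */
static int compact_bands(struct band *b, int n)
{
	int i, j;

	for (i = 0; i < n; i++) {
		if (!b[i].start && !b[i].end) {
			for (j = n - 1; j > i; j--)
				if (b[j].start && b[j].end)
					break;
			if (j <= i)
				break;	/* no valid entry left */
			b[i] = b[j];
			b[j].start = b[j].end = 0;
			n = j;
		}
		for (j = i + 1; j < n; j++)
			if (b[j].start == b[i].start && b[j].end == b[i].end)
				b[j].start = b[j].end = 0;
	}
	return i;	/* number of valid, unique leading entries */
}

int main(void)
{
	struct band b[7] = { {2400, 2500}, {0, 0}, {6882, 6962},
			     {2400, 2500}, {0, 0}, {6117, 6189}, {0, 0} };
	int i, n = compact_bands(b, 7);

	for (i = 0; i < n; i++)	/* prints (2400, 2500) (6117, 6189) (6882, 6962) */
		printf("(%llu, %llu)\n", (unsigned long long)b[i].start,
		       (unsigned long long)b[i].end);
	return 0;
}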
+ */ +static void smu_wbrf_support_check(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf && + acpi_amd_wbrf_supported_consumer(adev->dev); + + if (smu->wbrf_supported) + dev_info(adev->dev, "RF interference mitigation is supported\n"); +} + +/** + * smu_wbrf_init - init driver wbrf support + * + * @smu: smu_context pointer + * + * Verifies the AMD ACPI interfaces and registers with the wbrf + * notifier chain if wbrf feature is supported. + * Returns 0 on success, error on failure. + */ +static int smu_wbrf_init(struct smu_context *smu) +{ + int ret; + + if (!smu->wbrf_supported) + return 0; + + INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler); + + smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler; + ret = amd_wbrf_register_notifier(&smu->wbrf_notifier); + if (ret) + return ret; + + /* + * Some wifiband exclusion ranges may already be there + * before our driver loads. Handle them so that our + * driver is aware of those exclusion ranges. + */ + schedule_delayed_work(&smu->wbrf_delayed_work, + msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE)); + + return 0; +} + +/** + * smu_wbrf_fini - tear down driver wbrf support + * + * @smu: smu_context pointer + * + * Unregisters with the wbrf notifier chain. + */ +static void smu_wbrf_fini(struct smu_context *smu) +{ + if (!smu->wbrf_supported) + return; + + amd_wbrf_unregister_notifier(&smu->wbrf_notifier); + + cancel_delayed_work_sync(&smu->wbrf_delayed_work); +} + static int smu_smc_hw_setup(struct smu_context *smu) { struct smu_feature *feature = &smu->smu_feature; @@ -1407,6 +1588,15 @@ static int smu_smc_hw_setup(struct smu_context *smu) if (ret) return ret; + /* Enable UclkShadow when wbrf is supported */ + if (smu->wbrf_supported) { + ret = smu_enable_uclk_shadow(smu, true); + if (ret) { + dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n"); + return ret; + } + } + /* * With SCPM enabled, these actions(and relevant messages) are * not needed and permitted. @@ -1505,6 +1695,15 @@ static int smu_smc_hw_setup(struct smu_context *smu) */ ret = smu_set_min_dcef_deep_sleep(smu, smu->smu_table.boot_values.dcefclk / 100); + if (ret) { + dev_err(adev->dev, "Error setting min deepsleep dcefclk\n"); + return ret; + } + + /* Init wbrf support. Properly set up the notifier */ + ret = smu_wbrf_init(smu); + if (ret) + dev_err(adev->dev, "Error during wbrf init call\n"); return ret; } @@ -1560,6 +1759,13 @@ static int smu_hw_init(void *handle) return ret; } + /* + * Check whether wbrf is supported. This needs to be done + * before SMU setup starts since part of SMU configuration + * relies on this.
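Registration deliberately routes every notification through a delayed work item rather than handling it in the notifier: schedule_delayed_work() on an already-pending item is a no-op, so a burst of WBRF_CHANGED events collapses into a single PMFW update per SMU_WBRF_EVENT_HANDLING_PACE window. The same debounce idea in a standalone sketch (an analogy, not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PACE_MS 10	/* mirrors SMU_WBRF_EVENT_HANDLING_PACE */

struct debounce {
	bool pending;
	uint64_t deadline_ms;
};

/* event side: arming while already pending is a no-op, like
 * schedule_delayed_work() on a queued work item */
static void on_event(struct debounce *d, uint64_t now_ms)
{
	if (!d->pending) {
		d->pending = true;
		d->deadline_ms = now_ms + PACE_MS;
	}
}

/* timer side: true means "run the consolidated update now" */
static bool due(struct debounce *d, uint64_t now_ms)
{
	if (d->pending && now_ms >= d->deadline_ms) {
		d->pending = false;
		return true;
	}
	return false;
}

int main(void)
{
	struct debounce d = { false, 0 };
	uint64_t t;
	int runs = 0;

	for (t = 0; t < 20; t++) {
		if (t < 5)
			on_event(&d, t);	/* a 5-event burst... */
		if (due(&d, t))
			runs++;
	}
	printf("burst of 5 events -> %d handler run(s)\n", runs);	/* 1 */
	return 0;
}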
+ */ + smu_wbrf_support_check(smu); + if (smu->is_apu) { ret = smu_set_gfx_imu_enable(smu); if (ret) @@ -1726,6 +1932,8 @@ static int smu_smc_hw_cleanup(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret = 0; + smu_wbrf_fini(smu); + cancel_work_sync(&smu->throttling_logging_work); cancel_work_sync(&smu->interrupt_work); @@ -2980,19 +3188,17 @@ static int smu_set_xgmi_pstate(void *handle, return ret; } -static int smu_get_baco_capability(void *handle, bool *cap) +static bool smu_get_baco_capability(void *handle) { struct smu_context *smu = handle; - *cap = false; - if (!smu->pm_enabled) - return 0; + return false; - if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) - *cap = smu->ppt_funcs->baco_is_support(smu); + if (!smu->ppt_funcs || !smu->ppt_funcs->baco_is_support) + return false; - return 0; + return smu->ppt_funcs->baco_is_support(smu); } static int smu_baco_set_state(void *handle, int state) @@ -3166,6 +3372,20 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table) return smu->ppt_funcs->get_gpu_metrics(smu, table); } +static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics, + size_t size) +{ + struct smu_context *smu = handle; + + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + return -EOPNOTSUPP; + + if (!smu->ppt_funcs->get_pm_metrics) + return -EOPNOTSUPP; + + return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size); +} + static int smu_enable_mgpu_fan_boost(void *handle) { struct smu_context *smu = handle; @@ -3307,6 +3527,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .set_df_cstate = smu_set_df_cstate, .set_xgmi_pstate = smu_set_xgmi_pstate, .get_gpu_metrics = smu_sys_get_gpu_metrics, + .get_pm_metrics = smu_sys_get_pm_metrics, .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges, .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch, .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index e8329d157b..66e84defd0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -22,6 +22,9 @@ #ifndef __AMDGPU_SMU_H__ #define __AMDGPU_SMU_H__ +#include +#include + #include "amdgpu.h" #include "kgd_pp_interface.h" #include "dm_pp_interface.h" @@ -253,6 +256,7 @@ struct smu_table { uint64_t mc_address; void *cpu_addr; struct amdgpu_bo *bo; + uint32_t version; }; enum smu_perf_level_designation { @@ -317,6 +321,7 @@ enum smu_table_id { SMU_TABLE_PACE, SMU_TABLE_ECCINFO, SMU_TABLE_COMBO_PPTABLE, + SMU_TABLE_WIFIBAND, SMU_TABLE_COUNT, }; @@ -469,6 +474,12 @@ struct stb_context { #define WORKLOAD_POLICY_MAX 7 +/* + * Configure wbrf event handling pace as there can be only one + * event processed every SMU_WBRF_EVENT_HANDLING_PACE ms. + */ +#define SMU_WBRF_EVENT_HANDLING_PACE 10 + struct smu_context { struct amdgpu_device *adev; struct amdgpu_irq_src irq_source; @@ -568,6 +579,11 @@ struct smu_context { struct delayed_work swctf_delayed_work; enum pp_xgmi_plpd_mode plpd_mode; + + /* data structures for wbrf feature support */ + bool wbrf_supported; + struct notifier_block wbrf_notifier; + struct delayed_work wbrf_delayed_work; }; struct i2c_adapter; @@ -1251,6 +1267,15 @@ struct pptable_funcs { */ ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table); + /** + * @get_pm_metrics: Get one snapshot of power management metrics from + * PMFW. 
+ * + * Return: Size of the metrics sample + */ + ssize_t (*get_pm_metrics)(struct smu_context *smu, void *pm_metrics, + size_t size); + /** * @enable_mgpu_fan_boost: Enable multi-GPU fan boost. */ @@ -1364,6 +1389,22 @@ struct pptable_funcs { * @notify_rlc_state: Notify RLC power state to SMU. */ int (*notify_rlc_state)(struct smu_context *smu, bool en); + + /** + * @is_asic_wbrf_supported: check whether PMFW supports the wbrf feature + */ + bool (*is_asic_wbrf_supported)(struct smu_context *smu); + + /** + * @enable_uclk_shadow: Enable the uclk shadow feature on wbrf supported + */ + int (*enable_uclk_shadow)(struct smu_context *smu, bool enable); + + /** + * @set_wbrf_exclusion_ranges: notify SMU the wifi bands occupied + */ + int (*set_wbrf_exclusion_ranges)(struct smu_context *smu, + struct freq_band_range *exclusion_ranges); }; typedef enum { @@ -1490,6 +1531,17 @@ enum smu_baco_seq { __dst_size); \ }) +typedef struct { + uint16_t LowFreq; + uint16_t HighFreq; +} WifiOneBand_t; + +typedef struct { + uint32_t WifiBandEntryNum; + WifiOneBand_t WifiBandEntry[11]; + uint32_t MmHubPadding[8]; +} WifiBandEntryTable_t; + #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4) int smu_get_power_limit(void *handle, uint32_t *limit, diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h index 9dd1ed5b89..b114d14fc0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h @@ -1615,7 +1615,8 @@ typedef struct { #define TABLE_I2C_COMMANDS 9 #define TABLE_DRIVER_INFO 10 #define TABLE_ECCINFO 11 -#define TABLE_COUNT 12 +#define TABLE_WIFIBAND 12 +#define TABLE_COUNT 13 //IH Interupt ID #define IH_INTERRUPT_ID_TO_DRIVER 0xFE diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h index 62b7c0daff..8b1496f8ce 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h @@ -1605,7 +1605,8 @@ typedef struct { #define TABLE_I2C_COMMANDS 9 #define TABLE_DRIVER_INFO 10 #define TABLE_ECCINFO 11 -#define TABLE_COUNT 12 +#define TABLE_WIFIBAND 12 +#define TABLE_COUNT 13 //IH Interupt ID #define IH_INTERRUPT_ID_TO_DRIVER 0xFE diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h index 8f42771e1f..5bb7a63c06 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h @@ -24,11 +24,6 @@ #ifndef SMU14_DRIVER_IF_V14_0_0_H #define SMU14_DRIVER_IF_V14_0_0_H -// *** IMPORTANT *** -// SMU TEAM: Always increment the interface version if -// any structure is changed in this file -#define PMFW_DRIVER_IF_VERSION 7 - typedef struct { int32_t value; uint32_t numFractionalBits; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h index e2ee855c77..e862d323ca 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h @@ -138,10 +138,9 @@ #define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A #define PPSMC_MSG_SetPriorityDeltaGain 0x4B #define 
PPSMC_MSG_AllowIHHostInterrupt 0x4C - #define PPSMC_MSG_DALNotPresent 0x4E - -#define PPSMC_Message_Count 0x4F +#define PPSMC_MSG_EnableUCLKShadow 0x51 +#define PPSMC_Message_Count 0x52 //Debug Dump Message #define DEBUGSMC_MSG_TestMessage 0x1 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h index 6aaefca9b5..a6bf9cdd13 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h @@ -134,6 +134,7 @@ #define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A #define PPSMC_MSG_SetPriorityDeltaGain 0x4B #define PPSMC_MSG_AllowIHHostInterrupt 0x4C -#define PPSMC_Message_Count 0x4D +#define PPSMC_MSG_EnableUCLKShadow 0x51 +#define PPSMC_Message_Count 0x52 #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index 9dd47d9109..953a767613 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -259,7 +259,9 @@ __SMU_DUMMY_MAP(PowerUpUmsch), \ __SMU_DUMMY_MAP(PowerDownUmsch), \ __SMU_DUMMY_MAP(SetSoftMaxVpe), \ - __SMU_DUMMY_MAP(SetSoftMinVpe), + __SMU_DUMMY_MAP(SetSoftMinVpe), \ + __SMU_DUMMY_MAP(GetMetricsVersion), \ + __SMU_DUMMY_MAP(EnableUCLKShadow), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 95cb919718..fbd57fa1a0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -210,15 +210,8 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu); int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); -int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, - enum smu_baco_seq baco_seq); - bool smu_v13_0_baco_is_support(struct smu_context *smu); -enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu); - -int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state); - int smu_v13_0_baco_enter(struct smu_context *smu); int smu_v13_0_baco_exit(struct smu_context *smu); @@ -301,5 +294,9 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, int smu_v13_0_disable_pmfw_state(struct smu_context *smu); +int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable); + +int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu, + struct freq_band_range *exclusion_ranges); #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index a5b569976f..3f7463c1c1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -26,8 +26,8 @@ #include "amdgpu_smu.h" #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF +#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1 -#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x6 #define FEATURE_MASK(feature) (1ULL << feature) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index ac04ed1e49..40ba7227cc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2411,8 +2411,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, 
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, .baco_is_support = smu_v11_0_baco_is_support, - .baco_get_state = smu_v11_0_baco_get_state, - .baco_set_state = smu_v11_0_baco_set_state, .baco_enter = smu_v11_0_baco_enter, .baco_exit = smu_v11_0_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 0821018459..836b1df799 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -3539,8 +3539,6 @@ static const struct pptable_funcs navi10_ppt_funcs = { .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, .baco_is_support = smu_v11_0_baco_is_support, - .baco_get_state = smu_v11_0_baco_get_state, - .baco_set_state = smu_v11_0_baco_set_state, .baco_enter = navi10_baco_enter, .baco_exit = navi10_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 6f37ca7a06..1f18b61884 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -4432,8 +4432,6 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, .baco_is_support = smu_v11_0_baco_is_support, - .baco_get_state = smu_v11_0_baco_get_state, - .baco_set_state = smu_v11_0_baco_set_state, .baco_enter = sienna_cichlid_baco_enter, .baco_exit = sienna_cichlid_baco_exit, .mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index e3579a8fd7..ca0d5a5b20 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1530,7 +1530,6 @@ static int aldebaran_i2c_control_init(struct smu_context *smu) smu_i2c->port = 0; mutex_init(&smu_i2c->mutex); control->owner = THIS_MODULE; - control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &aldebaran_i2c_algo; snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0"); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 68981086b5..c486182ff2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -2201,7 +2201,7 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu, return ret; } -int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, +static int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_baco_seq baco_seq) { struct smu_baco_context *smu_baco = &smu->smu_baco; @@ -2223,33 +2223,14 @@ int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, return 0; } -bool smu_v13_0_baco_is_support(struct smu_context *smu) -{ - struct smu_baco_context *smu_baco = &smu->smu_baco; - - if (amdgpu_sriov_vf(smu->adev) || - !smu_baco->platform_support) - return false; - - /* return true if ASIC is in BACO state already */ - if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) - return true; - - if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && - !smu_cmn_feature_is_enabled(smu, 
SMU_FEATURE_BACO_BIT)) - return false; - - return true; -} - -enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu) +static enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; return smu_baco->state; } -int smu_v13_0_baco_set_state(struct smu_context *smu, +static int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) { struct smu_baco_context *smu_baco = &smu->smu_baco; @@ -2283,24 +2264,60 @@ int smu_v13_0_baco_set_state(struct smu_context *smu, return ret; } -int smu_v13_0_baco_enter(struct smu_context *smu) +bool smu_v13_0_baco_is_support(struct smu_context *smu) { - int ret = 0; + struct smu_baco_context *smu_baco = &smu->smu_baco; - ret = smu_v13_0_baco_set_state(smu, - SMU_BACO_STATE_ENTER); - if (ret) - return ret; + if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) + return false; + + /* return true if ASIC is in BACO state already */ + if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) + return true; - msleep(10); + if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && + !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) + return false; - return ret; + return true; +} + +int smu_v13_0_baco_enter(struct smu_context *smu) +{ + struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + int ret; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + return smu_v13_0_baco_set_armd3_sequence(smu, + (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? + BACO_SEQ_BAMACO : BACO_SEQ_BACO); + } else { + ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); + if (!ret) + usleep_range(10000, 11000); + + return ret; + } } int smu_v13_0_baco_exit(struct smu_context *smu) { - return smu_v13_0_baco_set_state(smu, - SMU_BACO_STATE_EXIT); + struct amdgpu_device *adev = smu->adev; + int ret; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + /* Wait for PMFW handling for the Dstate change */ + usleep_range(10000, 11000); + ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); + } else { + ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_EXIT); + } + + if (!ret) + adev->gfx.is_poweron = false; + + return ret; } int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu) @@ -2492,3 +2509,51 @@ int smu_v13_0_disable_pmfw_state(struct smu_context *smu) return ret == 0 ? 0 : -EINVAL; } + +int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable) +{ + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableUCLKShadow, enable, NULL); +} + +int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu, + struct freq_band_range *exclusion_ranges) +{ + WifiBandEntryTable_t wifi_bands; + int valid_entries = 0; + int ret, i; + + memset(&wifi_bands, 0, sizeof(wifi_bands)); + for (i = 0; i < ARRAY_SIZE(wifi_bands.WifiBandEntry); i++) { + if (!exclusion_ranges[i].start && !exclusion_ranges[i].end) + break; + + /* PMFW expects the inputs to be in Mhz unit */ + wifi_bands.WifiBandEntry[valid_entries].LowFreq = + DIV_ROUND_DOWN_ULL(exclusion_ranges[i].start, HZ_PER_MHZ); + wifi_bands.WifiBandEntry[valid_entries++].HighFreq = + DIV_ROUND_UP_ULL(exclusion_ranges[i].end, HZ_PER_MHZ); + } + wifi_bands.WifiBandEntryNum = valid_entries; + + /* + * Per confirm with PMFW team, WifiBandEntryNum = 0 + * is a valid setting. + * + * Considering the scenarios below: + * - At first the wifi device adds an exclusion range e.g. (2400,2500) to + * BIOS and our driver gets notified. 
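The Hz-to-MHz conversion in smu_v13_0_set_wbrf_exclusion_ranges() rounds asymmetrically on purpose: DIV_ROUND_DOWN_ULL on the low edge and DIV_ROUND_UP_ULL on the high edge can only widen the excluded band, never clip occupied spectrum. A standalone sketch with an assumed band:

#include <stdint.h>
#include <stdio.h>

#define HZ_PER_MHZ 1000000ULL

int main(void)
{
	/* hypothetical exclusion range in Hz, as the wbrf core reports it */
	uint64_t start_hz = 2400500000ULL, end_hz = 2483500000ULL;

	uint16_t low  = (uint16_t)(start_hz / HZ_PER_MHZ);			/* floor */
	uint16_t high = (uint16_t)((end_hz + HZ_PER_MHZ - 1) / HZ_PER_MHZ);	/* ceil */

	/* matches the WifiOneBand_t fields handed to PMFW */
	printf("LowFreq=%u MHz HighFreq=%u MHz\n", low, high);
	return 0;
}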
We will set WifiBandEntryNum = 1 + * and pass the WifiBandEntry (2400, 2500) to PMFW. + * + * - Later the wifi device removes the wifiband list added above and + * our driver gets notified again. At this time, driver will set + * WifiBandEntryNum = 0 and pass an empty WifiBandEntry list to PMFW. + * + * - PMFW may still need to do some uclk shadow update(e.g. switching + * from shadow clock back to primary clock) on receiving this. + */ + ret = smu_cmn_update_table(smu, SMU_TABLE_WIFIBAND, 0, &wifi_bands, true); + if (ret) + dev_warn(smu->adev->dev, "Failed to set wifiband!"); + + return ret; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 9ac6c408d2..9c03296f92 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -169,6 +169,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0), MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0), MSG_MAP(DALNotPresent, PPSMC_MSG_DALNotPresent, 0), + MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0), }; static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = { @@ -253,6 +254,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = { TAB_MAP(I2C_COMMANDS), TAB_MAP(ECCINFO), TAB_MAP(OVERDRIVE), + TAB_MAP(WIFIBAND), }; static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -498,6 +500,9 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND, + sizeof(WifiBandEntryTable_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); if (!smu_table->metrics_table) @@ -2544,16 +2549,19 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, workload_mask = 1 << workload_type; - /* Add optimizations for SMU13.0.0. Reuse the power saving profile */ - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE && - (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) && - ((smu->adev->pm.fw_version == 0x004e6601) || - (smu->adev->pm.fw_version >= 0x004e7400))) { - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - PP_SMC_POWER_PROFILE_POWERSAVING); - if (workload_type >= 0) - workload_mask |= 1 << workload_type; + /* Add optimizations for SMU13.0.0/10. 
Reuse the power saving profile */ + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) { + if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && + ((smu->adev->pm.fw_version == 0x004e6601) || + (smu->adev->pm.fw_version >= 0x004e7300))) || + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && + smu->adev->pm.fw_version >= 0x00504500)) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_POWERSAVING); + if (workload_type >= 0) + workload_mask |= 1 << workload_type; + } } return smu_cmn_send_smc_msg_with_param(smu, @@ -2562,38 +2570,6 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, NULL); } -static int smu_v13_0_0_baco_enter(struct smu_context *smu) -{ - struct smu_baco_context *smu_baco = &smu->smu_baco; - struct amdgpu_device *adev = smu->adev; - - if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) - return smu_v13_0_baco_set_armd3_sequence(smu, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? - BACO_SEQ_BAMACO : BACO_SEQ_BACO); - else - return smu_v13_0_baco_enter(smu); -} - -static int smu_v13_0_0_baco_exit(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - int ret; - - if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { - /* Wait for PMFW handling for the Dstate change */ - usleep_range(10000, 11000); - ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); - } else { - ret = smu_v13_0_baco_exit(smu); - } - - if (!ret) - adev->gfx.is_poweron = false; - - return ret; -} - static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -2724,7 +2700,6 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu) smu_i2c->port = i; mutex_init(&smu_i2c->mutex); control->owner = THIS_MODULE; - control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &smu_v13_0_0_i2c_algo; snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); @@ -2968,6 +2943,20 @@ static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu, return ret; } +static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 0): + return smu->smc_fw_version >= 0x004e6300; + case IP_VERSION(13, 0, 10): + return smu->smc_fw_version >= 0x00503300; + default: + return false; + } +} + static int smu_v13_0_0_set_power_limit(struct smu_context *smu, enum smu_ppt_limit_type limit_type, uint32_t limit) @@ -3082,10 +3071,8 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .deep_sleep_control = smu_v13_0_deep_sleep_control, .gfx_ulv_control = smu_v13_0_gfx_ulv_control, .baco_is_support = smu_v13_0_baco_is_support, - .baco_get_state = smu_v13_0_baco_get_state, - .baco_set_state = smu_v13_0_baco_set_state, - .baco_enter = smu_v13_0_0_baco_enter, - .baco_exit = smu_v13_0_0_baco_exit, + .baco_enter = smu_v13_0_baco_enter, + .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, .mode1_reset = smu_v13_0_0_mode1_reset, .mode2_reset = smu_v13_0_0_mode2_reset, @@ -3097,6 +3084,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .gpo_control = smu_v13_0_gpo_control, .get_ecc_info = smu_v13_0_0_get_ecc_info, .notify_display_change = smu_v13_0_notify_display_change, + .is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check, + .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow, 
+ .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges, }; void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index bb98156b2f..949131bd1e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) struct amdgpu_device *adev = smu->adev; int ret = 0; - if (!en && !adev->in_s0ix) + if (!en && !adev->in_s0ix) { + /* Adds a GFX reset as workaround just before sending the + * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering + * an invalid state. + */ + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_2, NULL); + if (ret) + return ret; + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); + } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 58f63b7a72..78491b04df 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -120,6 +120,7 @@ struct mca_ras_info { #define P2S_TABLE_ID_A 0x50325341 #define P2S_TABLE_ID_X 0x50325358 +// clang-format off static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -128,6 +129,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), MSG_MAP(RequestI2cTransaction, PPSMC_MSG_RequestI2cTransaction, 0), MSG_MAP(GetMetricsTable, PPSMC_MSG_GetMetricsTable, 1), + MSG_MAP(GetMetricsVersion, PPSMC_MSG_GetMetricsVersion, 1), MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1), MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1), MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), @@ -158,8 +160,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0), MSG_MAP(GetMinGfxclkFrequency, PPSMC_MSG_GetMinGfxDpmFreq, 1), MSG_MAP(GetMaxGfxclkFrequency, PPSMC_MSG_GetMaxGfxDpmFreq, 1), - MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0), - MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0), + MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 1), + MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0), MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0), MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0), @@ -171,6 +173,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), }; +// clang-format on static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = { CLK_MAP(SOCCLK, PPCLK_SOCCLK), CLK_MAP(FCLK, PPCLK_FCLK), @@ -432,6 +435,41 @@ static int smu_v13_0_6_get_metrics_table(struct smu_context *smu, return 0; } +static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu, + void *metrics, size_t max_size) +{ + struct smu_table_context *smu_tbl_ctxt = &smu->smu_table; + uint32_t table_version = smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].version; + uint32_t table_size = 
smu_tbl_ctxt->tables[SMU_TABLE_SMU_METRICS].size; + struct amdgpu_pm_metrics *pm_metrics = metrics; + uint32_t pmfw_version; + int ret; + + if (!pm_metrics || !max_size) + return -EINVAL; + + if (max_size < (table_size + sizeof(pm_metrics->common_header))) + return -EOVERFLOW; + + /* Don't use cached metrics data */ + ret = smu_v13_0_6_get_metrics_table(smu, pm_metrics->data, true); + if (ret) + return ret; + + smu_cmn_get_smc_version(smu, NULL, &pmfw_version); + + memset(&pm_metrics->common_header, 0, + sizeof(pm_metrics->common_header)); + pm_metrics->common_header.mp1_ip_discovery_version = + IP_VERSION(13, 0, 6); + pm_metrics->common_header.pmfw_version = pmfw_version; + pm_metrics->common_header.pmmetrics_version = table_version; + pm_metrics->common_header.structure_size = + sizeof(pm_metrics->common_header) + table_size; + + return pm_metrics->common_header.structure_size; +} + static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; @@ -441,6 +479,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) (struct PPTable_t *)smu_table->driver_pptable; struct amdgpu_device *adev = smu->adev; int ret, i, retry = 100; + uint32_t table_version; /* Store one-time values in driver PPTable */ if (!pptable->Init) { @@ -459,6 +498,13 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) if (!retry) return -ETIME; + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion, + &table_version); + if (ret) + return ret; + smu_table->tables[SMU_TABLE_SMU_METRICS].version = + table_version; + pptable->MaxSocketPowerLimit = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit)); pptable->MaxGfxclkFrequency = @@ -1479,7 +1525,6 @@ static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable) if (smu->smc_fw_version < 0x554800) return 0; - amdgpu_ras_set_mca_debug_mode(smu->adev, enable); return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead, enable ? 0 : ClearMcaOnRead_UE_FLAG_MASK | ClearMcaOnRead_CE_POLL_MASK, NULL); @@ -1893,7 +1938,6 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu) smu_i2c->port = i; mutex_init(&smu_i2c->mutex); control->owner = THIS_MODULE; - control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &smu_v13_0_6_i2c_algo; snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); @@ -2332,16 +2376,6 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu, return ret; } -static int smu_v13_0_6_post_init(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - - if (!amdgpu_sriov_vf(adev) && adev->ras_enabled) - return smu_v13_0_6_mca_set_debug_mode(smu, false); - - return 0; -} - static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -2424,8 +2458,8 @@ static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info) { - uint64_t ipid = entry->regs[MCA_REG_IDX_IPID]; - uint32_t insthi; + u64 ipid = entry->regs[MCA_REG_IDX_IPID]; + u32 instidhi, instid; /* NOTE: All MCA IPID register share the same format, * so the driver can share the MCMP1 register header file. 
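[Aside on the new interface: smu_v13_0_6_get_pm_metrics() above returns a header-prefixed blob and reports its total size as the return value. A minimal consumer sketch follows; the header below mirrors only the fields assigned in this hunk, and the real struct amdgpu_pm_metrics defined elsewhere in the tree may differ, so treat this as an illustration under those assumptions.]

/*
 * Hedged sketch of a consumer for the header-prefixed PM metrics blob
 * produced by smu_v13_0_6_get_pm_metrics(). The header layout mirrors
 * only the fields visible in the hunk above; it is an assumption, not
 * the in-tree struct amdgpu_pm_metrics.
 */
#include <linux/types.h>
#include <linux/errno.h>

struct example_pm_metrics_header {
	u32 structure_size;		/* sizeof(header) + metrics table size */
	u32 mp1_ip_discovery_version;	/* IP_VERSION(13, 0, 6) for this ASIC */
	u32 pmfw_version;		/* from smu_cmn_get_smc_version() */
	u32 pmmetrics_version;		/* from SMU_MSG_GetMetricsVersion */
};

static int example_validate_pm_metrics(const void *buf, ssize_t len)
{
	const struct example_pm_metrics_header *hdr = buf;

	if (len < (ssize_t)sizeof(*hdr))
		return -EINVAL;

	/* The callback returns structure_size on success, so a consumer
	 * can cross-check it against the number of bytes it received. */
	if ((ssize_t)hdr->structure_size > len)
		return -EOVERFLOW;

	/* The raw metrics table payload follows directly after the header. */
	return 0;
}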
@@ -2434,9 +2468,15 @@ static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_ info->hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID); info->mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType); - insthi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi); - info->aid = ((insthi >> 2) & 0x03); - info->socket_id = insthi & 0x03; + /* + * Unified DieID Format: SAASS. A:AID, S:Socket. + * Unified DieID[4] = InstanceId[0] + * Unified DieID[0:3] = InstanceIdHi[0:3] + */ + instidhi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi); + instid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo); + info->aid = ((instidhi >> 2) & 0x03); + info->socket_id = ((instid & 0x1) << 2) | (instidhi & 0x03); } static int mca_bank_read_reg(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, @@ -2515,9 +2555,9 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct return 0; } - if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(status0)) + if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0)) *count = 1; - else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(status0)) + else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0)) *count = 1; return 0; @@ -2528,13 +2568,15 @@ static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, st uint32_t *count) { u32 ext_error_code; + u32 err_cnt; ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]); + err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]); if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0) - *count = 1; + *count = err_cnt; else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6) - *count = 1; + *count = err_cnt; return 0; } @@ -2610,6 +2652,7 @@ static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct uint32_t instlo; instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo); + instlo &= GENMASK(31, 1); switch (instlo) { case 0x36430400: /* SMNAID XCD 0 */ case 0x38430400: /* SMNAID XCD 1 */ @@ -2629,6 +2672,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd uint32_t errcode, instlo; instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo); + instlo &= GENMASK(31, 1); if (instlo != 0x03b30400) return false; @@ -2851,6 +2895,13 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, return ret; } +static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu, + void *table) +{ + /* Support ecc info by default */ + return 0; +} + static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -2895,6 +2946,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .get_gpu_metrics = smu_v13_0_6_get_gpu_metrics, + .get_pm_metrics = smu_v13_0_6_get_pm_metrics, .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range, .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported, .mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported, @@ -2904,7 +2956,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .i2c_init = smu_v13_0_6_i2c_control_init, .i2c_fini = smu_v13_0_6_i2c_control_fini, .send_hbm_bad_pages_num =
smu_v13_0_6_smu_send_hbm_bad_page_num, - .post_init = smu_v13_0_6_post_init, + .get_ecc_info = smu_v13_0_6_get_ecc_info, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 402e0d184e..7318964f1f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -140,6 +140,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), + MSG_MAP(EnableUCLKShadow, PPSMC_MSG_EnableUCLKShadow, 0), }; static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { @@ -222,6 +223,7 @@ static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = { TAB_MAP(ACTIVITY_MONITOR_COEFF), [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, TAB_MAP(OVERDRIVE), + TAB_MAP(WIFIBAND), }; static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -512,6 +514,9 @@ static int smu_v13_0_7_tables_init(struct smu_context *smu) AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_WIFIBAND, + sizeof(WifiBandEntryTable_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); if (!smu_table->metrics_table) @@ -2513,38 +2518,6 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu, return ret; } -static int smu_v13_0_7_baco_enter(struct smu_context *smu) -{ - struct smu_baco_context *smu_baco = &smu->smu_baco; - struct amdgpu_device *adev = smu->adev; - - if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) - return smu_v13_0_baco_set_armd3_sequence(smu, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? 
- BACO_SEQ_BAMACO : BACO_SEQ_BACO); - else - return smu_v13_0_baco_enter(smu); -} - -static int smu_v13_0_7_baco_exit(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - int ret; - - if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { - /* Wait for PMFW handling for the Dstate change */ - usleep_range(10000, 11000); - ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); - } else { - ret = smu_v13_0_baco_exit(smu); - } - - if (!ret) - adev->gfx.is_poweron = false; - - return ret; -} - static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -2565,6 +2538,11 @@ static int smu_v13_0_7_set_df_cstate(struct smu_context *smu, NULL); } +static bool smu_v13_0_7_wbrf_support_check(struct smu_context *smu) +{ + return smu->smc_fw_version > 0x00524600; +} + static int smu_v13_0_7_set_power_limit(struct smu_context *smu, enum smu_ppt_limit_type limit_type, uint32_t limit) @@ -2673,15 +2651,16 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, .baco_is_support = smu_v13_0_baco_is_support, - .baco_get_state = smu_v13_0_baco_get_state, - .baco_set_state = smu_v13_0_baco_set_state, - .baco_enter = smu_v13_0_7_baco_enter, - .baco_exit = smu_v13_0_7_baco_exit, + .baco_enter = smu_v13_0_baco_enter, + .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, .mode1_reset = smu_v13_0_mode1_reset, .set_mp1_state = smu_v13_0_7_set_mp1_state, .set_df_cstate = smu_v13_0_7_set_df_cstate, .gpo_control = smu_v13_0_gpo_control, + .is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check, + .enable_uclk_shadow = smu_v13_0_enable_uclk_shadow, + .set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges, }; void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 01f2ab4567..6dae5ad74f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -224,7 +224,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) if (smu->is_apu) adev->pm.fw_version = smu_version; - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(14, 0, 2): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; break; @@ -233,7 +233,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) break; default: dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", - adev->ip_versions[MP1_HWIP][0]); + amdgpu_ip_version(adev, MP1_HWIP, 0)); smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV; break; } @@ -731,7 +731,7 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable) int ret = 0; struct amdgpu_device *adev = smu->adev; - switch (adev->ip_versions[MP1_HWIP][0]) { + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(14, 0, 2): case IP_VERSION(14, 0, 0): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 24a43374a7..9310c4758e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -1088,6 +1088,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu, 0, NULL); } +static int smu_14_0_0_get_dpm_table(struct smu_context 
*smu, struct dpm_clocks *clock_table) +{ + DpmClocks_t *clk_table = smu->smu_table.clocks_table; + uint8_t idx; + + /* Only the SOC and VPE clock information is copied, to provide the VPE DPM settings. */ + for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) { + clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx]:0; + clock_table->SocClocks[idx].Vol = 0; + } + + for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) { + clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx]:0; + clock_table->VPEClocks[idx].Vol = 0; + } + + return 0; +} + static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .check_fw_status = smu_v14_0_check_fw_status, .check_fw_version = smu_v14_0_check_fw_version, @@ -1118,6 +1137,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable, + .get_dpm_clock_table = smu_14_0_0_get_dpm_table, }; static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h index 64766ac69c..6f4d212607 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h @@ -98,6 +98,9 @@ #define smu_set_config_table(smu, config_table) smu_ppt_funcs(set_config_table, -EOPNOTSUPP, smu, config_table) #define smu_init_pptable_microcode(smu) smu_ppt_funcs(init_pptable_microcode, 0, smu) #define smu_notify_rlc_state(smu, en) smu_ppt_funcs(notify_rlc_state, 0, smu, en) +#define smu_is_asic_wbrf_supported(smu) smu_ppt_funcs(is_asic_wbrf_supported, false, smu) +#define smu_enable_uclk_shadow(smu, enable) smu_ppt_funcs(enable_uclk_shadow, 0, smu, enable) +#define smu_set_wbrf_exclusion_ranges(smu, freq_band_range) smu_ppt_funcs(set_wbrf_exclusion_ranges, -EOPNOTSUPP, smu, freq_band_range) #endif #endif diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index dc01c43f61..d72c22dcf6 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -221,7 +221,7 @@ static int malidp_crtc_atomic_check_ctm(struct drm_crtc *crtc, /* * The size of the ctm is checked in - * drm_atomic_replace_property_blob_from_id. + * drm_property_replace_blob_from_id.
*/ ctm = (struct drm_color_ctm *)state->ctm->data; for (i = 0; i < ARRAY_SIZE(ctm->matrix); ++i) { diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 15dd667aa2..c78687c755 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -7,8 +7,9 @@ #include #include #include -#include +#include #include +#include #include #include @@ -1012,26 +1013,17 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data) int irq = platform_get_irq(pdev, 0); const struct armada_variant *variant; struct device_node *port = NULL; + struct device_node *np, *parent = dev->of_node; if (irq < 0) return irq; - if (!dev->of_node) { - const struct platform_device_id *id; - id = platform_get_device_id(pdev); - if (!id) - return -ENXIO; - - variant = (const struct armada_variant *)id->driver_data; - } else { - const struct of_device_id *match; - struct device_node *np, *parent = dev->of_node; - - match = of_match_device(dev->driver->of_match_table, dev); - if (!match) - return -ENXIO; + variant = device_get_match_data(dev); + if (!variant) + return -ENXIO; + if (parent) { np = of_get_child_by_name(parent, "ports"); if (np) parent = np; @@ -1041,8 +1033,6 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data) dev_err(dev, "no port node found in %pOF\n", parent); return -ENXIO; } - - variant = match->data; } return armada_drm_crtc_create(drm, dev, res, irq, variant, port); @@ -1066,10 +1056,9 @@ static int armada_lcd_probe(struct platform_device *pdev) return component_add(&pdev->dev, &armada_lcd_ops); } -static int armada_lcd_remove(struct platform_device *pdev) +static void armada_lcd_remove(struct platform_device *pdev) { component_del(&pdev->dev, &armada_lcd_ops); - return 0; } static const struct of_device_id armada_lcd_of_match[] = { @@ -1095,7 +1084,7 @@ MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids); struct platform_driver armada_lcd_platform_driver = { .probe = armada_lcd_probe, - .remove = armada_lcd_remove, + .remove_new = armada_lcd_remove, .driver = { .name = "armada-lcd", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index fa1c675987..e51ecc4f7e 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -226,10 +226,9 @@ static int armada_drm_probe(struct platform_device *pdev) match); } -static int armada_drm_remove(struct platform_device *pdev) +static void armada_drm_remove(struct platform_device *pdev) { component_master_del(&pdev->dev, &armada_master_ops); - return 0; } static void armada_drm_shutdown(struct platform_device *pdev) @@ -249,7 +248,7 @@ MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids); static struct platform_driver armada_drm_platform_driver = { .probe = armada_drm_probe, - .remove = armada_drm_remove, + .remove_new = armada_drm_remove, .shutdown = armada_drm_shutdown, .driver = { .name = "armada-drm", diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 78122b35a0..a7a6b70220 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -6,10 +6,10 @@ #include #include #include -#include -#include +#include #include #include +#include #include #include @@ -143,7 +143,6 @@ static int aspeed_gfx_load(struct drm_device *drm) struct aspeed_gfx *priv = to_aspeed_gfx(drm); struct device_node *np = pdev->dev.of_node; const struct aspeed_gfx_config *config; - const struct of_device_id 
*match; struct resource *res; int ret; @@ -152,10 +151,9 @@ static int aspeed_gfx_load(struct drm_device *drm) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - match = of_match_device(aspeed_gfx_match, &pdev->dev); - if (!match) + config = device_get_match_data(&pdev->dev); + if (!config) return -EINVAL; - config = match->data; priv->dac_reg = config->dac_reg; priv->int_clr_reg = config->int_clear_reg; diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index ebb6d8ebd4..1e92594169 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -180,6 +180,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on) { struct ast_device *ast = to_ast_device(dev); u8 video_on_off = on; + u32 i = 0; // Video On/Off ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on); @@ -192,6 +193,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on) ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) { // wait 1 ms mdelay(1); + if (++i > 200) + break; } } } diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index cf5b754f04..90bcb1eb9c 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -89,11 +89,194 @@ static const struct pci_device_id ast_pciidlist[] = { MODULE_DEVICE_TABLE(pci, ast_pciidlist); +static bool ast_is_vga_enabled(void __iomem *ioregs) +{ + u8 vgaer = __ast_read8(ioregs, AST_IO_VGAER); + + return vgaer & AST_IO_VGAER_VGA_ENABLE; +} + +static void ast_enable_vga(void __iomem *ioregs) +{ + __ast_write8(ioregs, AST_IO_VGAER, AST_IO_VGAER_VGA_ENABLE); + __ast_write8(ioregs, AST_IO_VGAMR_W, AST_IO_VGAMR_IOSEL); +} + +/* + * Run this function as part of the HW device cleanup; not + * when the DRM device gets released. + */ +static void ast_enable_mmio_release(void *data) +{ + void __iomem *ioregs = (void __force __iomem *)data; + + /* enable standard VGA decode */ + __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, AST_IO_VGACRA1_MMIO_ENABLED); +} + +static int ast_enable_mmio(struct device *dev, void __iomem *ioregs) +{ + void *data = (void __force *)ioregs; + + __ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, + AST_IO_VGACRA1_MMIO_ENABLED | + AST_IO_VGACRA1_VGAIO_DISABLED); + + return devm_add_action_or_reset(dev, ast_enable_mmio_release, data); +} + +static void ast_open_key(void __iomem *ioregs) +{ + __ast_write8_i(ioregs, AST_IO_VGACRI, 0x80, AST_IO_VGACR80_PASSWORD); +} + +static int ast_detect_chip(struct pci_dev *pdev, + void __iomem *regs, void __iomem *ioregs, + enum ast_chip *chip_out, + enum ast_config_mode *config_mode_out) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + enum ast_config_mode config_mode = ast_use_defaults; + uint32_t scu_rev = 0xffffffff; + enum ast_chip chip; + u32 data; + u8 vgacrd0, vgacrd1; + + /* + * Find configuration mode and read SCU revision + */ + + /* Check if we have device-tree properties */ + if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) { + /* We do, disable P2A access */ + config_mode = ast_use_dt; + scu_rev = data; + } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge + /* + * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge + * is disabled. We force using P2A if VGA only mode bit + * is set D[7] + */ + vgacrd0 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0); + vgacrd1 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd1); + if (!(vgacrd0 & 0x80) || !(vgacrd1 & 0x10)) { + + /* + * We have a P2A bridge and it is enabled. 
+ */ + + /* Patch AST2500/AST2510 */ + if ((pdev->revision & 0xf0) == 0x40) { + if (!(vgacrd0 & AST_VRAM_INIT_STATUS_MASK)) + ast_patch_ahb_2500(regs); + } + + /* Double check that it's actually working */ + data = __ast_read32(regs, 0xf004); + if ((data != 0xffffffff) && (data != 0x00)) { + config_mode = ast_use_p2a; + + /* Read SCU7c (silicon revision register) */ + __ast_write32(regs, 0xf004, 0x1e6e0000); + __ast_write32(regs, 0xf000, 0x1); + scu_rev = __ast_read32(regs, 0x1207c); + } + } + } + + switch (config_mode) { + case ast_use_defaults: + dev_info(dev, "Using default configuration\n"); + break; + case ast_use_dt: + dev_info(dev, "Using device-tree for configuration\n"); + break; + case ast_use_p2a: + dev_info(dev, "Using P2A bridge for configuration\n"); + break; + } + + /* + * Identify chipset + */ + + if (pdev->revision >= 0x50) { + chip = AST2600; + dev_info(dev, "AST 2600 detected\n"); + } else if (pdev->revision >= 0x40) { + switch (scu_rev & 0x300) { + case 0x0100: + chip = AST2510; + dev_info(dev, "AST 2510 detected\n"); + break; + default: + chip = AST2500; + dev_info(dev, "AST 2500 detected\n"); + break; + } + } else if (pdev->revision >= 0x30) { + switch (scu_rev & 0x300) { + case 0x0100: + chip = AST1400; + dev_info(dev, "AST 1400 detected\n"); + break; + default: + chip = AST2400; + dev_info(dev, "AST 2400 detected\n"); + break; + } + } else if (pdev->revision >= 0x20) { + switch (scu_rev & 0x300) { + case 0x0000: + chip = AST1300; + dev_info(dev, "AST 1300 detected\n"); + break; + default: + chip = AST2300; + dev_info(dev, "AST 2300 detected\n"); + break; + } + } else if (pdev->revision >= 0x10) { + switch (scu_rev & 0x0300) { + case 0x0200: + chip = AST1100; + dev_info(dev, "AST 1100 detected\n"); + break; + case 0x0100: + chip = AST2200; + dev_info(dev, "AST 2200 detected\n"); + break; + case 0x0000: + chip = AST2150; + dev_info(dev, "AST 2150 detected\n"); + break; + default: + chip = AST2100; + dev_info(dev, "AST 2100 detected\n"); + break; + } + } else { + chip = AST2000; + dev_info(dev, "AST 2000 detected\n"); + } + + *chip_out = chip; + *config_mode_out = config_mode; + + return 0; +} + static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct ast_device *ast; - struct drm_device *dev; + struct device *dev = &pdev->dev; int ret; + void __iomem *regs; + void __iomem *ioregs; + enum ast_config_mode config_mode; + enum ast_chip chip; + struct drm_device *drm; + bool need_post = false; ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver); if (ret) @@ -103,16 +286,80 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; - ast = ast_device_create(&ast_driver, pdev, ent->driver_data); - if (IS_ERR(ast)) - return PTR_ERR(ast); - dev = &ast->base; + regs = pcim_iomap(pdev, 1, 0); + if (!regs) + return -EIO; + + if (pdev->revision >= 0x40) { + /* + * On AST2500 and later models, MMIO is enabled by + * default. Adopt it to be compatible with ARM. + */ + resource_size_t len = pci_resource_len(pdev, 1); + + if (len < AST_IO_MM_OFFSET) + return -EIO; + if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH) + return -EIO; + ioregs = regs + AST_IO_MM_OFFSET; + } else if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) { + /* + * Map I/O registers if we have a PCI BAR for I/O. 
+ */ + resource_size_t len = pci_resource_len(pdev, 2); + + if (len < AST_IO_MM_LENGTH) + return -EIO; + ioregs = pcim_iomap(pdev, 2, 0); + if (!ioregs) + return -EIO; + } else { + /* + * Anything else is best effort. + */ + resource_size_t len = pci_resource_len(pdev, 1); + + if (len < AST_IO_MM_OFFSET) + return -EIO; + if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH) + return -EIO; + ioregs = regs + AST_IO_MM_OFFSET; + + dev_info(dev, "Platform has no I/O space, using MMIO\n"); + } + + if (!ast_is_vga_enabled(ioregs)) { + dev_info(dev, "VGA not enabled on entry, requesting chip POST\n"); + need_post = true; + } + + /* + * If VGA isn't enabled, we need to enable now or subsequent + * access to the scratch registers will fail. + */ + if (need_post) + ast_enable_vga(ioregs); + /* Enable extended register access */ + ast_open_key(ioregs); + + ret = ast_enable_mmio(dev, ioregs); + if (ret) + return ret; + + ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode); + if (ret) + return ret; + + drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post); + if (IS_ERR(drm)) + return PTR_ERR(drm); + pci_set_drvdata(pdev, drm); - ret = drm_dev_register(dev, ent->driver_data); + ret = drm_dev_register(drm, ent->driver_data); if (ret) return ret; - drm_fbdev_generic_setup(dev, 32); + drm_fbdev_generic_setup(drm, 32); return 0; } diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 772f3b049c..3be5ccf1f5 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -98,6 +98,12 @@ enum ast_tx_chip { #define AST_TX_DP501_BIT BIT(AST_TX_DP501) #define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP) +enum ast_config_mode { + ast_use_p2a, + ast_use_dt, + ast_use_defaults +}; + #define AST_DRAM_512Mx16 0 #define AST_DRAM_1Gx16 1 #define AST_DRAM_512Mx32 2 @@ -192,12 +198,13 @@ to_ast_bmc_connector(struct drm_connector *connector) struct ast_device { struct drm_device base; - struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */ void __iomem *regs; void __iomem *ioregs; void __iomem *dp501_fw_buf; + enum ast_config_mode config_mode; enum ast_chip chip; + uint32_t dram_bus_width; uint32_t dram_type; uint32_t mclk; @@ -207,6 +214,8 @@ struct ast_device { unsigned long vram_size; unsigned long vram_fb_available; + struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */ + struct ast_plane primary_plane; struct ast_plane cursor_plane; struct drm_crtc crtc; @@ -234,11 +243,6 @@ struct ast_device { } output; bool support_wide_screen; - enum { - ast_use_p2a, - ast_use_dt, - ast_use_defaults - } config_mode; unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */ u8 *dp501_fw_addr; @@ -250,9 +254,13 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev) return container_of(dev, struct ast_device, base); } -struct ast_device *ast_device_create(const struct drm_driver *drv, - struct pci_dev *pdev, - unsigned long flags); +struct drm_device *ast_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); static inline unsigned long __ast_gen(struct ast_device *ast) { @@ -272,55 +280,94 @@ static inline bool __ast_gen_is_eq(struct ast_device *ast, unsigned long gen) #define IS_AST_GEN6(__ast) __ast_gen_is_eq(__ast, 6) #define IS_AST_GEN7(__ast) __ast_gen_is_eq(__ast, 7) +static inline u8 __ast_read8(const void __iomem *addr, u32 reg) +{ + return 
ioread8(addr + reg); +} + +static inline u32 __ast_read32(const void __iomem *addr, u32 reg) +{ + return ioread32(addr + reg); +} + +static inline void __ast_write8(void __iomem *addr, u32 reg, u8 val) +{ + iowrite8(val, addr + reg); +} + +static inline void __ast_write32(void __iomem *addr, u32 reg, u32 val) +{ + iowrite32(val, addr + reg); +} + +static inline u8 __ast_read8_i(void __iomem *addr, u32 reg, u8 index) +{ + __ast_write8(addr, reg, index); + return __ast_read8(addr, reg + 1); +} + +static inline u8 __ast_read8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask) +{ + u8 val = __ast_read8_i(addr, reg, index); + + return val & read_mask; +} + +static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val) +{ + __ast_write8(addr, reg, index); + __ast_write8(addr, reg + 1, val); +} + +static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask, + u8 val) +{ + u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask); + + tmp |= val; + __ast_write8_i(addr, reg, index, tmp); +} + static inline u32 ast_read32(struct ast_device *ast, u32 reg) { - return ioread32(ast->regs + reg); + return __ast_read32(ast->regs, reg); } static inline void ast_write32(struct ast_device *ast, u32 reg, u32 val) { - iowrite32(val, ast->regs + reg); + __ast_write32(ast->regs, reg, val); } static inline u8 ast_io_read8(struct ast_device *ast, u32 reg) { - return ioread8(ast->ioregs + reg); + return __ast_read8(ast->ioregs, reg); } static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val) { - iowrite8(val, ast->ioregs + reg); + __ast_write8(ast->ioregs, reg, val); } static inline u8 ast_get_index_reg(struct ast_device *ast, u32 base, u8 index) { - ast_io_write8(ast, base, index); - ++base; - return ast_io_read8(ast, base); + return __ast_read8_i(ast->ioregs, base, index); } static inline u8 ast_get_index_reg_mask(struct ast_device *ast, u32 base, u8 index, u8 preserve_mask) { - u8 val = ast_get_index_reg(ast, base, index); - - return val & preserve_mask; + return __ast_read8_i_masked(ast->ioregs, base, index, preserve_mask); } static inline void ast_set_index_reg(struct ast_device *ast, u32 base, u8 index, u8 val) { - ast_io_write8(ast, base, index); - ++base; - ast_io_write8(ast, base, val); + __ast_write8_i(ast->ioregs, base, index, val); } static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 index, u8 preserve_mask, u8 val) { - u8 tmp = ast_get_index_reg_mask(ast, base, index, preserve_mask); - - tmp |= val; - ast_set_index_reg(ast, base, index, tmp); + __ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val); } #define AST_VIDMEM_SIZE_8M 0x00800000 @@ -442,7 +489,7 @@ int ast_mm_init(struct ast_device *ast); void ast_post_gpu(struct drm_device *dev); u32 ast_mindwm(struct ast_device *ast, u32 r); void ast_moutdwm(struct ast_device *ast, u32 r, u32 v); -void ast_patch_ahb_2500(struct ast_device *ast); +void ast_patch_ahb_2500(void __iomem *regs); /* ast dp501 */ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode); bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_i2c.c index 0e845e7acd..e5d3f7121d 100644 --- a/drivers/gpu/drm/ast/ast_i2c.c +++ b/drivers/gpu/drm/ast/ast_i2c.c @@ -120,7 +120,6 @@ struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev) return NULL; i2c->adapter.owner = THIS_MODULE; - i2c->adapter.class = I2C_CLASS_DDC; i2c->adapter.dev.parent = dev->dev; i2c->dev = dev; 
i2c_set_adapdata(&i2c->adapter, i2c); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index f4ab40e22c..2f3ad5f949 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -35,180 +35,6 @@ #include "ast_drv.h" -static bool ast_is_vga_enabled(struct drm_device *dev) -{ - struct ast_device *ast = to_ast_device(dev); - u8 ch; - - ch = ast_io_read8(ast, AST_IO_VGAER); - - return !!(ch & 0x01); -} - -static void ast_enable_vga(struct drm_device *dev) -{ - struct ast_device *ast = to_ast_device(dev); - - ast_io_write8(ast, AST_IO_VGAER, 0x01); - ast_io_write8(ast, AST_IO_VGAMR_W, 0x01); -} - -/* - * Run this function as part of the HW device cleanup; not - * when the DRM device gets released. - */ -static void ast_enable_mmio_release(void *data) -{ - struct ast_device *ast = data; - - /* enable standard VGA decode */ - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x04); -} - -static int ast_enable_mmio(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x06); - - return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast); -} - -static void ast_open_key(struct ast_device *ast) -{ - ast_set_index_reg(ast, AST_IO_VGACRI, 0x80, 0xA8); -} - -static int ast_device_config_init(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - struct pci_dev *pdev = to_pci_dev(dev->dev); - struct device_node *np = dev->dev->of_node; - uint32_t scu_rev = 0xffffffff; - u32 data; - u8 jregd0, jregd1; - - /* - * Find configuration mode and read SCU revision - */ - - ast->config_mode = ast_use_defaults; - - /* Check if we have device-tree properties */ - if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) { - /* We do, disable P2A access */ - ast->config_mode = ast_use_dt; - scu_rev = data; - } else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge - /* - * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge - * is disabled. We force using P2A if VGA only mode bit - * is set D[7] - */ - jregd0 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); - jregd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff); - if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { - - /* - * We have a P2A bridge and it is enabled. 
- */ - - /* Patch AST2500/AST2510 */ - if ((pdev->revision & 0xf0) == 0x40) { - if (!(jregd0 & AST_VRAM_INIT_STATUS_MASK)) - ast_patch_ahb_2500(ast); - } - - /* Double check that it's actually working */ - data = ast_read32(ast, 0xf004); - if ((data != 0xffffffff) && (data != 0x00)) { - ast->config_mode = ast_use_p2a; - - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - scu_rev = ast_read32(ast, 0x1207c); - } - } - } - - switch (ast->config_mode) { - case ast_use_defaults: - drm_info(dev, "Using default configuration\n"); - break; - case ast_use_dt: - drm_info(dev, "Using device-tree for configuration\n"); - break; - case ast_use_p2a: - drm_info(dev, "Using P2A bridge for configuration\n"); - break; - } - - /* - * Identify chipset - */ - - if (pdev->revision >= 0x50) { - ast->chip = AST2600; - drm_info(dev, "AST 2600 detected\n"); - } else if (pdev->revision >= 0x40) { - switch (scu_rev & 0x300) { - case 0x0100: - ast->chip = AST2510; - drm_info(dev, "AST 2510 detected\n"); - break; - default: - ast->chip = AST2500; - drm_info(dev, "AST 2500 detected\n"); - } - } else if (pdev->revision >= 0x30) { - switch (scu_rev & 0x300) { - case 0x0100: - ast->chip = AST1400; - drm_info(dev, "AST 1400 detected\n"); - break; - default: - ast->chip = AST2400; - drm_info(dev, "AST 2400 detected\n"); - } - } else if (pdev->revision >= 0x20) { - switch (scu_rev & 0x300) { - case 0x0000: - ast->chip = AST1300; - drm_info(dev, "AST 1300 detected\n"); - break; - default: - ast->chip = AST2300; - drm_info(dev, "AST 2300 detected\n"); - break; - } - } else if (pdev->revision >= 0x10) { - switch (scu_rev & 0x0300) { - case 0x0200: - ast->chip = AST1100; - drm_info(dev, "AST 1100 detected\n"); - break; - case 0x0100: - ast->chip = AST2200; - drm_info(dev, "AST 2200 detected\n"); - break; - case 0x0000: - ast->chip = AST2150; - drm_info(dev, "AST 2150 detected\n"); - break; - default: - ast->chip = AST2100; - drm_info(dev, "AST 2100 detected\n"); - break; - } - } else { - ast->chip = AST2000; - drm_info(dev, "AST 2000 detected\n"); - } - - return 0; -} - static void ast_detect_widescreen(struct ast_device *ast) { u8 jreg; @@ -424,69 +250,27 @@ static int ast_get_dram_info(struct drm_device *dev) return 0; } -struct ast_device *ast_device_create(const struct drm_driver *drv, - struct pci_dev *pdev, - unsigned long flags) +struct drm_device *ast_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) { struct drm_device *dev; struct ast_device *ast; - bool need_post = false; - int ret = 0; + int ret; ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); if (IS_ERR(ast)) - return ast; + return ERR_CAST(ast); dev = &ast->base; - pci_set_drvdata(pdev, dev); - - ret = drmm_mutex_init(dev, &ast->ioregs_lock); - if (ret) - return ERR_PTR(ret); - - ast->regs = pcim_iomap(pdev, 1, 0); - if (!ast->regs) - return ERR_PTR(-EIO); - - /* - * After AST2500, MMIO is enabled by default, and it should be adopted - * to be compatible with Arm. 
- */ - if (pdev->revision >= 0x40) { - ast->ioregs = ast->regs + AST_IO_MM_OFFSET; - } else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) { - drm_info(dev, "platform has no IO space, trying MMIO\n"); - ast->ioregs = ast->regs + AST_IO_MM_OFFSET; - } - - /* "map" IO regs if the above hasn't done so already */ - if (!ast->ioregs) { - ast->ioregs = pcim_iomap(pdev, 2, 0); - if (!ast->ioregs) - return ERR_PTR(-EIO); - } - - if (!ast_is_vga_enabled(dev)) { - drm_info(dev, "VGA not enabled on entry, requesting chip POST\n"); - need_post = true; - } - - /* - * If VGA isn't enabled, we need to enable now or subsequent - * access to the scratch registers will fail. - */ - if (need_post) - ast_enable_vga(dev); - - /* Enable extended register access */ - ast_open_key(ast); - ret = ast_enable_mmio(ast); - if (ret) - return ERR_PTR(ret); - - ret = ast_device_config_init(ast); - if (ret) - return ERR_PTR(ret); + ast->chip = chip; + ast->config_mode = config_mode; + ast->regs = regs; + ast->ioregs = ioregs; ast_detect_widescreen(ast); ast_detect_tx_chip(ast, need_post); @@ -517,5 +301,5 @@ struct ast_device *ast_device_create(const struct drm_driver *drv, if (ret) return ERR_PTR(ret); - return ast; + return dev; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index c20534d0ef..a718646a66 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -1358,13 +1358,13 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) * Protect access to I/O registers from concurrent modesetting * by acquiring the I/O-register lock. */ - mutex_lock(&ast->ioregs_lock); + mutex_lock(&ast->modeset_lock); edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter); if (!edid) goto err_mutex_unlock; - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); count = drm_add_edid_modes(connector, edid); kfree(edid); @@ -1372,7 +1372,7 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) return count; err_mutex_unlock: - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); err_drm_connector_update_edid_property: drm_connector_update_edid_property(connector, NULL); return 0; @@ -1464,13 +1464,13 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector * Protect access to I/O registers from concurrent modesetting * by acquiring the I/O-register lock. */ - mutex_lock(&ast->ioregs_lock); + mutex_lock(&ast->modeset_lock); edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter); if (!edid) goto err_mutex_unlock; - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); count = drm_add_edid_modes(connector, edid); kfree(edid); @@ -1478,7 +1478,7 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector return count; err_mutex_unlock: - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); err_drm_connector_update_edid_property: drm_connector_update_edid_property(connector, NULL); return 0; @@ -1670,13 +1670,13 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) * Protect access to I/O registers from concurrent modesetting * by acquiring the I/O-register lock. 
*/ - mutex_lock(&ast->ioregs_lock); + mutex_lock(&ast->modeset_lock); succ = ast_astdp_read_edid(connector->dev, edid); if (succ < 0) goto err_mutex_unlock; - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); drm_connector_update_edid_property(connector, edid); count = drm_add_edid_modes(connector, edid); @@ -1685,7 +1685,7 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) return count; err_mutex_unlock: - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); kfree(edid); err_drm_connector_update_edid_property: drm_connector_update_edid_property(connector, NULL); @@ -1870,9 +1870,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s * display modes. Protect access to I/O registers by acquiring * the I/O-register lock. Released in atomic_flush(). */ - mutex_lock(&ast->ioregs_lock); + mutex_lock(&ast->modeset_lock); drm_atomic_helper_commit_tail_rpm(state); - mutex_unlock(&ast->ioregs_lock); + mutex_unlock(&ast->modeset_lock); } static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = { @@ -1910,6 +1910,10 @@ int ast_mode_config_init(struct ast_device *ast) struct drm_connector *physical_connector = NULL; int ret; + ret = drmm_mutex_init(dev, &ast->modeset_lock); + if (ret) + return ret; + ret = drmm_mode_config_init(dev); if (ret) return ret; diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 7a993a3843..22f548805d 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -77,28 +77,42 @@ ast_set_def_ext_reg(struct drm_device *dev) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg); } -u32 ast_mindwm(struct ast_device *ast, u32 r) +static u32 __ast_mindwm(void __iomem *regs, u32 r) { - uint32_t data; + u32 data; - ast_write32(ast, 0xf004, r & 0xffff0000); - ast_write32(ast, 0xf000, 0x1); + __ast_write32(regs, 0xf004, r & 0xffff0000); + __ast_write32(regs, 0xf000, 0x1); do { - data = ast_read32(ast, 0xf004) & 0xffff0000; + data = __ast_read32(regs, 0xf004) & 0xffff0000; } while (data != (r & 0xffff0000)); - return ast_read32(ast, 0x10000 + (r & 0x0000ffff)); + + return __ast_read32(regs, 0x10000 + (r & 0x0000ffff)); } -void ast_moutdwm(struct ast_device *ast, u32 r, u32 v) +static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v) { - uint32_t data; - ast_write32(ast, 0xf004, r & 0xffff0000); - ast_write32(ast, 0xf000, 0x1); + u32 data; + + __ast_write32(regs, 0xf004, r & 0xffff0000); + __ast_write32(regs, 0xf000, 0x1); + do { - data = ast_read32(ast, 0xf004) & 0xffff0000; + data = __ast_read32(regs, 0xf004) & 0xffff0000; } while (data != (r & 0xffff0000)); - ast_write32(ast, 0x10000 + (r & 0x0000ffff), v); + + __ast_write32(regs, 0x10000 + (r & 0x0000ffff), v); +} + +u32 ast_mindwm(struct ast_device *ast, u32 r) +{ + return __ast_mindwm(ast->regs, r); +} + +void ast_moutdwm(struct ast_device *ast, u32 r, u32 v) +{ + __ast_moutdwm(ast->regs, r, v); } /* @@ -1987,17 +2001,18 @@ static bool ast_dram_init_2500(struct ast_device *ast) return true; } -void ast_patch_ahb_2500(struct ast_device *ast) +void ast_patch_ahb_2500(void __iomem *regs) { - u32 data; + u32 data; /* Clear bus lock condition */ - ast_moutdwm(ast, 0x1e600000, 0xAEED1A03); - ast_moutdwm(ast, 0x1e600084, 0x00010000); - ast_moutdwm(ast, 0x1e600088, 0x00000000); - ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); - data = ast_mindwm(ast, 0x1e6e2070); - if (data & 0x08000000) { /* check fast reset */ + __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03); + 
__ast_moutdwm(regs, 0x1e600084, 0x00010000); + __ast_moutdwm(regs, 0x1e600088, 0x00000000); + __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); + + data = __ast_mindwm(regs, 0x1e6e2070); + if (data & 0x08000000) { /* check fast reset */ /* * If "Fast restet" is enabled for ARM-ICE debugger, * then WDT needs to enable, that @@ -2009,16 +2024,18 @@ void ast_patch_ahb_2500(struct ast_device *ast) * [1]:= 1:WDT will be cleeared and disabled after timeout occurs * [0]:= 1:WDT enable */ - ast_moutdwm(ast, 0x1E785004, 0x00000010); - ast_moutdwm(ast, 0x1E785008, 0x00004755); - ast_moutdwm(ast, 0x1E78500c, 0x00000033); + __ast_moutdwm(regs, 0x1E785004, 0x00000010); + __ast_moutdwm(regs, 0x1E785008, 0x00004755); + __ast_moutdwm(regs, 0x1E78500c, 0x00000033); udelay(1000); } + do { - ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); - data = ast_mindwm(ast, 0x1e6e2000); - } while (data != 1); - ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */ + __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); + data = __ast_mindwm(regs, 0x1e6e2000); + } while (data != 1); + + __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */ } void ast_post_chip_2500(struct drm_device *dev) @@ -2030,7 +2047,7 @@ void ast_post_chip_2500(struct drm_device *dev) reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */ /* Clear bus lock condition */ - ast_patch_ahb_2500(ast); + ast_patch_ahb_2500(ast->regs); /* Disable watchdog */ ast_moutdwm(ast, 0x1E78502C, 0x00000000); diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index 555286ecf5..62dddbf3fe 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -10,10 +10,17 @@ */ #define AST_IO_MM_OFFSET (0x380) +#define AST_IO_MM_LENGTH (128) #define AST_IO_VGAARI_W (0x40) + #define AST_IO_VGAMR_W (0x42) +#define AST_IO_VGAMR_R (0x4c) +#define AST_IO_VGAMR_IOSEL BIT(0) + #define AST_IO_VGAER (0x43) +#define AST_IO_VGAER_VGA_ENABLE BIT(0) + #define AST_IO_VGASRI (0x44) #define AST_IO_VGADRR (0x47) #define AST_IO_VGADWR (0x48) @@ -21,14 +28,15 @@ #define AST_IO_VGAGRI (0x4E) #define AST_IO_VGACRI (0x54) +#define AST_IO_VGACR80_PASSWORD (0xa8) +#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1) +#define AST_IO_VGACRA1_MMIO_ENABLED BIT(2) #define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */ #define AST_IO_VGACRCB_HWC_ENABLED BIT(1) #define AST_IO_VGAIR1_R (0x5A) #define AST_IO_VGAIR1_VREFRESH BIT(3) -#define AST_IO_VGAMR_R (0x4C) - /* * Display Transmitter Type */ diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 3e6a4e2044..efd996f6c1 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -12,6 +12,23 @@ config DRM_PANEL_BRIDGE help DRM bridge wrapper of DRM panels +config DRM_AUX_BRIDGE + tristate + depends on DRM_BRIDGE && OF + select AUXILIARY_BUS + select DRM_PANEL_BRIDGE + help + Simple transparent bridge that is used by several non-DRM drivers to + build bridges chain. + +config DRM_AUX_HPD_BRIDGE + tristate + depends on DRM_BRIDGE && OF + select AUXILIARY_BUS + help + Simple bridge that terminates the bridge chain and provides HPD + support. 
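[Aside: the two Kconfig entries above gate the helpers implemented in aux-bridge.c and aux-hpd-bridge.c further below. A minimal consumer sketch follows; it is a hedged illustration rather than code from this patch: the header path and probe/event scaffolding are assumptions, and only the three drm_aux_*/drm_dp_hpd_* calls are the new API.]

/*
 * Hedged usage sketch for the new helpers (see aux-bridge.c and
 * aux-hpd-bridge.c below). Only drm_aux_bridge_register(),
 * drm_dp_hpd_bridge_register() and drm_aux_hpd_bridge_notify() come
 * from this patch; everything else here is assumed scaffolding.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <drm/drm_connector.h>
#include <drm/bridge/aux-bridge.h>	/* assumed location of the new API */

struct example_ctx {
	struct device *hpd_dev;		/* handle kept from probe time */
};

static int example_probe(struct device *dev, struct example_ctx *ctx)
{
	int ret;

	/* Transparent bridge that merely fills a slot in the chain. */
	ret = drm_aux_bridge_register(dev);
	if (ret)
		return ret;

	/* Chain-terminating DP bridge that can forward HPD events. */
	ctx->hpd_dev = drm_dp_hpd_bridge_register(dev, dev->of_node);

	return PTR_ERR_OR_ZERO(ctx->hpd_dev);
}

/* Must run in a context that can sleep, per the kernel-doc below. */
static void example_hpd_event(struct example_ctx *ctx, bool connected)
{
	drm_aux_hpd_bridge_notify(ctx->hpd_dev,
				  connected ? connector_status_connected :
					      connector_status_disconnected);
}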
+ menu "Display Interface Bridges" depends on DRM && DRM_BRIDGE diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 2b892b7ed5..017b583273 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -1,4 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_DRM_AUX_BRIDGE) += aux-bridge.o +obj-$(CONFIG_DRM_AUX_HPD_BRIDGE) += aux-hpd-bridge.o obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c new file mode 100644 index 0000000000..b29980f953 --- /dev/null +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2023 Linaro Ltd. + * + * Author: Dmitry Baryshkov + */ +#include +#include +#include + +#include +#include + +static DEFINE_IDA(drm_aux_bridge_ida); + +static void drm_aux_bridge_release(struct device *dev) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + + ida_free(&drm_aux_bridge_ida, adev->id); + + kfree(adev); +} + +static void drm_aux_bridge_unregister_adev(void *_adev) +{ + struct auxiliary_device *adev = _adev; + + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); +} + +/** + * drm_aux_bridge_register - Create a simple bridge device to link the chain + * @parent: device instance providing this bridge + * + * Creates a simple DRM bridge that doesn't implement any drm_bridge + * operations. Such bridges merely fill a place in the bridge chain linking + * surrounding DRM bridges. + * + * Return: zero on success, negative error code on failure + */ +int drm_aux_bridge_register(struct device *parent) +{ + struct auxiliary_device *adev; + int ret; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return -ENOMEM; + + ret = ida_alloc(&drm_aux_bridge_ida, GFP_KERNEL); + if (ret < 0) { + kfree(adev); + return ret; + } + + adev->id = ret; + adev->name = "aux_bridge"; + adev->dev.parent = parent; + adev->dev.of_node = of_node_get(parent->of_node); + adev->dev.release = drm_aux_bridge_release; + + ret = auxiliary_device_init(adev); + if (ret) { + ida_free(&drm_aux_bridge_ida, adev->id); + kfree(adev); + return ret; + } + + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ret; + } + + return devm_add_action_or_reset(parent, drm_aux_bridge_unregister_adev, adev); +} +EXPORT_SYMBOL_GPL(drm_aux_bridge_register); + +struct drm_aux_bridge_data { + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct device *dev; +}; + +static int drm_aux_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct drm_aux_bridge_data *data; + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) + return -EINVAL; + + data = container_of(bridge, struct drm_aux_bridge_data, bridge); + + return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); +} + +static const struct drm_bridge_funcs drm_aux_bridge_funcs = { + .attach = drm_aux_bridge_attach, +}; + +static int drm_aux_bridge_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) +{ + struct drm_aux_bridge_data *data; + + data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = &auxdev->dev; + data->next_bridge = devm_drm_of_get_bridge(&auxdev->dev, auxdev->dev.of_node, 0, 0); + if 
(IS_ERR(data->next_bridge)) + return dev_err_probe(&auxdev->dev, PTR_ERR(data->next_bridge), + "failed to acquire drm_bridge\n"); + + data->bridge.funcs = &drm_aux_bridge_funcs; + data->bridge.of_node = data->dev->of_node; + + return devm_drm_bridge_add(data->dev, &data->bridge); +} + +static const struct auxiliary_device_id drm_aux_bridge_table[] = { + { .name = KBUILD_MODNAME ".aux_bridge" }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, drm_aux_bridge_table); + +static struct auxiliary_driver drm_aux_bridge_drv = { + .name = "aux_bridge", + .id_table = drm_aux_bridge_table, + .probe = drm_aux_bridge_probe, +}; +module_auxiliary_driver(drm_aux_bridge_drv); + +MODULE_AUTHOR("Dmitry Baryshkov "); +MODULE_DESCRIPTION("DRM transparent bridge"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c new file mode 100644 index 0000000000..6886db2d9e --- /dev/null +++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2023 Linaro Ltd. + * + * Author: Dmitry Baryshkov + */ +#include +#include +#include + +#include +#include + +static DEFINE_IDA(drm_aux_hpd_bridge_ida); + +struct drm_aux_hpd_bridge_data { + struct drm_bridge bridge; + struct device *dev; +}; + +static void drm_aux_hpd_bridge_release(struct device *dev) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + + ida_free(&drm_aux_hpd_bridge_ida, adev->id); + + of_node_put(adev->dev.platform_data); + of_node_put(adev->dev.of_node); + + kfree(adev); +} + +static void drm_aux_hpd_bridge_free_adev(void *_adev) +{ + auxiliary_device_uninit(_adev); +} + +/** + * devm_drm_dp_hpd_bridge_alloc - allocate a HPD DisplayPort bridge + * @parent: device instance providing this bridge + * @np: device node pointer corresponding to this bridge instance + * + * Creates a simple DRM bridge with the type set to + * DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is + * able to send the HPD events. 
+ * + * Return: bridge auxiliary device pointer or an error pointer + */ +struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np) +{ + struct auxiliary_device *adev; + int ret; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return ERR_PTR(-ENOMEM); + + ret = ida_alloc(&drm_aux_hpd_bridge_ida, GFP_KERNEL); + if (ret < 0) { + kfree(adev); + return ERR_PTR(ret); + } + + adev->id = ret; + adev->name = "dp_hpd_bridge"; + adev->dev.parent = parent; + adev->dev.of_node = of_node_get(parent->of_node); + adev->dev.release = drm_aux_hpd_bridge_release; + adev->dev.platform_data = of_node_get(np); + + ret = auxiliary_device_init(adev); + if (ret) { + of_node_put(adev->dev.platform_data); + of_node_put(adev->dev.of_node); + ida_free(&drm_aux_hpd_bridge_ida, adev->id); + kfree(adev); + return ERR_PTR(ret); + } + + ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_free_adev, adev); + if (ret) + return ERR_PTR(ret); + + return adev; +} +EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_alloc); + +static void drm_aux_hpd_bridge_del_adev(void *_adev) +{ + auxiliary_device_delete(_adev); +} + +/** + * devm_drm_dp_hpd_bridge_add - register an HPD DisplayPort bridge + * @dev: struct device to tie registration lifetime to + * @adev: bridge auxiliary device to be registered + * + * Returns: zero on success or a negative errno + */ +int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev) +{ + int ret; + + ret = auxiliary_device_add(adev); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, drm_aux_hpd_bridge_del_adev, adev); +} +EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_add); + +/** + * drm_dp_hpd_bridge_register - allocate and register an HPD DisplayPort bridge + * @parent: device instance providing this bridge + * @np: device node pointer corresponding to this bridge instance + * + * Return: device instance that will handle the created bridge or an error pointer + */ +struct device *drm_dp_hpd_bridge_register(struct device *parent, struct device_node *np) +{ + struct auxiliary_device *adev; + int ret; + + adev = devm_drm_dp_hpd_bridge_alloc(parent, np); + if (IS_ERR(adev)) + return ERR_CAST(adev); + + ret = devm_drm_dp_hpd_bridge_add(parent, adev); + if (ret) + return ERR_PTR(ret); + + return &adev->dev; +} +EXPORT_SYMBOL_GPL(drm_dp_hpd_bridge_register); + +/** + * drm_aux_hpd_bridge_notify - notify hot plug detection events + * @dev: device created for the HPD bridge + * @status: output connection status + * + * A wrapper around drm_bridge_hpd_notify() that is used to report hot plug + * detection events for bridges created via drm_dp_hpd_bridge_register(). + * + * This function shall be called in a context that can sleep. + */ +void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status) +{ + struct auxiliary_device *adev = to_auxiliary_dev(dev); + struct drm_aux_hpd_bridge_data *data = auxiliary_get_drvdata(adev); + + if (!data) + return; + + drm_bridge_hpd_notify(&data->bridge, status); +} +EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify); + +static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ?
0 : -EINVAL; +} + +static const struct drm_bridge_funcs drm_aux_hpd_bridge_funcs = { + .attach = drm_aux_hpd_bridge_attach, +}; + +static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) +{ + struct drm_aux_hpd_bridge_data *data; + + data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = &auxdev->dev; + data->bridge.funcs = &drm_aux_hpd_bridge_funcs; + data->bridge.of_node = dev_get_platdata(data->dev); + data->bridge.ops = DRM_BRIDGE_OP_HPD; + data->bridge.type = id->driver_data; + + auxiliary_set_drvdata(auxdev, data); + + return devm_drm_bridge_add(data->dev, &data->bridge); +} + +static const struct auxiliary_device_id drm_aux_hpd_bridge_table[] = { + { .name = KBUILD_MODNAME ".dp_hpd_bridge", .driver_data = DRM_MODE_CONNECTOR_DisplayPort, }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, drm_aux_hpd_bridge_table); + +static struct auxiliary_driver drm_aux_hpd_bridge_drv = { + .name = "aux_hpd_bridge", + .id_table = drm_aux_hpd_bridge_table, + .probe = drm_aux_hpd_bridge_probe, +}; +module_auxiliary_driver(drm_aux_hpd_bridge_drv); + +MODULE_AUTHOR("Dmitry Baryshkov "); +MODULE_DESCRIPTION("DRM HPD bridge"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index 6af565ac30..7d47052745 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -2596,11 +2596,10 @@ clk_disable: return ret; } -static int cdns_mhdp_remove(struct platform_device *pdev) +static void cdns_mhdp_remove(struct platform_device *pdev) { struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev); unsigned long timeout = msecs_to_jiffies(100); - bool stop_fw = false; int ret; drm_bridge_remove(&mhdp->bridge); @@ -2608,18 +2607,19 @@ static int cdns_mhdp_remove(struct platform_device *pdev) ret = wait_event_timeout(mhdp->fw_load_wq, mhdp->hw_state == MHDP_HW_READY, timeout); - if (ret == 0) - dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n", - __func__); - else - stop_fw = true; - spin_lock(&mhdp->start_lock); mhdp->hw_state = MHDP_HW_STOPPED; spin_unlock(&mhdp->start_lock); - if (stop_fw) + if (ret == 0) { + dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n", + __func__); + } else { ret = cdns_mhdp_set_firmware_active(mhdp, false); + if (ret) + dev_err(mhdp->dev, "Failed to stop firmware (%pe)\n", + ERR_PTR(ret)); + } phy_exit(mhdp->phy); @@ -2634,8 +2634,6 @@ static int cdns_mhdp_remove(struct platform_device *pdev) /* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. 
*/ clk_disable_unprepare(mhdp->clk); - - return ret; } static const struct of_device_id mhdp_ids[] = { @@ -2658,7 +2656,7 @@ static struct platform_driver mhdp_driver = { .of_match_table = mhdp_ids, }, .probe = cdns_mhdp_probe, - .remove = cdns_mhdp_remove, + .remove_new = cdns_mhdp_remove, }; module_platform_driver(mhdp_driver); diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index e5839c89a3..97d4af3d13 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -43,6 +43,8 @@ struct lt8912 { struct videomode mode; + struct regulator_bulk_data supplies[7]; + u8 data_lanes; bool is_power_on; }; @@ -257,6 +259,12 @@ static int lt8912_free_i2c(struct lt8912 *lt) static int lt8912_hard_power_on(struct lt8912 *lt) { + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(lt->supplies), lt->supplies); + if (ret) + return ret; + gpiod_set_value_cansleep(lt->gp_reset, 0); msleep(20); @@ -267,6 +275,9 @@ static void lt8912_hard_power_off(struct lt8912 *lt) { gpiod_set_value_cansleep(lt->gp_reset, 1); msleep(20); + + regulator_bulk_disable(ARRAY_SIZE(lt->supplies), lt->supplies); + lt->is_power_on = false; } @@ -632,6 +643,48 @@ static const struct drm_bridge_funcs lt8912_bridge_funcs = { .get_edid = lt8912_bridge_get_edid, }; +static int lt8912_bridge_resume(struct device *dev) +{ + struct lt8912 *lt = dev_get_drvdata(dev); + int ret; + + ret = lt8912_hard_power_on(lt); + if (ret) + return ret; + + ret = lt8912_soft_power_on(lt); + if (ret) + return ret; + + return lt8912_video_on(lt); +} + +static int lt8912_bridge_suspend(struct device *dev) +{ + struct lt8912 *lt = dev_get_drvdata(dev); + + lt8912_hard_power_off(lt); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(lt8912_bridge_pm_ops, lt8912_bridge_suspend, lt8912_bridge_resume); + +static int lt8912_get_regulators(struct lt8912 *lt) +{ + unsigned int i; + const char * const supply_names[] = { + "vdd", "vccmipirx", "vccsysclk", "vcclvdstx", + "vcchdmitx", "vcclvdspll", "vcchdmipll" + }; + + for (i = 0; i < ARRAY_SIZE(lt->supplies); i++) + lt->supplies[i].supply = supply_names[i]; + + return devm_regulator_bulk_get(lt->dev, ARRAY_SIZE(lt->supplies), + lt->supplies); +} + static int lt8912_parse_dt(struct lt8912 *lt) { struct gpio_desc *gp_reset; @@ -683,6 +736,10 @@ static int lt8912_parse_dt(struct lt8912 *lt) goto err_free_host_node; } + ret = lt8912_get_regulators(lt); + if (ret) + goto err_free_host_node; + of_node_put(port_node); return 0; @@ -768,6 +825,7 @@ static struct i2c_driver lt8912_i2c_driver = { .driver = { .name = "lt8912", .of_match_table = lt8912_dt_match, + .pm = pm_sleep_ptr(&lt8912_bridge_pm_ops), }, .probe = lt8912_probe, .remove = lt8912_remove, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 52d91a0df8..aca5bb0866 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -515,7 +515,6 @@ static struct i2c_adapter *dw_hdmi_i2c_adapter(struct dw_hdmi *hdmi) init_completion(&i2c->cmp); adap = &i2c->adap; - adap->class = I2C_CLASS_DDC; adap->owner = THIS_MODULE; adap->dev.parent = hdmi->dev; adap->algo = &dw_hdmi_algorithm; diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index b5464199b6..62cc3893dc 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1415,11 +1415,9 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
int ret; if (!pdata->pwm_enabled) { - ret = pm_runtime_get_sync(pdata->dev); - if (ret < 0) { - pm_runtime_put_sync(pdata->dev); + ret = pm_runtime_resume_and_get(chip->dev); + if (ret < 0) return ret; - } } if (state->enabled) { @@ -1433,7 +1431,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, SN_GPIO_MUX_MASK << (2 * SN_PWM_GPIO_IDX), SN_GPIO_MUX_SPECIAL << (2 * SN_PWM_GPIO_IDX)); if (ret) { - dev_err(pdata->dev, "failed to mux in PWM function\n"); + dev_err(chip->dev, "failed to mux in PWM function\n"); goto out; } } @@ -1509,7 +1507,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, ret = regmap_write(pdata->regmap, SN_PWM_PRE_DIV_REG, pre_div); if (ret) { - dev_err(pdata->dev, "failed to update PWM_PRE_DIV\n"); + dev_err(chip->dev, "failed to update PWM_PRE_DIV\n"); goto out; } @@ -1521,7 +1519,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, FIELD_PREP(SN_PWM_INV_MASK, state->polarity == PWM_POLARITY_INVERSED); ret = regmap_write(pdata->regmap, SN_PWM_EN_INV_REG, pwm_en_inv); if (ret) { - dev_err(pdata->dev, "failed to update PWM_EN/PWM_INV\n"); + dev_err(chip->dev, "failed to update PWM_EN/PWM_INV\n"); goto out; } @@ -1529,7 +1527,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, out: if (!pdata->pwm_enabled) - pm_runtime_put_sync(pdata->dev); + pm_runtime_put_sync(chip->dev); return ret; } @@ -1589,12 +1587,14 @@ static int ti_sn_pwm_probe(struct auxiliary_device *adev, { struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); - pdata->pchip.dev = pdata->dev; + pdata->pchip.dev = &adev->dev; pdata->pchip.ops = &ti_sn_pwm_ops; pdata->pchip.npwm = 1; pdata->pchip.of_xlate = of_pwm_single_xlate; pdata->pchip.of_pwm_n_cells = 1; + devm_pm_runtime_enable(&adev->dev); + return pwmchip_add(&pdata->pchip); } @@ -1605,7 +1605,7 @@ static void ti_sn_pwm_remove(struct auxiliary_device *adev) pwmchip_remove(&pdata->pchip); if (pdata->pwm_enabled) - pm_runtime_put_sync(pdata->dev); + pm_runtime_put_sync(&adev->dev); } static const struct auxiliary_device_id ti_sn_pwm_id_table[] = { diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c index b588fea125..f9fb35683a 100644 --- a/drivers/gpu/drm/bridge/ti-tpd12s015.c +++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c @@ -179,13 +179,11 @@ static int tpd12s015_probe(struct platform_device *pdev) return 0; } -static int tpd12s015_remove(struct platform_device *pdev) +static void tpd12s015_remove(struct platform_device *pdev) { struct tpd12s015_device *tpd = platform_get_drvdata(pdev); drm_bridge_remove(&tpd->bridge); - - return 0; } static const struct of_device_id tpd12s015_of_match[] = { @@ -197,7 +195,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match); static struct platform_driver tpd12s015_driver = { .probe = tpd12s015_probe, - .remove = tpd12s015_remove, + .remove_new = tpd12s015_remove, .driver = { .name = "tpd12s015", .of_match_table = tpd12s015_of_match, diff --git a/drivers/gpu/drm/ci/arm64.config b/drivers/gpu/drm/ci/arm64.config index b4f6534178..8dbce9919a 100644 --- a/drivers/gpu/drm/ci/arm64.config +++ b/drivers/gpu/drm/ci/arm64.config @@ -186,6 +186,7 @@ CONFIG_HW_RANDOM_MTK=y CONFIG_MTK_DEVAPC=y CONFIG_PWM_MTK_DISP=y CONFIG_MTK_CMDQ=y +CONFIG_REGULATOR_DA9211=y # For nouveau. Note that DRM must be a module so that it's loaded after NFS is up to provide the firmware. 
CONFIG_ARCH_TEGRA=y diff --git a/drivers/gpu/drm/ci/build.sh b/drivers/gpu/drm/ci/build.sh index e5c5dcedd1..f73f3471e9 100644 --- a/drivers/gpu/drm/ci/build.sh +++ b/drivers/gpu/drm/ci/build.sh @@ -19,7 +19,7 @@ if [[ "$KERNEL_ARCH" = "arm64" ]]; then DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dtb" DEVICE_TREES+=" arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dtb" DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dtb" - DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc.dtb" + DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8016-sbc-usb-host.dtb" DEVICE_TREES+=" arch/arm64/boot/dts/qcom/apq8096-db820c.dtb" DEVICE_TREES+=" arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb" DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtb" @@ -58,6 +58,9 @@ git config --global user.email "fdo@example.com" git config --global user.name "freedesktop.org CI" git config --global pull.rebase true +# cleanup git state on the worker +rm -rf .git/rebase-merge + # Try to merge fixes from target repo if [ "$(git ls-remote --exit-code --heads ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes)" ]; then git pull ${UPSTREAM_REPO} ${TARGET_BRANCH}-external-fixes @@ -75,19 +78,19 @@ else fi fi -for opt in $ENABLE_KCONFIGS; do - echo CONFIG_$opt=y >> drivers/gpu/drm/ci/${KERNEL_ARCH}.config -done -for opt in $DISABLE_KCONFIGS; do - echo CONFIG_$opt=n >> drivers/gpu/drm/ci/${KERNEL_ARCH}.config -done - if [[ -n "${MERGE_FRAGMENT}" ]]; then ./scripts/kconfig/merge_config.sh ${DEFCONFIG} drivers/gpu/drm/ci/${MERGE_FRAGMENT} else make `basename ${DEFCONFIG}` fi +for opt in $ENABLE_KCONFIGS; do + ./scripts/config --enable CONFIG_$opt +done +for opt in $DISABLE_KCONFIGS; do + ./scripts/config --disable CONFIG_$opt +done + make ${KERNEL_IMAGE_NAME} mkdir -p /lava-files/ diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml index aeb9bab1b0..084e3ff8e3 100644 --- a/drivers/gpu/drm/ci/gitlab-ci.yml +++ b/drivers/gpu/drm/ci/gitlab-ci.yml @@ -1,11 +1,11 @@ variables: DRM_CI_PROJECT_PATH: &drm-ci-project-path mesa/mesa - DRM_CI_COMMIT_SHA: &drm-ci-commit-sha edfbf74df1d4d6ce54ffe24566108be0e1a98c3d + DRM_CI_COMMIT_SHA: &drm-ci-commit-sha 9d162de9a05155e1c4041857a5848842749164cf UPSTREAM_REPO: git://anongit.freedesktop.org/drm/drm TARGET_BRANCH: drm-next - IGT_VERSION: d1db7333d9c5fbbb05e50b0804123950d9dc1c46 + IGT_VERSION: d2af13d9f5be5ce23d996e4afd3e45990f5ab977 DEQP_RUNNER_GIT_URL: https://gitlab.freedesktop.org/anholt/deqp-runner.git DEQP_RUNNER_GIT_TAG: v0.15.0 @@ -25,7 +25,9 @@ variables: # per-job artifact storage on MinIO JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID} # default kernel for rootfs before injecting the current kernel tree - KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/gfx-ci/linux/v6.4.12-for-mesa-ci-f6b4ad45f48d + KERNEL_REPO: "gfx-ci/linux" + KERNEL_TAG: "v6.6.4-for-mesa-ci-e4f4c500f7fb" + KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${KERNEL_TAG} LAVA_TAGS: subset-1-gfx LAVA_JOB_PRIORITY: 30 @@ -133,6 +135,11 @@ stages: - if: &is-pre-merge-for-marge '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"' when: on_success +.never-post-merge-rules: + rules: + - if: *is-post-merge + when: never + # Rule to filter for only scheduled pipelines. 
.scheduled_pipeline-rules: rules: @@ -150,6 +157,7 @@ stages: .build-rules: rules: - !reference [.no_scheduled_pipelines-rules, rules] + - !reference [.never-post-merge-rules, rules] # Run automatically once all dependency jobs have passed - when: on_success @@ -157,6 +165,7 @@ stages: .container+build-rules: rules: - !reference [.no_scheduled_pipelines-rules, rules] + - !reference [.never-post-merge-rules, rules] - when: manual .ci-deqp-artifacts: @@ -175,6 +184,7 @@ stages: .container-rules: rules: - !reference [.no_scheduled_pipelines-rules, rules] + - !reference [.never-post-merge-rules, rules] # Run pipeline by default in the main project if any CI pipeline # configuration files were changed, to ensure docker images are up to date - if: *is-post-merge diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh index 2f815ee3a8..f1a08b9b14 100755 --- a/drivers/gpu/drm/ci/igt_runner.sh +++ b/drivers/gpu/drm/ci/igt_runner.sh @@ -15,15 +15,21 @@ cat /sys/kernel/debug/device_component/* ' # Dump drm state to confirm that kernel was able to find a connected display: -# TODO this path might not exist for all drivers.. maybe run modetest instead? set +e cat /sys/kernel/debug/dri/*/state set -e case "$DRIVER_NAME" in - rockchip|mediatek|meson) + rockchip|meson) export IGT_FORCE_DRIVER="panfrost" ;; + mediatek) + if [ "$GPU_VERSION" = "mt8173" ]; then + export IGT_FORCE_DRIVER=${DRIVER_NAME} + elif [ "$GPU_VERSION" = "mt8183" ]; then + export IGT_FORCE_DRIVER="panfrost" + fi + ;; amdgpu) # Cannot use HWCI_KERNEL_MODULES as at that point we don't have the module in /lib mv /install/modules/lib/modules/* /lib/modules/. diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml index 2aa4b19166..9faf76e55a 100644 --- a/drivers/gpu/drm/ci/test.yml +++ b/drivers/gpu/drm/ci/test.yml @@ -102,7 +102,7 @@ msm:apq8016: stage: msm variables: DRIVER_NAME: msm - BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc.dtb + BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc-usb-host.dtb GPU_VERSION: apq8016 # disabling unused clocks congests with the MDSS runtime PM trying to # disable those clocks and causes boot to fail. @@ -111,9 +111,6 @@ msm:apq8016: RUNNER_TAG: google-freedreno-db410c script: - ./install/bare-metal/fastboot.sh - rules: - # TODO: current issue: it is not fiding the NFS root. Fix and remove this rule. - - when: never msm:apq8096: extends: @@ -283,9 +280,6 @@ mediatek:mt8173: DEVICE_TYPE: mt8173-elm-hana GPU_VERSION: mt8173 RUNNER_TAG: mesa-ci-x86-64-lava-mt8173-elm-hana - rules: - # TODO: current issue: device is hanging. Fix and remove this rule. - - when: never mediatek:mt8183: extends: @@ -333,16 +327,16 @@ virtio_gpu:none: GPU_VERSION: none extends: - .test-gl + - .test-rules tags: - kvm script: - ln -sf $CI_PROJECT_DIR/install /install - mv install/bzImage /lava-files/bzImage + - mkdir -p $CI_PROJECT_DIR/results + - ln -sf $CI_PROJECT_DIR/results /results - install/crosvm-runner.sh install/igt_runner.sh needs: - debian/x86_64_test-gl - testing:x86_64 - igt:x86_64 - rules: - # TODO: current issue: malloc(): corrupted top size. Fix and remove this rule. 
- - when: never \ No newline at end of file diff --git a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt index 671916067d..ef0cb7c369 100644 --- a/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt +++ b/drivers/gpu/drm/ci/xfails/mediatek-mt8173-fails.txt @@ -1,5 +1,4 @@ kms_3d,Fail -kms_addfb_basic@addfb25-bad-modifier,Fail kms_bw@linear-tiling-1-displays-1920x1080p,Fail kms_bw@linear-tiling-1-displays-2560x1440p,Fail kms_bw@linear-tiling-1-displays-3840x2160p,Fail @@ -9,13 +8,19 @@ kms_bw@linear-tiling-2-displays-3840x2160p,Fail kms_bw@linear-tiling-3-displays-1920x1080p,Fail kms_bw@linear-tiling-3-displays-2560x1440p,Fail kms_bw@linear-tiling-3-displays-3840x2160p,Fail +kms_color@invalid-gamma-lut-sizes,Fail kms_color@pipe-A-invalid-gamma-lut-sizes,Fail kms_color@pipe-B-invalid-gamma-lut-sizes,Fail -kms_force_connector_basic@force-connector-state,Fail +kms_cursor_legacy@cursor-vs-flip-atomic,Fail +kms_cursor_legacy@cursor-vs-flip-legacy,Fail +kms_flip@flip-vs-modeset-vs-hang,Fail +kms_flip@flip-vs-panning-vs-hang,Fail +kms_flip@flip-vs-suspend,Fail +kms_flip@flip-vs-suspend-interruptible,Fail kms_force_connector_basic@force-edid,Fail kms_force_connector_basic@force-load-detect,Fail kms_force_connector_basic@prune-stale-modes,Fail -kms_invalid_mode@int-max-clock,Fail +kms_hdmi_inject@inject-4k,Fail kms_plane_scaling@planes-upscale-20x20,Fail kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25,Fail kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-5,Fail @@ -27,3 +32,5 @@ kms_properties@get_properties-sanity-atomic,Fail kms_properties@plane-properties-atomic,Fail kms_properties@plane-properties-legacy,Fail kms_rmfb@close-fd,Fail +kms_selftest@drm_format,Timeout +kms_selftest@drm_format_helper,Timeout diff --git a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt index 9981682fea..d39d254c93 100644 --- a/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt +++ b/drivers/gpu/drm/ci/xfails/msm-apq8016-fails.txt @@ -6,10 +6,15 @@ kms_cursor_legacy@all-pipes-single-bo,Fail kms_cursor_legacy@all-pipes-single-move,Fail kms_cursor_legacy@all-pipes-torture-bo,Fail kms_cursor_legacy@all-pipes-torture-move,Fail +kms_cursor_legacy@forked-bo,Fail +kms_cursor_legacy@forked-move,Fail kms_cursor_legacy@pipe-A-forked-bo,Fail kms_cursor_legacy@pipe-A-forked-move,Fail kms_cursor_legacy@pipe-A-single-bo,Fail kms_cursor_legacy@pipe-A-single-move,Fail kms_cursor_legacy@pipe-A-torture-bo,Fail kms_cursor_legacy@pipe-A-torture-move,Fail +kms_force_connector_basic@force-edid,Fail kms_hdmi_inject@inject-4k,Fail +kms_selftest@drm_format,Timeout +kms_selftest@drm_format_helper,Timeout diff --git a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt index 9586b2339f..007f21e56d 100644 --- a/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt +++ b/drivers/gpu/drm/ci/xfails/virtio_gpu-none-fails.txt @@ -10,6 +10,49 @@ kms_bw@linear-tiling-1-displays-3840x2160p,Fail kms_bw@linear-tiling-2-displays-1920x1080p,Fail kms_bw@linear-tiling-2-displays-2560x1440p,Fail kms_bw@linear-tiling-2-displays-3840x2160p,Fail +kms_bw@linear-tiling-3-displays-1920x1080p,Fail +kms_bw@linear-tiling-3-displays-2560x1440p,Fail +kms_bw@linear-tiling-3-displays-3840x2160p,Fail +kms_bw@linear-tiling-4-displays-1920x1080p,Fail +kms_bw@linear-tiling-4-displays-2560x1440p,Fail +kms_bw@linear-tiling-4-displays-3840x2160p,Fail +kms_bw@linear-tiling-5-displays-1920x1080p,Fail 
+kms_bw@linear-tiling-5-displays-2560x1440p,Fail +kms_bw@linear-tiling-5-displays-3840x2160p,Fail +kms_bw@linear-tiling-6-displays-1920x1080p,Fail +kms_bw@linear-tiling-6-displays-2560x1440p,Fail +kms_bw@linear-tiling-6-displays-3840x2160p,Fail +kms_bw@linear-tiling-7-displays-1920x1080p,Fail +kms_bw@linear-tiling-7-displays-2560x1440p,Fail +kms_bw@linear-tiling-7-displays-3840x2160p,Fail +kms_bw@linear-tiling-8-displays-1920x1080p,Fail +kms_bw@linear-tiling-8-displays-2560x1440p,Fail +kms_bw@linear-tiling-8-displays-3840x2160p,Fail +kms_flip@absolute-wf_vblank,Fail +kms_flip@absolute-wf_vblank-interruptible,Fail +kms_flip@basic-flip-vs-wf_vblank,Fail +kms_flip@blocking-absolute-wf_vblank,Fail +kms_flip@blocking-absolute-wf_vblank-interruptible,Fail +kms_flip@blocking-wf_vblank,Fail +kms_flip@busy-flip,Fail +kms_flip@dpms-vs-vblank-race,Fail +kms_flip@dpms-vs-vblank-race-interruptible,Fail +kms_flip@flip-vs-absolute-wf_vblank,Fail +kms_flip@flip-vs-absolute-wf_vblank-interruptible,Fail +kms_flip@flip-vs-blocking-wf-vblank,Fail +kms_flip@flip-vs-expired-vblank,Fail +kms_flip@flip-vs-expired-vblank-interruptible,Fail +kms_flip@flip-vs-modeset-vs-hang,Fail +kms_flip@flip-vs-panning-vs-hang,Fail +kms_flip@flip-vs-wf_vblank-interruptible,Fail +kms_flip@modeset-vs-vblank-race,Fail +kms_flip@modeset-vs-vblank-race-interruptible,Fail +kms_flip@plain-flip-fb-recreate,Fail +kms_flip@plain-flip-fb-recreate-interruptible,Fail +kms_flip@plain-flip-ts-check,Fail +kms_flip@plain-flip-ts-check-interruptible,Fail +kms_flip@wf_vblank-ts-check,Fail +kms_flip@wf_vblank-ts-check-interruptible,Fail kms_invalid_mode@int-max-clock,Fail kms_plane_scaling@downscale-with-modifier-factor-0-25,Fail kms_plane_scaling@downscale-with-rotation-factor-0-25,Fail @@ -22,6 +65,9 @@ kms_plane_scaling@upscale-with-modifier-factor-0-25,Fail kms_plane_scaling@upscale-with-pixel-format-20x20,Fail kms_plane_scaling@upscale-with-pixel-format-factor-0-25,Fail kms_plane_scaling@upscale-with-rotation-20x20,Fail +kms_selftest@drm_format,Timeout +kms_selftest@drm_format_helper,Timeout +kms_setmode@basic,Fail kms_vblank@crtc-id,Fail kms_vblank@invalid,Fail kms_vblank@pipe-A-accuracy-idle,Fail diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index f3680f4e69..26c188ce5f 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -2102,7 +2102,6 @@ int drm_dp_aux_register(struct drm_dp_aux *aux) if (!aux->ddc.algo) drm_dp_aux_init(aux); - aux->ddc.class = I2C_CLASS_DDC; aux->ddc.owner = THIS_MODULE; aux->ddc.dev.parent = aux->dev; @@ -2245,6 +2244,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = { { OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) }, /* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */ { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) }, + /* Synaptics DP1.4 MST hubs require DSC for some modes on which it applies HBLANK expansion. 
*/ + { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) }, /* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */ { OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) }, }; @@ -2326,6 +2327,33 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, } EXPORT_SYMBOL(drm_dp_read_desc); +/** + * drm_dp_dsc_sink_bpp_incr() - Get bits per pixel increment + * @dsc_dpcd: DSC capabilities from DPCD + * + * Returns the bpp precision supported by the DP sink. + */ +u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + u8 bpp_increment_dpcd = dsc_dpcd[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT]; + + switch (bpp_increment_dpcd) { + case DP_DSC_BITS_PER_PIXEL_1_16: + return 16; + case DP_DSC_BITS_PER_PIXEL_1_8: + return 8; + case DP_DSC_BITS_PER_PIXEL_1_4: + return 4; + case DP_DSC_BITS_PER_PIXEL_1_2: + return 2; + case DP_DSC_BITS_PER_PIXEL_1_1: + return 1; + } + + return 0; +} +EXPORT_SYMBOL(drm_dp_dsc_sink_bpp_incr); + /** * drm_dp_dsc_sink_max_slice_count() - Get the max slice count * supported by the DSC sink. @@ -3898,3 +3926,142 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux) EXPORT_SYMBOL(drm_panel_dp_aux_backlight); #endif + +/* See DP Standard v2.1 2.6.4.4.1.1, 2.8.4.4, 2.8.7 */ +static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16, + int symbol_size, bool is_mst) +{ + int cycles = DIV_ROUND_UP(pixels * bpp_x16, 16 * symbol_size * lane_count); + int align = is_mst ? 4 / lane_count : 1; + + return ALIGN(cycles, align); +} + +static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_count, + int bpp_x16, int symbol_size, bool is_mst) +{ + int slice_pixels = DIV_ROUND_UP(pixels, slice_count); + int slice_data_cycles = drm_dp_link_symbol_cycles(lane_count, slice_pixels, + bpp_x16, symbol_size, is_mst); + int slice_eoc_cycles = is_mst ? 4 / lane_count : 1; + + return slice_count * (slice_data_cycles + slice_eoc_cycles); +} + +/** + * drm_dp_bw_overhead - Calculate the BW overhead of a DP link stream + * @lane_count: DP link lane count + * @hactive: pixel count of the active period in one scanline of the stream + * @dsc_slice_count: DSC slice count if @flags/%DRM_DP_BW_OVERHEAD_DSC is set + * @bpp_x16: bits per pixel in .4 binary fixed point + * @flags: DRM_DP_BW_OVERHEAD_x flags + * + * Calculate the BW allocation overhead of a DP link stream, depending + * on the link's + * - @lane_count + * - SST/MST mode (@flags / %DRM_DP_BW_OVERHEAD_MST) + * - symbol size (@flags / %DRM_DP_BW_OVERHEAD_UHBR) + * - FEC mode (@flags / %DRM_DP_BW_OVERHEAD_FEC) + * - SSC/REF_CLK mode (@flags / %DRM_DP_BW_OVERHEAD_SSC_REF_CLK) + * as well as the stream's + * - @hactive timing + * - @bpp_x16 color depth + * - compression mode (@flags / %DRM_DP_BW_OVERHEAD_DSC). + * Note that this overhead doesn't account for the 8b/10b, 128b/132b + * channel coding efficiency, for that see + * drm_dp_bw_channel_coding_efficiency(). + * + * Returns the overhead as 100% + overhead% in 1ppm units. + */ +int drm_dp_bw_overhead(int lane_count, int hactive, + int dsc_slice_count, + int bpp_x16, unsigned long flags) +{ + int symbol_size = flags & DRM_DP_BW_OVERHEAD_UHBR ?
32 : 8; + bool is_mst = flags & DRM_DP_BW_OVERHEAD_MST; + u32 overhead = 1000000; + int symbol_cycles; + + if (lane_count == 0 || hactive == 0 || bpp_x16 == 0) { + DRM_DEBUG_KMS("Invalid BW overhead params: lane_count %d, hactive %d, bpp_x16 %d.%04d\n", + lane_count, hactive, + bpp_x16 >> 4, (bpp_x16 & 0xf) * 625); + return 0; + } + + /* + * DP Standard v2.1 2.6.4.1 + * SSC downspread and ref clock variation margin: + * 5300ppm + 300ppm ~ 0.6% + */ + if (flags & DRM_DP_BW_OVERHEAD_SSC_REF_CLK) + overhead += 6000; + + /* + * DP Standard v2.1 2.6.4.1.1, 3.5.1.5.4: + * FEC symbol insertions for 8b/10b channel coding: + * After each 250 data symbols on 2-4 lanes: + * 250 LL + 5 FEC_PARITY_PH + 1 CD_ADJ (256 byte FEC block) + * After each 2 x 250 data symbols on 1 lane: + * 2 * 250 LL + 11 FEC_PARITY_PH + 1 CD_ADJ (512 byte FEC block) + * After 256 (2-4 lanes) or 128 (1 lane) FEC blocks: + * 256 * 256 bytes + 1 FEC_PM + * or + * 128 * 512 bytes + 1 FEC_PM + * (256 * 6 + 1) / (256 * 250) = 2.4015625 % + */ + if (flags & DRM_DP_BW_OVERHEAD_FEC) + overhead += 24016; + + /* + * DP Standard v2.1 2.7.9, 5.9.7 + * The FEC overhead for UHBR is accounted for in its 96.71% channel + * coding efficiency. + */ + WARN_ON((flags & DRM_DP_BW_OVERHEAD_UHBR) && + (flags & DRM_DP_BW_OVERHEAD_FEC)); + + if (flags & DRM_DP_BW_OVERHEAD_DSC) + symbol_cycles = drm_dp_link_dsc_symbol_cycles(lane_count, hactive, + dsc_slice_count, + bpp_x16, symbol_size, + is_mst); + else + symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive, + bpp_x16, symbol_size, + is_mst); + + return DIV_ROUND_UP_ULL(mul_u32_u32(symbol_cycles * symbol_size * lane_count, + overhead * 16), + hactive * bpp_x16); +} +EXPORT_SYMBOL(drm_dp_bw_overhead); + +/** + * drm_dp_bw_channel_coding_efficiency - Get a DP link's channel coding efficiency + * @is_uhbr: Whether the link has a 128b/132b channel coding + * + * Return the channel coding efficiency of the given DP link type, which is + * either 8b/10b or 128b/132b (aka UHBR). The corresponding overhead includes + * the 8b -> 10b, 128b -> 132b pixel data to link symbol conversion overhead + * and for 128b/132b any link or PHY level control symbol insertion overhead + * (LLCP, FEC, PHY sync, see DP Standard v2.1 3.5.2.18). For 8b/10b the + * corresponding FEC overhead is BW allocation specific, included in the value + * returned by drm_dp_bw_overhead(). + * + * Returns the efficiency in the 100%/coding-overhead% ratio in + * 1ppm units. + */ +int drm_dp_bw_channel_coding_efficiency(bool is_uhbr) +{ + if (is_uhbr) + return 967100; + else + /* + * Note that on 8b/10b MST the efficiency is only + * 78.75% due to the 1 out of 64 MTPH packet overhead, + * not accounted for here. + */ + return 800000; +} +EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency); diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 772b00ebd5..f7c6b60629 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -3578,16 +3579,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, * value is in units of PBNs/(timeslots/1 MTP). This value can be used to * convert the number of PBNs required for a given stream to the number of * timeslots this stream requires in each MTP. + * + * Returns the BW / timeslot value in 20.12 fixed point format. 
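+ * + * As a worked example using only the values in the formula below: a 4-lane + * HBR2 link (link_rate 540000) with 8b/10b channel coding (800000 ppm + * efficiency) gives 540000 * 4 * 800000 / ((1000000 * 8 * 5400) >> 12) = + * 163840, i.e. 40 PBN per timeslot in 20.12 format, matching the previous + * link_rate * link_lane_count / 54000 integer calculation.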
*/ -int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, - int link_rate, int link_lane_count) +fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, + int link_rate, int link_lane_count) { + int ch_coding_efficiency = + drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate)); + fixed20_12 ret; + if (link_rate == 0 || link_lane_count == 0) drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n", link_rate, link_lane_count); - /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */ - return link_rate * link_lane_count / 54000; + /* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */ + ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count, + ch_coding_efficiency), + (1000000ULL * 8 * 5400) >> 12); + + return ret; } EXPORT_SYMBOL(drm_dp_get_vc_payload_bw); @@ -4335,7 +4346,7 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, } } - req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div); + req_slots = DIV_ROUND_UP(dfixed_const(pbn), topology_state->pbn_div.full); drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n", port->connector->base.id, port->connector->name, @@ -4726,17 +4737,28 @@ EXPORT_SYMBOL(drm_dp_check_act_status); int drm_dp_calc_pbn_mode(int clock, int bpp) { /* - * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on * common multiplier to render an integer PBN for all link rate/lane * counts combinations * calculate - * peak_kbps *= (1006/1000) - * peak_kbps *= (64/54) - * peak_kbps *= 8 convert to bytes + * peak_kbps = clock * bpp / 16 + * peak_kbps *= SSC overhead / 1000000 + * peak_kbps /= 8 convert to Kbytes + * peak_kBps *= (64/54) / 1000 convert to PBN + */ + /* + * TODO: Use the actual link and mode parameters to calculate + * the overhead. For now it's assumed that these are + * 4 link lanes, 4096 hactive pixels, which don't add any + * significant data padding overhead and that there is no DSC + * or FEC overhead. 
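+ * + * As a worked example under those assumptions: a 1080p60 stream + * (clock 148500 kHz, bpp 384 in .4 fixed point) sees a 1006000 ppm + * overhead (1000000 base + 6000 ppm SSC margin), so with + * 64 * 1006000 >> 4 = 4024000 the result is + * DIV64_U64_ROUND_UP(148500 * 384 * 4024000, 432000000000) = 532 PBN.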
*/ - return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006 >> 4), - 1000 * 8 * 54 * 1000); + int overhead = drm_dp_bw_overhead(4, 4096, 0, bpp, + DRM_DP_BW_OVERHEAD_MST | + DRM_DP_BW_OVERHEAD_SSC_REF_CLK); + + return DIV64_U64_ROUND_UP(mul_u32_u32(clock * bpp, 64 * overhead >> 4), + 1000000ULL * 8 * 54 * 1000); } EXPORT_SYMBOL(drm_dp_calc_pbn_mode); @@ -4861,7 +4883,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m, state = to_drm_dp_mst_topology_state(mgr->base.state); seq_printf(m, "\n*** Atomic state info ***\n"); seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n", - state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div); + state->payload_mask, mgr->max_payloads, state->start_slot, + dfixed_trunc(state->pbn_div)); seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | status | sink name |\n"); for (i = 0; i < mgr->max_payloads; i++) { @@ -5126,13 +5149,67 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port, return false; } +static bool +drm_dp_mst_port_downstream_of_parent_locked(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_mst_port *parent) +{ + if (!mgr->mst_primary) + return false; + + port = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary, + port); + if (!port) + return false; + + if (!parent) + return true; + + parent = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary, + parent); + if (!parent) + return false; + + if (!parent->mstb) + return false; + + return drm_dp_mst_port_downstream_of_branch(port, parent->mstb); +} + +/** + * drm_dp_mst_port_downstream_of_parent - check if a port is downstream of a parent port + * @mgr: MST topology manager + * @port: the port being looked up + * @parent: the parent port + * + * The function returns %true if @port is downstream of @parent. If @parent is + * %NULL - denoting the root port - the function returns %true if @port is in + * @mgr's topology. 
+ */ +bool +drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port, + struct drm_dp_mst_port *parent) +{ + bool ret; + + mutex_lock(&mgr->lock); + ret = drm_dp_mst_port_downstream_of_parent_locked(mgr, port, parent); + mutex_unlock(&mgr->lock); + + return ret; +} +EXPORT_SYMBOL(drm_dp_mst_port_downstream_of_parent); + static int drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, - struct drm_dp_mst_topology_state *state); + struct drm_dp_mst_topology_state *state, + struct drm_dp_mst_port **failing_port); static int drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb, - struct drm_dp_mst_topology_state *state) + struct drm_dp_mst_topology_state *state, + struct drm_dp_mst_port **failing_port) { struct drm_dp_mst_atomic_payload *payload; struct drm_dp_mst_port *port; @@ -5161,7 +5238,7 @@ drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb, drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb); list_for_each_entry(port, &mstb->ports, next) { - ret = drm_dp_mst_atomic_check_port_bw_limit(port, state); + ret = drm_dp_mst_atomic_check_port_bw_limit(port, state, failing_port); if (ret < 0) return ret; @@ -5173,7 +5250,8 @@ drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb, static int drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, - struct drm_dp_mst_topology_state *state) + struct drm_dp_mst_topology_state *state, + struct drm_dp_mst_port **failing_port) { struct drm_dp_mst_atomic_payload *payload; int pbn_used = 0; @@ -5194,13 +5272,15 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] no BW available for the port\n", port->parent, port); + *failing_port = port; return -EINVAL; } pbn_used = payload->pbn; } else { pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb, - state); + state, + failing_port); if (pbn_used <= 0) return pbn_used; } @@ -5209,6 +5289,7 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n", port->parent, port, pbn_used, port->full_pbn); + *failing_port = port; return -ENOSPC; } @@ -5261,10 +5342,10 @@ drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr } if (!payload_count) - mst_state->pbn_div = 0; + mst_state->pbn_div.full = dfixed_const(0); drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n", - mgr, mst_state, mst_state->pbn_div, avail_slots, + mgr, mst_state, dfixed_trunc(mst_state->pbn_div), avail_slots, mst_state->total_avail_slots - avail_slots); return 0; @@ -5386,20 +5467,84 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, } EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc); +/** + * drm_dp_mst_atomic_check_mgr - Check the atomic state of an MST topology manager + * @state: The global atomic state + * @mgr: Manager to check + * @mst_state: The MST atomic state for @mgr + * @failing_port: Returns the port with a BW limitation + * + * Checks the given MST manager's topology state for an atomic update to ensure + * that it's valid. This includes checking whether there's enough bandwidth to + * support the new timeslot allocations in the atomic update. 
+ * + * Any atomic drivers supporting DP MST must make sure to call this or + * the drm_dp_mst_atomic_check() function after checking the rest of their state + * in their &drm_mode_config_funcs.atomic_check() callback. + * + * See also: + * drm_dp_mst_atomic_check() + * drm_dp_atomic_find_time_slots() + * drm_dp_atomic_release_time_slots() + * + * Returns: + * - 0 if the new state is valid + * - %-ENOSPC, if the new state is invalid, because of BW limitation + * @failing_port is set to: + * + * - The non-root port where a BW limit check failed + * with all the ports downstream of @failing_port passing + * the BW limit check. + * The returned port pointer is valid until at least + * one payload downstream of it exists. + * - %NULL if the BW limit check failed at the root port + * with all the ports downstream of the root port passing + * the BW limit check. + * + * - %-EINVAL, if the new state is invalid, because the root port has + * too many payloads. + */ +int drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_port **failing_port) +{ + int ret; + + *failing_port = NULL; + + if (!mgr->mst_state) + return 0; + + mutex_lock(&mgr->lock); + ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary, + mst_state, + failing_port); + mutex_unlock(&mgr->lock); + + if (ret < 0) + return ret; + + return drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state); +} +EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr); + /** * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an * atomic update is valid * @state: Pointer to the new &struct drm_dp_mst_topology_state * * Checks the given topology state for an atomic update to ensure that it's - * valid. This includes checking whether there's enough bandwidth to support - * the new timeslot allocations in the atomic update. + * valid, calling drm_dp_mst_atomic_check_mgr() for all MST managers in the + * atomic state. This includes checking whether there's enough bandwidth to + * support the new timeslot allocations in the atomic update. * * Any atomic drivers supporting DP MST must make sure to call this after * checking the rest of their state in their * &drm_mode_config_funcs.atomic_check() callback.
* * See also: + * drm_dp_mst_atomic_check_mgr() * drm_dp_atomic_find_time_slots() * drm_dp_atomic_release_time_slots() * @@ -5414,21 +5559,11 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state) int i, ret = 0; for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { - if (!mgr->mst_state) - continue; + struct drm_dp_mst_port *tmp_port; - ret = drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state); + ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &tmp_port); if (ret) break; - - mutex_lock(&mgr->lock); - ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary, - mst_state); - mutex_unlock(&mgr->lock); - if (ret < 0) - break; - else - ret = 0; } return ret; @@ -5793,7 +5928,6 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port) aux->ddc.algo_data = aux; aux->ddc.retries = 3; - aux->ddc.class = I2C_CLASS_DDC; aux->ddc.owner = THIS_MODULE; /* FIXME: set the kdev of the port's connector as parent */ aux->ddc.dev.parent = parent_dev; @@ -5884,6 +6018,7 @@ static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port) struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) { struct drm_dp_mst_port *immediate_upstream_port; + struct drm_dp_aux *immediate_upstream_aux; struct drm_dp_mst_port *fec_port; struct drm_dp_desc desc = {}; u8 endpoint_fec; @@ -5948,21 +6083,25 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) * - Port is on primary branch device * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG) */ - if (drm_dp_read_desc(port->mgr->aux, &desc, true)) + if (immediate_upstream_port) + immediate_upstream_aux = &immediate_upstream_port->aux; + else + immediate_upstream_aux = port->mgr->aux; + + if (drm_dp_read_desc(immediate_upstream_aux, &desc, true)) return NULL; - if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) && - port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 && - port->parent == port->mgr->mst_primary) { + if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) { u8 dpcd_ext[DP_RECEIVER_CAP_SIZE]; - if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0) + if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0) return NULL; - if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) && + if (dpcd_ext[DP_DPCD_REV] >= DP_DPCD_REV_14 && + ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) && ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) - != DP_DWN_STRM_PORT_TYPE_ANALOG)) - return port->mgr->aux; + != DP_DWN_STRM_PORT_TYPE_ANALOG))) + return immediate_upstream_aux; } /* diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c deleted file mode 100644 index a4ad6fd13a..0000000000 --- a/drivers/gpu/drm/drm_agpsupport.c +++ /dev/null @@ -1,451 +0,0 @@ -/* - * \file drm_agpsupport.c - * DRM support for AGP/GART backend - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - */ - -/* - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include -#include - -#if IS_ENABLED(CONFIG_AGP) -#include -#endif - -#include -#include -#include -#include - -#include "drm_legacy.h" - -#if IS_ENABLED(CONFIG_AGP) - -/* - * Get AGP information. - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device has been initialized and acquired and fills in the - * drm_agp_info structure with the information in drm_agp_head::agp_info. - */ -int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info) -{ - struct agp_kern_info *kern; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - - kern = &dev->agp->agp_info; - info->agp_version_major = kern->version.major; - info->agp_version_minor = kern->version.minor; - info->mode = kern->mode; - info->aperture_base = kern->aper_base; - info->aperture_size = kern->aper_size * 1024 * 1024; - info->memory_allowed = kern->max_memory << PAGE_SHIFT; - info->memory_used = kern->current_memory << PAGE_SHIFT; - info->id_vendor = kern->device->vendor; - info->id_device = kern->device->device; - - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_info); - -int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_info *info = data; - int err; - - err = drm_legacy_agp_info(dev, info); - if (err) - return err; - - return 0; -} - -/* - * Acquire the AGP device. - * - * \param dev DRM device that is to acquire AGP. - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device hasn't been acquired before and calls - * \c agp_backend_acquire. - */ -int drm_legacy_agp_acquire(struct drm_device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev->dev); - - if (!dev->agp) - return -ENODEV; - if (dev->agp->acquired) - return -EBUSY; - dev->agp->bridge = agp_backend_acquire(pdev); - if (!dev->agp->bridge) - return -ENODEV; - dev->agp->acquired = 1; - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_acquire); - -/* - * Acquire the AGP device (ioctl). - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device hasn't been acquired before and calls - * \c agp_backend_acquire. - */ -int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return drm_legacy_agp_acquire((struct drm_device *)file_priv->minor->dev); -} - -/* - * Release the AGP device. 
- * - * \param dev DRM device that is to release AGP. - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device has been acquired and calls \c agp_backend_release. - */ -int drm_legacy_agp_release(struct drm_device *dev) -{ - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - agp_backend_release(dev->agp->bridge); - dev->agp->acquired = 0; - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_release); - -int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return drm_legacy_agp_release(dev); -} - -/* - * Enable the AGP bus. - * - * \param dev DRM device that has previously acquired AGP. - * \param mode Requested AGP mode. - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device has been acquired but not enabled, and calls - * \c agp_enable. - */ -int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) -{ - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - - dev->agp->mode = mode.mode; - agp_enable(dev->agp->bridge, mode.mode); - dev->agp->enabled = 1; - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_enable); - -int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_mode *mode = data; - - return drm_legacy_agp_enable(dev, *mode); -} - -/* - * Allocate AGP memory. - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device is present and has been acquired, allocates the - * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it. - */ -int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) -{ - struct drm_agp_mem *entry; - struct agp_memory *memory; - unsigned long pages; - u32 type; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - pages = DIV_ROUND_UP(request->size, PAGE_SIZE); - type = (u32) request->type; - memory = agp_allocate_memory(dev->agp->bridge, pages, type); - if (!memory) { - kfree(entry); - return -ENOMEM; - } - - entry->handle = (unsigned long)memory->key + 1; - entry->memory = memory; - entry->bound = 0; - entry->pages = pages; - list_add(&entry->head, &dev->agp->memory); - - request->handle = entry->handle; - request->physical = memory->physical; - - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_alloc); - - -int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_buffer *request = data; - - return drm_legacy_agp_alloc(dev, request); -} - -/* - * Search for the AGP memory entry associated with a handle. - * - * \param dev DRM device structure. - * \param handle AGP memory handle. - * \return pointer to the drm_agp_mem structure associated with \p handle. - * - * Walks through drm_agp_head::memory until finding a matching handle. - */ -static struct drm_agp_mem *drm_legacy_agp_lookup_entry(struct drm_device *dev, - unsigned long handle) -{ - struct drm_agp_mem *entry; - - list_for_each_entry(entry, &dev->agp->memory, head) { - if (entry->handle == handle) - return entry; - } - return NULL; -} - -/* - * Unbind AGP memory from the GATT (ioctl). - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device is present and acquired, looks-up the AGP memory - * entry and passes it to the unbind_agp() function. 
- */ -int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) -{ - struct drm_agp_mem *entry; - int ret; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - entry = drm_legacy_agp_lookup_entry(dev, request->handle); - if (!entry || !entry->bound) - return -EINVAL; - ret = agp_unbind_memory(entry->memory); - if (ret == 0) - entry->bound = 0; - return ret; -} -EXPORT_SYMBOL(drm_legacy_agp_unbind); - - -int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_binding *request = data; - - return drm_legacy_agp_unbind(dev, request); -} - -/* - * Bind AGP memory into the GATT (ioctl) - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device is present and has been acquired and that no memory - * is currently bound into the GATT. Looks-up the AGP memory entry and passes - * it to bind_agp() function. - */ -int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) -{ - struct drm_agp_mem *entry; - int retcode; - int page; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - entry = drm_legacy_agp_lookup_entry(dev, request->handle); - if (!entry || entry->bound) - return -EINVAL; - page = DIV_ROUND_UP(request->offset, PAGE_SIZE); - retcode = agp_bind_memory(entry->memory, page); - if (retcode) - return retcode; - entry->bound = dev->agp->base + (page << PAGE_SHIFT); - DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n", - dev->agp->base, entry->bound); - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_bind); - - -int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_binding *request = data; - - return drm_legacy_agp_bind(dev, request); -} - -/* - * Free AGP memory (ioctl). - * - * \return zero on success or a negative number on failure. - * - * Verifies the AGP device is present and has been acquired and looks up the - * AGP memory entry. If the memory is currently bound, unbind it via - * unbind_agp(). Frees it via free_agp() as well as the entry itself - * and unlinks from the doubly linked list it's inserted in. - */ -int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) -{ - struct drm_agp_mem *entry; - - if (!dev->agp || !dev->agp->acquired) - return -EINVAL; - entry = drm_legacy_agp_lookup_entry(dev, request->handle); - if (!entry) - return -EINVAL; - if (entry->bound) - agp_unbind_memory(entry->memory); - - list_del(&entry->head); - - agp_free_memory(entry->memory); - kfree(entry); - return 0; -} -EXPORT_SYMBOL(drm_legacy_agp_free); - - -int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_agp_buffer *request = data; - - return drm_legacy_agp_free(dev, request); -} - -/* - * Initialize the AGP resources. - * - * \return pointer to a drm_agp_head structure. - * - * Gets the drm_agp_t structure which is made available by the agpgart module - * via the inter_module_* functions. Creates and initializes a drm_agp_head - * structure. - * - * Note that final cleanup of the kmalloced structure is directly done in - * drm_pci_agp_destroy. 
- */ -struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev->dev); - struct drm_agp_head *head = NULL; - - head = kzalloc(sizeof(*head), GFP_KERNEL); - if (!head) - return NULL; - head->bridge = agp_find_bridge(pdev); - if (!head->bridge) { - head->bridge = agp_backend_acquire(pdev); - if (!head->bridge) { - kfree(head); - return NULL; - } - agp_copy_info(head->bridge, &head->agp_info); - agp_backend_release(head->bridge); - } else { - agp_copy_info(head->bridge, &head->agp_info); - } - if (head->agp_info.chipset == NOT_SUPPORTED) { - kfree(head); - return NULL; - } - INIT_LIST_HEAD(&head->memory); - head->cant_use_aperture = head->agp_info.cant_use_aperture; - head->page_mask = head->agp_info.page_mask; - head->base = head->agp_info.aper_base; - return head; -} -/* Only exported for i810.ko */ -EXPORT_SYMBOL(drm_legacy_agp_init); - -/** - * drm_legacy_agp_clear - Clear AGP resource list - * @dev: DRM device - * - * Iterate over all AGP resources and remove them. But keep the AGP head - * intact so it can still be used. It is safe to call this if AGP is disabled or - * was already removed. - * - * Cleanup is only done for drivers who have DRIVER_LEGACY set. - */ -void drm_legacy_agp_clear(struct drm_device *dev) -{ - struct drm_agp_mem *entry, *tempe; - - if (!dev->agp) - return; - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { - if (entry->bound) - agp_unbind_memory(entry->memory); - agp_free_memory(entry->memory); - kfree(entry); - } - INIT_LIST_HEAD(&dev->agp->memory); - - if (dev->agp->acquired) - drm_legacy_agp_release(dev); - - dev->agp->acquired = 0; - dev->agp->enabled = 0; -} - -#endif diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index f1a503aafe..a91737adf8 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -733,6 +733,7 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, drm_get_color_encoding_name(state->color_encoding)); drm_printf(p, "\tcolor-range=%s\n", drm_get_color_range_name(state->color_range)); + drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); if (plane->funcs->atomic_print_state) plane->funcs->atomic_print_state(p, state); @@ -1773,6 +1774,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, struct drm_crtc *crtc; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; + struct drm_private_obj *obj; if (!drm_drv_uses_atomic_modeset(dev)) return; @@ -1801,6 +1803,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, if (take_locks) drm_modeset_unlock(&dev->mode_config.connection_mutex); drm_connector_list_iter_end(&conn_iter); + + list_for_each_entry(obj, &config->privobj_list, head) { + if (take_locks) + drm_modeset_lock(&obj->lock, NULL); + drm_atomic_private_obj_print_state(p, obj->state); + if (take_locks) + drm_modeset_unlock(&obj->lock); + } } /** diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 68ffcc0b00..39ef0a6add 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -795,9 +795,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, EXPORT_SYMBOL(drm_atomic_helper_check_modeset); /** - * drm_atomic_helper_check_wb_encoder_state() - Check writeback encoder state - * @encoder: encoder state to check - * @conn_state: connector state to check + * 
drm_atomic_helper_check_wb_connector_state() - Check writeback connector state + * @connector: corresponding connector + * @state: the driver state object * * Checks if the writeback connector state is valid, and returns an error if it * isn't. @@ -806,9 +806,11 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset); * Zero for success or -errno */ int -drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder, - struct drm_connector_state *conn_state) +drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector, + struct drm_atomic_state *state) { + struct drm_connector_state *conn_state = + drm_atomic_get_new_connector_state(state, connector); struct drm_writeback_job *wb_job = conn_state->writeback_job; struct drm_property_blob *pixel_format_blob; struct drm_framebuffer *fb; @@ -827,11 +829,11 @@ drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder, if (fb->format->format == formats[i]) return 0; - drm_dbg_kms(encoder->dev, "Invalid pixel format %p4cc\n", &fb->format->format); + drm_dbg_kms(connector->dev, "Invalid pixel format %p4cc\n", &fb->format->format); return -EINVAL; } -EXPORT_SYMBOL(drm_atomic_helper_check_wb_encoder_state); +EXPORT_SYMBOL(drm_atomic_helper_check_wb_connector_state); /** * drm_atomic_helper_check_plane_state() - Check plane state for validity @@ -2382,10 +2384,10 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, EXPORT_SYMBOL(drm_atomic_helper_setup_commit); /** - * drm_atomic_helper_wait_for_dependencies - wait for required preceeding commits + * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits * @old_state: atomic state object with old state structures * - * This function waits for all preceeding commits that touch the same CRTC as + * This function waits for all preceding commits that touch the same CRTC as * @old_state to both be committed to the hardware (as signalled by * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event). 
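As a usage sketch for the renamed helper (illustrative only; the "foo" driver prefix is hypothetical, not part of this patch): a writeback connector's &drm_connector_helper_funcs.atomic_check implementation now passes the full atomic state, and the helper looks up the new connector state itself:

static int foo_wb_connector_atomic_check(struct drm_connector *connector,
					 struct drm_atomic_state *state)
{
	/* Rejects a queued writeback job whose framebuffer format is not
	 * listed in the connector's pixel_formats_blob_ptr. */
	return drm_atomic_helper_check_wb_connector_state(connector, state);
}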
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 784e63d70a..519228eb10 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -275,6 +275,20 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state, plane_state->normalized_zpos = val; } } + + if (plane->hotspot_x_property) { + if (!drm_object_property_get_default_value(&plane->base, + plane->hotspot_x_property, + &val)) + plane_state->hotspot_x = val; + } + + if (plane->hotspot_y_property) { + if (!drm_object_property_get_default_value(&plane->base, + plane->hotspot_y_property, + &val)) + plane_state->hotspot_y = val; + } } EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset); @@ -338,6 +352,7 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, state->fence = NULL; state->commit = NULL; state->fb_damage_clips = NULL; + state->color_mgmt_changed = false; } EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 98d3b10c08..29d4940188 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -362,48 +362,6 @@ static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state, return fence_ptr; } -static int -drm_atomic_replace_property_blob_from_id(struct drm_device *dev, - struct drm_property_blob **blob, - uint64_t blob_id, - ssize_t expected_size, - ssize_t expected_elem_size, - bool *replaced) -{ - struct drm_property_blob *new_blob = NULL; - - if (blob_id != 0) { - new_blob = drm_property_lookup_blob(dev, blob_id); - if (new_blob == NULL) { - drm_dbg_atomic(dev, - "cannot find blob ID %llu\n", blob_id); - return -EINVAL; - } - - if (expected_size > 0 && - new_blob->length != expected_size) { - drm_dbg_atomic(dev, - "[BLOB:%d] length %zu different from expected %zu\n", - new_blob->base.id, new_blob->length, expected_size); - drm_property_blob_put(new_blob); - return -EINVAL; - } - if (expected_elem_size > 0 && - new_blob->length % expected_elem_size != 0) { - drm_dbg_atomic(dev, - "[BLOB:%d] length %zu not divisible by element size %zu\n", - new_blob->base.id, new_blob->length, expected_elem_size); - drm_property_blob_put(new_blob); - return -EINVAL; - } - } - - *replaced |= drm_property_replace_blob(blob, new_blob); - drm_property_blob_put(new_blob); - - return 0; -} - static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, struct drm_crtc_state *state, struct drm_property *property, uint64_t val) @@ -424,7 +382,7 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, } else if (property == config->prop_vrr_enabled) { state->vrr_enabled = val; } else if (property == config->degamma_lut_property) { - ret = drm_atomic_replace_property_blob_from_id(dev, + ret = drm_property_replace_blob_from_id(dev, &state->degamma_lut, val, -1, sizeof(struct drm_color_lut), @@ -432,7 +390,7 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, state->color_mgmt_changed |= replaced; return ret; } else if (property == config->ctm_property) { - ret = drm_atomic_replace_property_blob_from_id(dev, + ret = drm_property_replace_blob_from_id(dev, &state->ctm, val, sizeof(struct drm_color_ctm), -1, @@ -440,7 +398,7 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, state->color_mgmt_changed |= replaced; return ret; } else if (property == config->gamma_lut_property) { - ret = drm_atomic_replace_property_blob_from_id(dev, + ret = 
drm_property_replace_blob_from_id(dev, &state->gamma_lut, val, -1, sizeof(struct drm_color_lut), @@ -581,7 +539,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, } else if (property == plane->color_range_property) { state->color_range = val; } else if (property == config->prop_fb_damage_clips) { - ret = drm_atomic_replace_property_blob_from_id(dev, + ret = drm_property_replace_blob_from_id(dev, &state->fb_damage_clips, val, -1, @@ -593,6 +551,22 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, } else if (plane->funcs->atomic_set_property) { return plane->funcs->atomic_set_property(plane, state, property, val); + } else if (property == plane->hotspot_x_property) { + if (plane->type != DRM_PLANE_TYPE_CURSOR) { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n", + plane->base.id, plane->name, val); + return -EINVAL; + } + state->hotspot_x = val; + } else if (property == plane->hotspot_y_property) { + if (plane->type != DRM_PLANE_TYPE_CURSOR) { + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n", + plane->base.id, plane->name, val); + return -EINVAL; + } + state->hotspot_y = val; } else { drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n", @@ -653,6 +627,10 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->scaling_filter; } else if (plane->funcs->atomic_get_property) { return plane->funcs->atomic_get_property(plane, state, property, val); + } else if (property == plane->hotspot_x_property) { + *val = state->hotspot_x; + } else if (property == plane->hotspot_y_property) { + *val = state->hotspot_y; } else { drm_dbg_atomic(dev, "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n", @@ -758,7 +736,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, if (state->link_status != DRM_LINK_STATUS_GOOD) state->link_status = val; } else if (property == config->hdr_output_metadata_property) { - ret = drm_atomic_replace_property_blob_from_id(dev, + ret = drm_property_replace_blob_from_id(dev, &state->hdr_output_metadata, val, sizeof(struct hdr_output_metadata), -1, @@ -1006,13 +984,28 @@ out: return ret; } +static int drm_atomic_check_prop_changes(int ret, uint64_t old_val, uint64_t prop_value, + struct drm_property *prop) +{ + if (ret != 0 || old_val != prop_value) { + drm_dbg_atomic(prop->dev, + "[PROP:%d:%s] No prop can be changed during async flip\n", + prop->base.id, prop->name); + return -EINVAL; + } + + return 0; +} + int drm_atomic_set_property(struct drm_atomic_state *state, struct drm_file *file_priv, struct drm_mode_object *obj, struct drm_property *prop, - uint64_t prop_value) + u64 prop_value, + bool async_flip) { struct drm_mode_object *ref; + u64 old_val; int ret; if (!drm_property_change_valid_get(prop, prop_value, &ref)) @@ -1029,6 +1022,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state, break; } + if (async_flip) { + ret = drm_atomic_connector_get_property(connector, connector_state, + prop, &old_val); + ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); + break; + } + ret = drm_atomic_connector_set_property(connector, connector_state, file_priv, prop, prop_value); @@ -1044,6 +1044,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state, break; } + if (async_flip) { + ret = drm_atomic_crtc_get_property(crtc, crtc_state, + prop, &old_val); + ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); + break; + } + ret = drm_atomic_crtc_set_property(crtc, crtc_state, prop, 
prop_value); break; @@ -1051,6 +1058,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state, case DRM_MODE_OBJECT_PLANE: { struct drm_plane *plane = obj_to_plane(obj); struct drm_plane_state *plane_state; + struct drm_mode_config *config = &plane->dev->mode_config; plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) { @@ -1058,6 +1066,21 @@ int drm_atomic_set_property(struct drm_atomic_state *state, break; } + if (async_flip && prop != config->prop_fb_id) { + ret = drm_atomic_plane_get_property(plane, plane_state, + prop, &old_val); + ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); + break; + } + + if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) { + drm_dbg_atomic(prop->dev, + "[OBJECT:%d] Only primary planes can be changed during async flip\n", + obj->id); + ret = -EINVAL; + break; + } + ret = drm_atomic_plane_set_property(plane, plane_state, file_priv, prop, prop_value); @@ -1323,6 +1346,18 @@ static void complete_signaling(struct drm_device *dev, kfree(fence_state); } +static void +set_async_flip(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int i; + + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + crtc_state->async_flip = true; + } +} + int drm_mode_atomic_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -1337,6 +1372,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, struct drm_out_fence_state *fence_state; int ret = 0; unsigned int i, j, num_fences; + bool async_flip = false; /* disallow for drivers not supporting atomic: */ if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) @@ -1363,9 +1399,13 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, } if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) { - drm_dbg_atomic(dev, - "commit failed: invalid flag DRM_MODE_PAGE_FLIP_ASYNC\n"); - return -EINVAL; + if (!dev->mode_config.async_page_flip) { + drm_dbg_atomic(dev, + "commit failed: DRM_MODE_PAGE_FLIP_ASYNC not supported\n"); + return -EINVAL; + } + + async_flip = true; } /* can't test and expect an event at the same time. */ @@ -1450,8 +1490,8 @@ retry: goto out; } - ret = drm_atomic_set_property(state, file_priv, - obj, prop, prop_value); + ret = drm_atomic_set_property(state, file_priv, obj, + prop, prop_value, async_flip); if (ret) { drm_mode_object_put(obj); goto out; @@ -1468,6 +1508,9 @@ retry: if (ret) goto out; + if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) + set_async_flip(state); + if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { ret = drm_atomic_check_only(state); } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 6899b3dc1f..22aa015df3 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -37,13 +37,12 @@ #include #include "drm_internal.h" -#include "drm_legacy.h" /** * DOC: master and authentication * * &struct drm_master is used to track groups of clients with open - * primary/legacy device nodes. For every &struct drm_file which has had at + * primary device nodes. For every &struct drm_file which has had at * least once successfully became the device master (either through the * SET_MASTER IOCTL, or implicitly through opening the primary device node when * no one else is the current master that time) there exists one &drm_master. 
@@ -139,7 +138,6 @@ struct drm_master *drm_master_create(struct drm_device *dev) return NULL; kref_init(&master->refcount); - drm_master_legacy_init(master); idr_init_base(&master->magic_map, 1); master->dev = dev; @@ -365,8 +363,6 @@ void drm_master_release(struct drm_file *file_priv) if (!drm_is_current_master_locked(file_priv)) goto out; - drm_legacy_lock_master_cleanup(dev, master); - if (dev->master == file_priv->master) drm_drop_master(dev, file_priv); out: @@ -429,8 +425,6 @@ static void drm_master_destroy(struct kref *kref) if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_lease_destroy(master); - drm_legacy_master_rmmaps(dev, master); - idr_destroy(&master->magic_map); idr_destroy(&master->leases); idr_destroy(&master->lessee_idr); diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index e1cfba2ff5..4f6f8c662d 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -1391,50 +1391,6 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np) EXPORT_SYMBOL(of_drm_find_bridge); #endif -#ifdef CONFIG_DEBUG_FS -static int drm_bridge_chains_info(struct seq_file *m, void *data) -{ - struct drm_debugfs_entry *entry = m->private; - struct drm_device *dev = entry->dev; - struct drm_printer p = drm_seq_file_printer(m); - struct drm_mode_config *config = &dev->mode_config; - struct drm_encoder *encoder; - unsigned int bridge_idx = 0; - - list_for_each_entry(encoder, &config->encoder_list, head) { - struct drm_bridge *bridge; - - drm_printf(&p, "encoder[%u]\n", encoder->base.id); - - drm_for_each_bridge_in_chain(encoder, bridge) { - drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x", - bridge_idx, bridge->type, bridge->ops); - -#ifdef CONFIG_OF - if (bridge->of_node) - drm_printf(&p, ", OF: %pOFfc", bridge->of_node); -#endif - - drm_printf(&p, "\n"); - - bridge_idx++; - } - } - - return 0; -} - -static const struct drm_debugfs_info drm_bridge_debugfs_list[] = { - { "bridge_chains", drm_bridge_chains_info, 0 }, -}; - -void drm_bridge_debugfs_init(struct drm_device *dev) -{ - drm_debugfs_add_files(dev, drm_bridge_debugfs_list, - ARRAY_SIZE(drm_bridge_debugfs_list)); -} -#endif - MODULE_AUTHOR("Ajay Kumar "); MODULE_DESCRIPTION("DRM bridge infrastructure"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c index 8239ad43ae..3acd67021e 100644 --- a/drivers/gpu/drm/drm_bridge_connector.c +++ b/drivers/gpu/drm/drm_bridge_connector.c @@ -198,12 +198,6 @@ static void drm_bridge_connector_destroy(struct drm_connector *connector) struct drm_bridge_connector *bridge_connector = to_drm_bridge_connector(connector); - if (bridge_connector->bridge_hpd) { - struct drm_bridge *hpd = bridge_connector->bridge_hpd; - - drm_bridge_hpd_disable(hpd); - } - drm_connector_unregister(connector); drm_connector_cleanup(connector); diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c deleted file mode 100644 index 86700560fe..0000000000 --- a/drivers/gpu/drm/drm_bufs.c +++ /dev/null @@ -1,1627 +0,0 @@ -/* - * Legacy: Generic DRM Buffer Management - * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Author: Rickard E. 
(Rik) Faith - * Author: Gareth Hughes - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - -#include "drm_legacy.h" - - -static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, - struct drm_local_map *map) -{ - struct drm_map_list *entry; - - list_for_each_entry(entry, &dev->maplist, head) { - /* - * Because the kernel-userspace ABI is fixed at a 32-bit offset - * while PCI resources may live above that, we only compare the - * lower 32 bits of the map offset for maps of type - * _DRM_FRAMEBUFFER or _DRM_REGISTERS. - * It is assumed that if a driver have more than one resource - * of each type, the lower 32 bits are different. - */ - if (!entry->map || - map->type != entry->map->type || - entry->master != dev->master) - continue; - switch (map->type) { - case _DRM_SHM: - if (map->flags != _DRM_CONTAINS_LOCK) - break; - return entry; - case _DRM_REGISTERS: - case _DRM_FRAME_BUFFER: - if ((entry->map->offset & 0xffffffff) == - (map->offset & 0xffffffff)) - return entry; - break; - default: /* Make gcc happy */ - break; - } - if (entry->map->offset == map->offset) - return entry; - } - - return NULL; -} - -static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, - unsigned long user_token, int hashed_handle, int shm) -{ - int use_hashed_handle, shift; - unsigned long add; - -#if (BITS_PER_LONG == 64) - use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle); -#elif (BITS_PER_LONG == 32) - use_hashed_handle = hashed_handle; -#else -#error Unsupported long size. Neither 64 nor 32 bits. -#endif - - if (!use_hashed_handle) { - int ret; - - hash->key = user_token >> PAGE_SHIFT; - ret = drm_ht_insert_item(&dev->map_hash, hash); - if (ret != -EINVAL) - return ret; - } - - shift = 0; - add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT; - if (shm && (SHMLBA > PAGE_SIZE)) { - int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1; - - /* For shared memory, we have to preserve the SHMLBA - * bits of the eventual vma->vm_pgoff value during - * mmap(). Otherwise we run into cache aliasing problems - * on some platforms. On these platforms, the pgoff of - * a mmap() request is used to pick a suitable virtual - * address for the mmap() region such that it will not - * cause cache aliasing problems. 
- * - * Therefore, make sure the SHMLBA relevant bits of the - * hash value we use are equal to those in the original - * kernel virtual address. - */ - shift = bits; - add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL)); - } - - return drm_ht_just_insert_please(&dev->map_hash, hash, - user_token, 32 - PAGE_SHIFT - 3, - shift, add); -} - -/* - * Core function to create a range of memory available for mapping by a - * non-root process. - * - * Adjusts the memory offset to its absolute value according to the mapping - * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where - * applicable and if supported by the kernel. - */ -static int drm_addmap_core(struct drm_device *dev, resource_size_t offset, - unsigned int size, enum drm_map_type type, - enum drm_map_flags flags, - struct drm_map_list **maplist) -{ - struct drm_local_map *map; - struct drm_map_list *list; - unsigned long user_token; - int ret; - - map = kmalloc(sizeof(*map), GFP_KERNEL); - if (!map) - return -ENOMEM; - - map->offset = offset; - map->size = size; - map->flags = flags; - map->type = type; - - /* Only allow shared memory to be removable since we only keep enough - * book keeping information about shared memory to allow for removal - * when processes fork. - */ - if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { - kfree(map); - return -EINVAL; - } - DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n", - (unsigned long long)map->offset, map->size, map->type); - - /* page-align _DRM_SHM maps. They are allocated here so there is no security - * hole created by that and it works around various broken drivers that use - * a non-aligned quantity to map the SAREA. --BenH - */ - if (map->type == _DRM_SHM) - map->size = PAGE_ALIGN(map->size); - - if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) { - kfree(map); - return -EINVAL; - } - map->mtrr = -1; - map->handle = NULL; - - switch (map->type) { - case _DRM_REGISTERS: - case _DRM_FRAME_BUFFER: -#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__) - if (map->offset + (map->size-1) < map->offset || - map->offset < virt_to_phys(high_memory)) { - kfree(map); - return -EINVAL; - } -#endif - /* Some drivers preinitialize some maps, without the X Server - * needing to be aware of it. Therefore, we just return success - * when the server tries to create a duplicate map. 
- */ - list = drm_find_matching_map(dev, map); - if (list != NULL) { - if (list->map->size != map->size) { - DRM_DEBUG("Matching maps of type %d with " - "mismatched sizes, (%ld vs %ld)\n", - map->type, map->size, - list->map->size); - list->map->size = map->size; - } - - kfree(map); - *maplist = list; - return 0; - } - - if (map->type == _DRM_FRAME_BUFFER || - (map->flags & _DRM_WRITE_COMBINING)) { - map->mtrr = - arch_phys_wc_add(map->offset, map->size); - } - if (map->type == _DRM_REGISTERS) { - if (map->flags & _DRM_WRITE_COMBINING) - map->handle = ioremap_wc(map->offset, - map->size); - else - map->handle = ioremap(map->offset, map->size); - if (!map->handle) { - kfree(map); - return -ENOMEM; - } - } - - break; - case _DRM_SHM: - list = drm_find_matching_map(dev, map); - if (list != NULL) { - if (list->map->size != map->size) { - DRM_DEBUG("Matching maps of type %d with " - "mismatched sizes, (%ld vs %ld)\n", - map->type, map->size, list->map->size); - list->map->size = map->size; - } - - kfree(map); - *maplist = list; - return 0; - } - map->handle = vmalloc_user(map->size); - DRM_DEBUG("%lu %d %p\n", - map->size, order_base_2(map->size), map->handle); - if (!map->handle) { - kfree(map); - return -ENOMEM; - } - map->offset = (unsigned long)map->handle; - if (map->flags & _DRM_CONTAINS_LOCK) { - /* Prevent a 2nd X Server from creating a 2nd lock */ - if (dev->master->lock.hw_lock != NULL) { - vfree(map->handle); - kfree(map); - return -EBUSY; - } - dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */ - } - break; - case _DRM_AGP: { - struct drm_agp_mem *entry; - int valid = 0; - - if (!dev->agp) { - kfree(map); - return -EINVAL; - } -#ifdef __alpha__ - map->offset += dev->hose->mem_space->start; -#endif - /* In some cases (i810 driver), user space may have already - * added the AGP base itself, because dev->agp->base previously - * only got set during AGP enable. So, only add the base - * address if the map's offset isn't already within the - * aperture. - */ - if (map->offset < dev->agp->base || - map->offset > dev->agp->base + - dev->agp->agp_info.aper_size * 1024 * 1024 - 1) { - map->offset += dev->agp->base; - } - map->mtrr = dev->agp->agp_mtrr; /* for getmap */ - - /* This assumes the DRM is in total control of AGP space. - * It's not always the case as AGP can be in the control - * of user space (i.e. i810 driver). So this loop will get - * skipped and we double check that dev->agp->memory is - * actually set as well as being invalid before EPERM'ing - */ - list_for_each_entry(entry, &dev->agp->memory, head) { - if ((map->offset >= entry->bound) && - (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { - valid = 1; - break; - } - } - if (!list_empty(&dev->agp->memory) && !valid) { - kfree(map); - return -EPERM; - } - DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n", - (unsigned long long)map->offset, map->size); - - break; - } - case _DRM_SCATTER_GATHER: - if (!dev->sg) { - kfree(map); - return -EINVAL; - } - map->offset += (unsigned long)dev->sg->virtual; - break; - case _DRM_CONSISTENT: - /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G, - * As we're limiting the address to 2^32-1 (or less), - * casting it down to 32 bits is no problem, but we - * need to point to a 64bit variable first. 
- */ - map->handle = dma_alloc_coherent(dev->dev, - map->size, - &map->offset, - GFP_KERNEL); - if (!map->handle) { - kfree(map); - return -ENOMEM; - } - break; - default: - kfree(map); - return -EINVAL; - } - - list = kzalloc(sizeof(*list), GFP_KERNEL); - if (!list) { - if (map->type == _DRM_REGISTERS) - iounmap(map->handle); - kfree(map); - return -EINVAL; - } - list->map = map; - - mutex_lock(&dev->struct_mutex); - list_add(&list->head, &dev->maplist); - - /* Assign a 32-bit handle */ - /* We do it here so that dev->struct_mutex protects the increment */ - user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle : - map->offset; - ret = drm_map_handle(dev, &list->hash, user_token, 0, - (map->type == _DRM_SHM)); - if (ret) { - if (map->type == _DRM_REGISTERS) - iounmap(map->handle); - kfree(map); - kfree(list); - mutex_unlock(&dev->struct_mutex); - return ret; - } - - list->user_token = list->hash.key << PAGE_SHIFT; - mutex_unlock(&dev->struct_mutex); - - if (!(map->flags & _DRM_DRIVER)) - list->master = dev->master; - *maplist = list; - return 0; -} - -int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset, - unsigned int size, enum drm_map_type type, - enum drm_map_flags flags, struct drm_local_map **map_ptr) -{ - struct drm_map_list *list; - int rc; - - rc = drm_addmap_core(dev, offset, size, type, flags, &list); - if (!rc) - *map_ptr = list->map; - return rc; -} -EXPORT_SYMBOL(drm_legacy_addmap); - -struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, - unsigned int token) -{ - struct drm_map_list *_entry; - - list_for_each_entry(_entry, &dev->maplist, head) - if (_entry->user_token == token) - return _entry->map; - return NULL; -} -EXPORT_SYMBOL(drm_legacy_findmap); - -/* - * Ioctl to specify a range of memory that is available for mapping by a - * non-root process. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_map structure. - * \return zero on success or a negative value on error. - * - */ -int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_map *map = data; - struct drm_map_list *maplist; - int err; - - if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM)) - return -EPERM; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - err = drm_addmap_core(dev, map->offset, map->size, map->type, - map->flags, &maplist); - - if (err) - return err; - - /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ - map->handle = (void *)(unsigned long)maplist->user_token; - - /* - * It appears that there are no users of this value whatsoever -- - * drmAddMap just discards it. Let's not encourage its use. - * (Keeping drm_addmap_core's returned mtrr value would be wrong -- - * it's not a real mtrr index anymore.) - */ - map->mtrr = -1; - - return 0; -} - -/* - * Get a mapping information. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument, pointing to a drm_map structure. - * - * \return zero on success or a negative number on failure. 
- * - * Searches for the mapping with the specified offset and copies its information - * into userspace - */ -int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_map *map = data; - struct drm_map_list *r_list = NULL; - struct list_head *list; - int idx; - int i; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - idx = map->offset; - if (idx < 0) - return -EINVAL; - - i = 0; - mutex_lock(&dev->struct_mutex); - list_for_each(list, &dev->maplist) { - if (i == idx) { - r_list = list_entry(list, struct drm_map_list, head); - break; - } - i++; - } - if (!r_list || !r_list->map) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - map->offset = r_list->map->offset; - map->size = r_list->map->size; - map->type = r_list->map->type; - map->flags = r_list->map->flags; - map->handle = (void *)(unsigned long) r_list->user_token; - map->mtrr = arch_phys_wc_index(r_list->map->mtrr); - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -/* - * Remove a map private from list and deallocate resources if the mapping - * isn't in use. - * - * Searches the map on drm_device::maplist, removes it from the list, see if - * it's being used, and free any associated resource (such as MTRR's) if it's not - * being on use. - * - * \sa drm_legacy_addmap - */ -int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) -{ - struct drm_map_list *r_list = NULL, *list_t; - int found = 0; - struct drm_master *master; - - /* Find the list entry for the map and remove it */ - list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { - if (r_list->map == map) { - master = r_list->master; - list_del(&r_list->head); - drm_ht_remove_key(&dev->map_hash, - r_list->user_token >> PAGE_SHIFT); - kfree(r_list); - found = 1; - break; - } - } - - if (!found) - return -EINVAL; - - switch (map->type) { - case _DRM_REGISTERS: - iounmap(map->handle); - fallthrough; - case _DRM_FRAME_BUFFER: - arch_phys_wc_del(map->mtrr); - break; - case _DRM_SHM: - vfree(map->handle); - if (master) { - if (dev->sigdata.lock == master->lock.hw_lock) - dev->sigdata.lock = NULL; - master->lock.hw_lock = NULL; /* SHM removed */ - master->lock.file_priv = NULL; - wake_up_interruptible_all(&master->lock.lock_queue); - } - break; - case _DRM_AGP: - case _DRM_SCATTER_GATHER: - break; - case _DRM_CONSISTENT: - dma_free_coherent(dev->dev, - map->size, - map->handle, - map->offset); - break; - } - kfree(map); - - return 0; -} -EXPORT_SYMBOL(drm_legacy_rmmap_locked); - -void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->struct_mutex); - drm_legacy_rmmap_locked(dev, map); - mutex_unlock(&dev->struct_mutex); -} -EXPORT_SYMBOL(drm_legacy_rmmap); - -void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master) -{ - struct drm_map_list *r_list, *list_temp; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) { - if (r_list->master == master) { - drm_legacy_rmmap_locked(dev, r_list->map); - r_list = NULL; - } - } - mutex_unlock(&dev->struct_mutex); -} - -void drm_legacy_rmmaps(struct drm_device *dev) -{ - struct drm_map_list *r_list, *list_temp; - - list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) - drm_legacy_rmmap(dev, r_list->map); -} - -/* The rmmap ioctl appears to be unnecessary. 
All mappings are torn down on - * the last close of the device, and this is necessary for cleanup when things - * exit uncleanly. Therefore, having userland manually remove mappings seems - * like a pointless exercise since they're going away anyway. - * - * One use case might be after addmap is allowed for normal users for SHM and - * gets used by drivers that the server doesn't need to care about. This seems - * unlikely. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a struct drm_map structure. - * \return zero on success or a negative value on error. - */ -int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_map *request = data; - struct drm_local_map *map = NULL; - struct drm_map_list *r_list; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(r_list, &dev->maplist, head) { - if (r_list->map && - r_list->user_token == (unsigned long)request->handle && - r_list->map->flags & _DRM_REMOVABLE) { - map = r_list->map; - break; - } - } - - /* List has wrapped around to the head pointer, or it's empty we didn't - * find anything. - */ - if (list_empty(&dev->maplist) || !map) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - /* Register and framebuffer maps are permanent */ - if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { - mutex_unlock(&dev->struct_mutex); - return 0; - } - - ret = drm_legacy_rmmap_locked(dev, map); - - mutex_unlock(&dev->struct_mutex); - - return ret; -} - -/* - * Cleanup after an error on one of the addbufs() functions. - * - * \param dev DRM device. - * \param entry buffer entry where the error occurred. - * - * Frees any pages and buffers associated with the given entry. - */ -static void drm_cleanup_buf_error(struct drm_device *dev, - struct drm_buf_entry *entry) -{ - drm_dma_handle_t *dmah; - int i; - - if (entry->seg_count) { - for (i = 0; i < entry->seg_count; i++) { - if (entry->seglist[i]) { - dmah = entry->seglist[i]; - dma_free_coherent(dev->dev, - dmah->size, - dmah->vaddr, - dmah->busaddr); - kfree(dmah); - } - } - kfree(entry->seglist); - - entry->seg_count = 0; - } - - if (entry->buf_count) { - for (i = 0; i < entry->buf_count; i++) { - kfree(entry->buflist[i].dev_private); - } - kfree(entry->buflist); - - entry->buf_count = 0; - } -} - -#if IS_ENABLED(CONFIG_AGP) -/* - * Add AGP buffers for DMA transfers. - * - * \param dev struct drm_device to which the buffers are to be added. - * \param request pointer to a struct drm_buf_desc describing the request. - * \return zero on success or a negative number on failure. - * - * After some sanity checks creates a drm_buf structure for each buffer and - * reallocates the buffer list of the same size order to accommodate the new - * buffers. - */ -int drm_legacy_addbufs_agp(struct drm_device *dev, - struct drm_buf_desc *request) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_entry *entry; - struct drm_agp_mem *agp_entry; - struct drm_buf *buf; - unsigned long offset; - unsigned long agp_offset; - int count; - int order; - int size; - int alignment; - int page_order; - int total; - int byte_count; - int i, valid; - struct drm_buf **temp_buflist; - - if (!dma) - return -EINVAL; - - count = request->count; - order = order_base_2(request->size); - size = 1 << order; - - alignment = (request->flags & _DRM_PAGE_ALIGN) - ? 
PAGE_ALIGN(size) : size; - page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - - byte_count = 0; - agp_offset = dev->agp->base + request->agp_start; - - DRM_DEBUG("count: %d\n", count); - DRM_DEBUG("order: %d\n", order); - DRM_DEBUG("size: %d\n", size); - DRM_DEBUG("agp_offset: %lx\n", agp_offset); - DRM_DEBUG("alignment: %d\n", alignment); - DRM_DEBUG("page_order: %d\n", page_order); - DRM_DEBUG("total: %d\n", total); - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - - /* Make sure buffers are located in AGP memory that we own */ - valid = 0; - list_for_each_entry(agp_entry, &dev->agp->memory, head) { - if ((agp_offset >= agp_entry->bound) && - (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) { - valid = 1; - break; - } - } - if (!list_empty(&dev->agp->memory) && !valid) { - DRM_DEBUG("zone invalid\n"); - return -EINVAL; - } - spin_lock(&dev->buf_lock); - if (dev->buf_use) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->buf_lock); - - mutex_lock(&dev->struct_mutex); - entry = &dma->bufs[order]; - if (entry->buf_count) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ - } - - if (count < 0 || count > 4096) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -EINVAL; - } - - entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL); - if (!entry->buflist) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - entry->buf_size = size; - entry->page_order = page_order; - - offset = 0; - - while (entry->buf_count < count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - - buf->offset = (dma->byte_count + offset); - buf->bus_address = agp_offset + offset; - buf->address = (void *)(agp_offset + offset); - buf->next = NULL; - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - - buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL); - if (!buf->dev_private) { - /* Set count correctly so we free the proper amount. 
*/ - entry->buf_count = count; - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); - - offset += alignment; - entry->buf_count++; - byte_count += PAGE_SIZE << page_order; - } - - DRM_DEBUG("byte_count: %d\n", byte_count); - - temp_buflist = krealloc(dma->buflist, - (dma->buf_count + entry->buf_count) * - sizeof(*dma->buflist), GFP_KERNEL); - if (!temp_buflist) { - /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - dma->buflist = temp_buflist; - - for (i = 0; i < entry->buf_count; i++) { - dma->buflist[i + dma->buf_count] = &entry->buflist[i]; - } - - dma->buf_count += entry->buf_count; - dma->seg_count += entry->seg_count; - dma->page_count += byte_count >> PAGE_SHIFT; - dma->byte_count += byte_count; - - DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); - DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); - - mutex_unlock(&dev->struct_mutex); - - request->count = entry->buf_count; - request->size = size; - - dma->flags = _DRM_DMA_USE_AGP; - - atomic_dec(&dev->buf_alloc); - return 0; -} -EXPORT_SYMBOL(drm_legacy_addbufs_agp); -#endif /* CONFIG_AGP */ - -int drm_legacy_addbufs_pci(struct drm_device *dev, - struct drm_buf_desc *request) -{ - struct drm_device_dma *dma = dev->dma; - int count; - int order; - int size; - int total; - int page_order; - struct drm_buf_entry *entry; - drm_dma_handle_t *dmah; - struct drm_buf *buf; - int alignment; - unsigned long offset; - int i; - int byte_count; - int page_count; - unsigned long *temp_pagelist; - struct drm_buf **temp_buflist; - - if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - count = request->count; - order = order_base_2(request->size); - size = 1 << order; - - DRM_DEBUG("count=%d, size=%d (%d), order=%d\n", - request->count, request->size, size, order); - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - - alignment = (request->flags & _DRM_PAGE_ALIGN) - ? PAGE_ALIGN(size) : size; - page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - - spin_lock(&dev->buf_lock); - if (dev->buf_use) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->buf_lock); - - mutex_lock(&dev->struct_mutex); - entry = &dma->bufs[order]; - if (entry->buf_count) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ - } - - if (count < 0 || count > 4096) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -EINVAL; - } - - entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL); - if (!entry->buflist) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL); - if (!entry->seglist) { - kfree(entry->buflist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - /* Keep the original pagelist until we know all the allocations - * have succeeded - */ - temp_pagelist = kmalloc_array(dma->page_count + (count << page_order), - sizeof(*dma->pagelist), - GFP_KERNEL); - if (!temp_pagelist) { - kfree(entry->buflist); - kfree(entry->seglist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - memcpy(temp_pagelist, - dma->pagelist, dma->page_count * sizeof(*dma->pagelist)); - DRM_DEBUG("pagelist: %d entries\n", - dma->page_count + (count << page_order)); - - entry->buf_size = size; - entry->page_order = page_order; - byte_count = 0; - page_count = 0; - - while (entry->buf_count < count) { - dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); - if (!dmah) { - /* Set count correctly so we free the proper amount. */ - entry->buf_count = count; - entry->seg_count = count; - drm_cleanup_buf_error(dev, entry); - kfree(temp_pagelist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - dmah->size = total; - dmah->vaddr = dma_alloc_coherent(dev->dev, - dmah->size, - &dmah->busaddr, - GFP_KERNEL); - if (!dmah->vaddr) { - kfree(dmah); - - /* Set count correctly so we free the proper amount. */ - entry->buf_count = count; - entry->seg_count = count; - drm_cleanup_buf_error(dev, entry); - kfree(temp_pagelist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - entry->seglist[entry->seg_count++] = dmah; - for (i = 0; i < (1 << page_order); i++) { - DRM_DEBUG("page %d @ 0x%08lx\n", - dma->page_count + page_count, - (unsigned long)dmah->vaddr + PAGE_SIZE * i); - temp_pagelist[dma->page_count + page_count++] - = (unsigned long)dmah->vaddr + PAGE_SIZE * i; - } - for (offset = 0; - offset + size <= total && entry->buf_count < count; - offset += alignment, ++entry->buf_count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - buf->offset = (dma->byte_count + byte_count + offset); - buf->address = (void *)(dmah->vaddr + offset); - buf->bus_address = dmah->busaddr + offset; - buf->next = NULL; - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - - buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = kzalloc(buf->dev_priv_size, - GFP_KERNEL); - if (!buf->dev_private) { - /* Set count correctly so we free the proper amount. 
*/ - entry->buf_count = count; - entry->seg_count = count; - drm_cleanup_buf_error(dev, entry); - kfree(temp_pagelist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - DRM_DEBUG("buffer %d @ %p\n", - entry->buf_count, buf->address); - } - byte_count += PAGE_SIZE << page_order; - } - - temp_buflist = krealloc(dma->buflist, - (dma->buf_count + entry->buf_count) * - sizeof(*dma->buflist), GFP_KERNEL); - if (!temp_buflist) { - /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev, entry); - kfree(temp_pagelist); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - dma->buflist = temp_buflist; - - for (i = 0; i < entry->buf_count; i++) { - dma->buflist[i + dma->buf_count] = &entry->buflist[i]; - } - - /* No allocations failed, so now we can replace the original pagelist - * with the new one. - */ - if (dma->page_count) { - kfree(dma->pagelist); - } - dma->pagelist = temp_pagelist; - - dma->buf_count += entry->buf_count; - dma->seg_count += entry->seg_count; - dma->page_count += entry->seg_count << page_order; - dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); - - mutex_unlock(&dev->struct_mutex); - - request->count = entry->buf_count; - request->size = size; - - if (request->flags & _DRM_PCI_BUFFER_RO) - dma->flags = _DRM_DMA_USE_PCI_RO; - - atomic_dec(&dev->buf_alloc); - return 0; - -} -EXPORT_SYMBOL(drm_legacy_addbufs_pci); - -static int drm_legacy_addbufs_sg(struct drm_device *dev, - struct drm_buf_desc *request) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_entry *entry; - struct drm_buf *buf; - unsigned long offset; - unsigned long agp_offset; - int count; - int order; - int size; - int alignment; - int page_order; - int total; - int byte_count; - int i; - struct drm_buf **temp_buflist; - - if (!drm_core_check_feature(dev, DRIVER_SG)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - count = request->count; - order = order_base_2(request->size); - size = 1 << order; - - alignment = (request->flags & _DRM_PAGE_ALIGN) - ? PAGE_ALIGN(size) : size; - page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; - total = PAGE_SIZE << page_order; - - byte_count = 0; - agp_offset = request->agp_start; - - DRM_DEBUG("count: %d\n", count); - DRM_DEBUG("order: %d\n", order); - DRM_DEBUG("size: %d\n", size); - DRM_DEBUG("agp_offset: %lu\n", agp_offset); - DRM_DEBUG("alignment: %d\n", alignment); - DRM_DEBUG("page_order: %d\n", page_order); - DRM_DEBUG("total: %d\n", total); - - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - - spin_lock(&dev->buf_lock); - if (dev->buf_use) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->buf_lock); - - mutex_lock(&dev->struct_mutex); - entry = &dma->bufs[order]; - if (entry->buf_count) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; /* May only call once for each order */ - } - - if (count < 0 || count > 4096) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -EINVAL; - } - - entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL); - if (!entry->buflist) { - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - entry->buf_size = size; - entry->page_order = page_order; - - offset = 0; - - while (entry->buf_count < count) { - buf = &entry->buflist[entry->buf_count]; - buf->idx = dma->buf_count + entry->buf_count; - buf->total = alignment; - buf->order = order; - buf->used = 0; - - buf->offset = (dma->byte_count + offset); - buf->bus_address = agp_offset + offset; - buf->address = (void *)(agp_offset + offset - + (unsigned long)dev->sg->virtual); - buf->next = NULL; - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - - buf->dev_priv_size = dev->driver->dev_priv_size; - buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL); - if (!buf->dev_private) { - /* Set count correctly so we free the proper amount. */ - entry->buf_count = count; - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - - DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address); - - offset += alignment; - entry->buf_count++; - byte_count += PAGE_SIZE << page_order; - } - - DRM_DEBUG("byte_count: %d\n", byte_count); - - temp_buflist = krealloc(dma->buflist, - (dma->buf_count + entry->buf_count) * - sizeof(*dma->buflist), GFP_KERNEL); - if (!temp_buflist) { - /* Free the entry because it isn't valid */ - drm_cleanup_buf_error(dev, entry); - mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); - return -ENOMEM; - } - dma->buflist = temp_buflist; - - for (i = 0; i < entry->buf_count; i++) { - dma->buflist[i + dma->buf_count] = &entry->buflist[i]; - } - - dma->buf_count += entry->buf_count; - dma->seg_count += entry->seg_count; - dma->page_count += byte_count >> PAGE_SHIFT; - dma->byte_count += byte_count; - - DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count); - DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count); - - mutex_unlock(&dev->struct_mutex); - - request->count = entry->buf_count; - request->size = size; - - dma->flags = _DRM_DMA_USE_SG; - - atomic_dec(&dev->buf_alloc); - return 0; -} - -/* - * Add buffers for DMA transfers (ioctl). - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a struct drm_buf_desc request. - * \return zero on success or a negative number on failure. 
- * - * According with the memory type specified in drm_buf_desc::flags and the - * build options, it dispatches the call either to addbufs_agp(), - * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent - * PCI memory respectively. - */ -int drm_legacy_addbufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_buf_desc *request = data; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - -#if IS_ENABLED(CONFIG_AGP) - if (request->flags & _DRM_AGP_BUFFER) - ret = drm_legacy_addbufs_agp(dev, request); - else -#endif - if (request->flags & _DRM_SG_BUFFER) - ret = drm_legacy_addbufs_sg(dev, request); - else if (request->flags & _DRM_FB_BUFFER) - ret = -EINVAL; - else - ret = drm_legacy_addbufs_pci(dev, request); - - return ret; -} - -/* - * Get information about the buffer mappings. - * - * This was originally mean for debugging purposes, or by a sophisticated - * client library to determine how best to use the available buffers (e.g., - * large buffers can be used for image transfer). - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_buf_info structure. - * \return zero on success or a negative number on failure. - * - * Increments drm_device::buf_use while holding the drm_device::buf_lock - * lock, preventing of allocating more buffers after this call. Information - * about each requested buffer is then copied into user space. - */ -int __drm_legacy_infobufs(struct drm_device *dev, - void *data, int *p, - int (*f)(void *, int, struct drm_buf_entry *)) -{ - struct drm_device_dma *dma = dev->dma; - int i; - int count; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - spin_lock(&dev->buf_lock); - if (atomic_read(&dev->buf_alloc)) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - ++dev->buf_use; /* Can't allocate more after this call */ - spin_unlock(&dev->buf_lock); - - for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { - if (dma->bufs[i].buf_count) - ++count; - } - - DRM_DEBUG("count = %d\n", count); - - if (*p >= count) { - for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { - struct drm_buf_entry *from = &dma->bufs[i]; - - if (from->buf_count) { - if (f(data, count, from) < 0) - return -EFAULT; - DRM_DEBUG("%d %d %d %d %d\n", - i, - dma->bufs[i].buf_count, - dma->bufs[i].buf_size, - dma->bufs[i].low_mark, - dma->bufs[i].high_mark); - ++count; - } - } - } - *p = count; - - return 0; -} - -static int copy_one_buf(void *data, int count, struct drm_buf_entry *from) -{ - struct drm_buf_info *request = data; - struct drm_buf_desc __user *to = &request->list[count]; - struct drm_buf_desc v = {.count = from->buf_count, - .size = from->buf_size, - .low_mark = from->low_mark, - .high_mark = from->high_mark}; - - if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags))) - return -EFAULT; - return 0; -} - -int drm_legacy_infobufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_buf_info *request = data; - - return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf); -} - -/* - * Specifies a low and high water mark for buffer allocation - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. 
- * \param arg a pointer to a drm_buf_desc structure. - * \return zero on success or a negative number on failure. - * - * Verifies that the size order is bounded between the admissible orders and - * updates the respective drm_device_dma::bufs entry low and high water mark. - * - * \note This ioctl is deprecated and mostly never used. - */ -int drm_legacy_markbufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_desc *request = data; - int order; - struct drm_buf_entry *entry; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - DRM_DEBUG("%d, %d, %d\n", - request->size, request->low_mark, request->high_mark); - order = order_base_2(request->size); - if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) - return -EINVAL; - entry = &dma->bufs[order]; - - if (request->low_mark < 0 || request->low_mark > entry->buf_count) - return -EINVAL; - if (request->high_mark < 0 || request->high_mark > entry->buf_count) - return -EINVAL; - - entry->low_mark = request->low_mark; - entry->high_mark = request->high_mark; - - return 0; -} - -/* - * Unreserve the buffers in list, previously reserved using drmDMA. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_buf_free structure. - * \return zero on success or a negative number on failure. - * - * Calls free_buffer() for each used buffer. - * This function is primarily used for debugging. - */ -int drm_legacy_freebufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf_free *request = data; - int i; - int idx; - struct drm_buf *buf; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - DRM_DEBUG("%d\n", request->count); - for (i = 0; i < request->count; i++) { - if (copy_from_user(&idx, &request->list[i], sizeof(idx))) - return -EFAULT; - if (idx < 0 || idx >= dma->buf_count) { - DRM_ERROR("Index %d (of %d max)\n", - idx, dma->buf_count - 1); - return -EINVAL; - } - idx = array_index_nospec(idx, dma->buf_count); - buf = dma->buflist[idx]; - if (buf->file_priv != file_priv) { - DRM_ERROR("Process %d freeing buffer not owned\n", - task_pid_nr(current)); - return -EINVAL; - } - drm_legacy_free_buffer(dev, buf); - } - - return 0; -} - -/* - * Maps all of the DMA buffers into client-virtual space (ioctl). - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_buf_map structure. - * \return zero on success or a negative number on failure. - * - * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information - * about each buffer into user space. For PCI buffers, it calls vm_mmap() with - * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls - * drm_mmap_dma(). 
- */ -int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p, - void __user **v, - int (*f)(void *, int, unsigned long, - struct drm_buf *), - struct drm_file *file_priv) -{ - struct drm_device_dma *dma = dev->dma; - int retcode = 0; - unsigned long virtual; - int i; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - return -EOPNOTSUPP; - - if (!dma) - return -EINVAL; - - spin_lock(&dev->buf_lock); - if (atomic_read(&dev->buf_alloc)) { - spin_unlock(&dev->buf_lock); - return -EBUSY; - } - dev->buf_use++; /* Can't allocate more after this call */ - spin_unlock(&dev->buf_lock); - - if (*p >= dma->buf_count) { - if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) - || (drm_core_check_feature(dev, DRIVER_SG) - && (dma->flags & _DRM_DMA_USE_SG))) { - struct drm_local_map *map = dev->agp_buffer_map; - unsigned long token = dev->agp_buffer_token; - - if (!map) { - retcode = -EINVAL; - goto done; - } - virtual = vm_mmap(file_priv->filp, 0, map->size, - PROT_READ | PROT_WRITE, - MAP_SHARED, - token); - } else { - virtual = vm_mmap(file_priv->filp, 0, dma->byte_count, - PROT_READ | PROT_WRITE, - MAP_SHARED, 0); - } - if (virtual > -1024UL) { - /* Real error */ - retcode = (signed long)virtual; - goto done; - } - *v = (void __user *)virtual; - - for (i = 0; i < dma->buf_count; i++) { - if (f(data, i, virtual, dma->buflist[i]) < 0) { - retcode = -EFAULT; - goto done; - } - } - } - done: - *p = dma->buf_count; - DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode); - - return retcode; -} - -static int map_one_buf(void *data, int idx, unsigned long virtual, - struct drm_buf *buf) -{ - struct drm_buf_map *request = data; - unsigned long address = virtual + buf->offset; /* *** */ - - if (copy_to_user(&request->list[idx].idx, &buf->idx, - sizeof(request->list[0].idx))) - return -EFAULT; - if (copy_to_user(&request->list[idx].total, &buf->total, - sizeof(request->list[0].total))) - return -EFAULT; - if (clear_user(&request->list[idx].used, sizeof(int))) - return -EFAULT; - if (copy_to_user(&request->list[idx].address, &address, - sizeof(address))) - return -EFAULT; - return 0; -} - -int drm_legacy_mapbufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_buf_map *request = data; - - return __drm_legacy_mapbufs(dev, data, &request->count, - &request->virtual, map_one_buf, - file_priv); -} - -int drm_legacy_dma_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (dev->driver->dma_ioctl) - return dev->driver->dma_ioctl(dev, data, file_priv); - else - return -EINVAL; -} - -struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev) -{ - struct drm_map_list *entry; - - list_for_each_entry(entry, &dev->maplist, head) { - if (entry->map && entry->map->type == _DRM_SHM && - (entry->map->flags & _DRM_CONTAINS_LOCK)) { - return entry->map; - } - } - return NULL; -} -EXPORT_SYMBOL(drm_legacy_getsarea); diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index c3027115d0..9403b3f576 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -5,7 +5,6 @@ #include #include -#include #include #include #include @@ -84,16 +83,13 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client, if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create) return -EOPNOTSUPP; - if (funcs && !try_module_get(funcs->owner)) - 
return -ENODEV; - client->dev = dev; client->name = name; client->funcs = funcs; ret = drm_client_modeset_create(client); if (ret) - goto err_put_module; + return ret; ret = drm_client_open(client); if (ret) @@ -105,10 +101,6 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client, err_free: drm_client_modeset_free(client); -err_put_module: - if (funcs) - module_put(funcs->owner); - return ret; } EXPORT_SYMBOL(drm_client_init); @@ -177,8 +169,6 @@ void drm_client_release(struct drm_client_dev *client) drm_client_modeset_free(client); drm_client_close(client); drm_dev_put(dev); - if (client->funcs) - module_put(client->funcs->owner); } EXPORT_SYMBOL(drm_client_release); diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 871e4e2129..0683a129b3 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -777,6 +777,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int total_modes_count = 0; struct drm_client_offset *offsets; unsigned int connector_count = 0; + /* points to modes protected by mode_config.mutex */ struct drm_display_mode **modes; struct drm_crtc **crtcs; int i, ret = 0; @@ -845,7 +846,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, drm_client_pick_crtcs(client, connectors, connector_count, crtcs, modes, 0, width, height); } - mutex_unlock(&dev->mode_config.mutex); drm_client_modeset_release(client); @@ -875,6 +875,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, modeset->y = offset->y; } } + mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&client->modeset_mutex); out: diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index c3725086f4..b0516505f7 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1198,6 +1198,12 @@ static const u32 dp_colorspaces = * drm_connector_set_path_property(), in the case of DP MST with the * path property the MST manager created. Userspace cannot change this * property. + * + * In the case of DP MST, the property has the format + * ``mst:-`` where ```` is the KMS object ID of the + * parent connector and ```` is a hyphen-separated list of DP MST + * port numbers. Note, KMS object IDs are not guaranteed to be stable + * across reboots. * TILE: * Connector tile group property to indicate how a set of DRM connector * compose together into one logical screen. This is used by both high-res diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c deleted file mode 100644 index a0fc779e5e..0000000000 --- a/drivers/gpu/drm/drm_context.c +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Legacy: Generic DRM Contexts - * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Author: Rickard E. 
(Rik) Faith - * Author: Gareth Hughes - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include - -#include -#include -#include - -#include "drm_legacy.h" - -struct drm_ctx_list { - struct list_head head; - drm_context_t handle; - struct drm_file *tag; -}; - -/******************************************************************/ -/** \name Context bitmap support */ -/*@{*/ - -/* - * Free a handle from the context bitmap. - * - * \param dev DRM device. - * \param ctx_handle context handle. - * - * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry - * in drm_device::ctx_idr, while holding the drm_device::struct_mutex - * lock. - */ -void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->struct_mutex); - idr_remove(&dev->ctx_idr, ctx_handle); - mutex_unlock(&dev->struct_mutex); -} - -/* - * Context bitmap allocation. - * - * \param dev DRM device. - * \return (non-negative) context handle on success or a negative number on failure. - * - * Allocate a new idr from drm_device::ctx_idr while holding the - * drm_device::struct_mutex lock. - */ -static int drm_legacy_ctxbitmap_next(struct drm_device * dev) -{ - int ret; - - mutex_lock(&dev->struct_mutex); - ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0, - GFP_KERNEL); - mutex_unlock(&dev->struct_mutex); - return ret; -} - -/* - * Context bitmap initialization. - * - * \param dev DRM device. - * - * Initialise the drm_device::ctx_idr - */ -void drm_legacy_ctxbitmap_init(struct drm_device * dev) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - idr_init(&dev->ctx_idr); -} - -/* - * Context bitmap cleanup. - * - * \param dev DRM device. - * - * Free all idr members using drm_ctx_sarea_free helper function - * while holding the drm_device::struct_mutex lock. - */ -void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->struct_mutex); - idr_destroy(&dev->ctx_idr); - mutex_unlock(&dev->struct_mutex); -} - -/** - * drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file - * @dev: DRM device to operate on - * @file: Open file to flush contexts for - * - * This iterates over all contexts on @dev and drops them if they're owned by - * @file. 
Note that after this call returns, new contexts might be added if - * the file is still alive. - */ -void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) -{ - struct drm_ctx_list *pos, *tmp; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - mutex_lock(&dev->ctxlist_mutex); - - list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) { - if (pos->tag == file && - pos->handle != DRM_KERNEL_CONTEXT) { - if (dev->driver->context_dtor) - dev->driver->context_dtor(dev, pos->handle); - - drm_legacy_ctxbitmap_free(dev, pos->handle); - list_del(&pos->head); - kfree(pos); - } - } - - mutex_unlock(&dev->ctxlist_mutex); -} - -/*@}*/ - -/******************************************************************/ -/** \name Per Context SAREA Support */ -/*@{*/ - -/* - * Get per-context SAREA. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx_priv_map structure. - * \return zero on success or a negative number on failure. - * - * Gets the map from drm_device::ctx_idr with the handle specified and - * returns its handle. - */ -int drm_legacy_getsareactx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx_priv_map *request = data; - struct drm_local_map *map; - struct drm_map_list *_entry; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - mutex_lock(&dev->struct_mutex); - - map = idr_find(&dev->ctx_idr, request->ctx_id); - if (!map) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - request->handle = NULL; - list_for_each_entry(_entry, &dev->maplist, head) { - if (_entry->map == map) { - request->handle = - (void *)(unsigned long)_entry->user_token; - break; - } - } - - mutex_unlock(&dev->struct_mutex); - - if (request->handle == NULL) - return -EINVAL; - - return 0; -} - -/* - * Set per-context SAREA. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx_priv_map structure. - * \return zero on success or a negative number on failure. - * - * Searches the mapping specified in \p arg and update the entry in - * drm_device::ctx_idr with it. - */ -int drm_legacy_setsareactx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx_priv_map *request = data; - struct drm_local_map *map = NULL; - struct drm_map_list *r_list = NULL; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(r_list, &dev->maplist, head) { - if (r_list->map - && r_list->user_token == (unsigned long) request->handle) - goto found; - } - bad: - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - - found: - map = r_list->map; - if (!map) - goto bad; - - if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id))) - goto bad; - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -/*@}*/ - -/******************************************************************/ -/** \name The actual DRM context handling routines */ -/*@{*/ - -/* - * Switch context. - * - * \param dev DRM device. - * \param old old context handle. - * \param new new context handle. - * \return zero on success or a negative number on failure. - * - * Attempt to set drm_device::context_flag. 
- */ -static int drm_context_switch(struct drm_device * dev, int old, int new) -{ - if (test_and_set_bit(0, &dev->context_flag)) { - DRM_ERROR("Reentering -- FIXME\n"); - return -EBUSY; - } - - DRM_DEBUG("Context switch from %d to %d\n", old, new); - - if (new == dev->last_context) { - clear_bit(0, &dev->context_flag); - return 0; - } - - return 0; -} - -/* - * Complete context switch. - * - * \param dev DRM device. - * \param new new context handle. - * \return zero on success or a negative number on failure. - * - * Updates drm_device::last_context and drm_device::last_switch. Verifies the - * hardware lock is held, clears the drm_device::context_flag and wakes up - * drm_device::context_wait. - */ -static int drm_context_switch_complete(struct drm_device *dev, - struct drm_file *file_priv, int new) -{ - dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ - - if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) { - DRM_ERROR("Lock isn't held after context switch\n"); - } - - /* If a context switch is ever initiated - when the kernel holds the lock, release - that lock here. - */ - clear_bit(0, &dev->context_flag); - - return 0; -} - -/* - * Reserve contexts. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx_res structure. - * \return zero on success or a negative number on failure. - */ -int drm_legacy_resctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx_res *res = data; - struct drm_ctx ctx; - int i; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (res->count >= DRM_RESERVED_CONTEXTS) { - memset(&ctx, 0, sizeof(ctx)); - for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { - ctx.handle = i; - if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx))) - return -EFAULT; - } - } - res->count = DRM_RESERVED_CONTEXTS; - - return 0; -} - -/* - * Add context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - * - * Get a new handle for the context and copy to userspace. - */ -int drm_legacy_addctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx_list *ctx_entry; - struct drm_ctx *ctx = data; - int tmp_handle; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - tmp_handle = drm_legacy_ctxbitmap_next(dev); - if (tmp_handle == DRM_KERNEL_CONTEXT) { - /* Skip kernel's context and get a new one. */ - tmp_handle = drm_legacy_ctxbitmap_next(dev); - } - DRM_DEBUG("%d\n", tmp_handle); - if (tmp_handle < 0) { - DRM_DEBUG("Not enough free contexts.\n"); - /* Should this return -EBUSY instead? */ - return tmp_handle; - } - - ctx->handle = tmp_handle; - - ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL); - if (!ctx_entry) { - DRM_DEBUG("out of memory\n"); - return -ENOMEM; - } - - INIT_LIST_HEAD(&ctx_entry->head); - ctx_entry->handle = ctx->handle; - ctx_entry->tag = file_priv; - - mutex_lock(&dev->ctxlist_mutex); - list_add(&ctx_entry->head, &dev->ctxlist); - mutex_unlock(&dev->ctxlist_mutex); - - return 0; -} - -/* - * Get context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. 
- */ -int drm_legacy_getctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx *ctx = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - /* This is 0, because we don't handle any context flags */ - ctx->flags = 0; - - return 0; -} - -/* - * Switch context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - * - * Calls context_switch(). - */ -int drm_legacy_switchctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx *ctx = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - DRM_DEBUG("%d\n", ctx->handle); - return drm_context_switch(dev, dev->last_context, ctx->handle); -} - -/* - * New context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - * - * Calls context_switch_complete(). - */ -int drm_legacy_newctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx *ctx = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - DRM_DEBUG("%d\n", ctx->handle); - drm_context_switch_complete(dev, file_priv, ctx->handle); - - return 0; -} - -/* - * Remove context. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument pointing to a drm_ctx structure. - * \return zero on success or a negative number on failure. - * - * If not the special kernel context, calls ctxbitmap_free() to free the specified context. - */ -int drm_legacy_rmctx(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_ctx *ctx = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - DRM_DEBUG("%d\n", ctx->handle); - if (ctx->handle != DRM_KERNEL_CONTEXT) { - if (dev->driver->context_dtor) - dev->driver->context_dtor(dev, ctx->handle); - drm_legacy_ctxbitmap_free(dev, ctx->handle); - } - - mutex_lock(&dev->ctxlist_mutex); - if (!list_empty(&dev->ctxlist)) { - struct drm_ctx_list *pos, *n; - - list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { - if (pos->handle == ctx->handle) { - list_del(&pos->head); - kfree(pos); - } - } - } - mutex_unlock(&dev->ctxlist_mutex); - - return 0; -} - -/*@}*/ diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index cb90e70d85..65f9f66933 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -904,6 +904,7 @@ out: connector_set = NULL; fb = NULL; mode = NULL; + num_connectors = 0; DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a209659a99..2dafc39a27 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -439,11 +439,8 @@ EXPORT_SYMBOL(drm_crtc_helper_set_mode); * @state: atomic state object * * Provides a default CRTC-state check handler for CRTCs that only have - * one primary plane attached to it. - * - * This is often the case for the CRTC of simple framebuffers. See also - * drm_plane_helper_atomic_check() for the respective plane-state check - * helper function. + * one primary plane attached to it. This is often the case for the CRTC + * of simple framebuffers. 
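The shortened kernel-doc above describes drm_crtc_helper_atomic_check(); for orientation, it is typically wired up through the CRTC helper vtable. A hedged sketch for a hypothetical single-primary-plane driver (the myfb_* names are illustrative):

#include <drm/drm_crtc_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

/* Hypothetical simple-framebuffer CRTC: one primary plane, no cursor. */
static const struct drm_crtc_helper_funcs myfb_crtc_helper_funcs = {
	/* default state check for CRTCs with a single primary plane */
	.atomic_check = drm_crtc_helper_atomic_check,
};

/* registered at init time with:
 *	drm_crtc_helper_add(&myfb->crtc, &myfb_crtc_helper_funcs);
 */
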
* * RETURNS: * Zero on success, or an errno code otherwise. diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 8556c3b3ff..a514d5207e 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -222,6 +222,8 @@ int drm_mode_addfb2_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_mode_rmfb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int drm_mode_closefb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); int drm_mode_getfb(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_mode_getfb2_ioctl(struct drm_device *dev, @@ -251,7 +253,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state, struct drm_file *file_priv, struct drm_mode_object *obj, struct drm_property *prop, - uint64_t prop_value); + u64 prop_value, bool async_flip); int drm_atomic_get_property(struct drm_mode_object *obj, struct drm_property *property, uint64_t *val); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index f291fb4b35..f4715a67e3 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -314,10 +314,8 @@ void drm_debugfs_dev_register(struct drm_device *dev) drm_framebuffer_debugfs_init(dev); drm_client_debugfs_init(dev); } - if (drm_drv_uses_atomic_modeset(dev)) { + if (drm_drv_uses_atomic_modeset(dev)) drm_atomic_debugfs_init(dev); - drm_bridge_debugfs_init(dev); - } } int drm_debugfs_register(struct drm_minor *minor, int minor_id, @@ -589,4 +587,65 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc) crtc->debugfs_entry = NULL; } +static int bridges_show(struct seq_file *m, void *data) +{ + struct drm_encoder *encoder = m->private; + struct drm_printer p = drm_seq_file_printer(m); + struct drm_bridge *bridge; + unsigned int idx = 0; + + drm_for_each_bridge_in_chain(encoder, bridge) { + drm_printf(&p, "bridge[%d]: %ps\n", idx++, bridge->funcs); + drm_printf(&p, "\ttype: [%d] %s\n", + bridge->type, + drm_get_connector_type_name(bridge->type)); +#ifdef CONFIG_OF + if (bridge->of_node) + drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node); +#endif + drm_printf(&p, "\tops: [0x%x]", bridge->ops); + if (bridge->ops & DRM_BRIDGE_OP_DETECT) + drm_puts(&p, " detect"); + if (bridge->ops & DRM_BRIDGE_OP_EDID) + drm_puts(&p, " edid"); + if (bridge->ops & DRM_BRIDGE_OP_HPD) + drm_puts(&p, " hpd"); + if (bridge->ops & DRM_BRIDGE_OP_MODES) + drm_puts(&p, " modes"); + drm_puts(&p, "\n"); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(bridges); + +void drm_debugfs_encoder_add(struct drm_encoder *encoder) +{ + struct drm_minor *minor = encoder->dev->primary; + struct dentry *root; + char *name; + + name = kasprintf(GFP_KERNEL, "encoder-%d", encoder->index); + if (!name) + return; + + root = debugfs_create_dir(name, minor->debugfs_root); + kfree(name); + + encoder->debugfs_entry = root; + + /* bridges list */ + debugfs_create_file("bridges", 0444, root, encoder, + &bridges_fops); + + if (encoder->funcs && encoder->funcs->debugfs_init) + encoder->funcs->debugfs_init(encoder, root); +} + +void drm_debugfs_encoder_remove(struct drm_encoder *encoder) +{ + debugfs_remove_recursive(encoder->debugfs_entry); + encoder->debugfs_entry = NULL; +} + #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c deleted file mode 100644 index eb6b741a6f..0000000000 --- a/drivers/gpu/drm/drm_dma.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * \file drm_dma.c - * DMA IOCTL and function support 
- * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - */ - -/* - * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com - * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include - -#include -#include - -#include "drm_legacy.h" - -/** - * drm_legacy_dma_setup() - Initialize the DMA data. - * - * @dev: DRM device. - * Return: zero on success or a negative value on failure. - * - * Allocate and initialize a drm_device_dma structure. - */ -int drm_legacy_dma_setup(struct drm_device *dev) -{ - int i; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) || - !drm_core_check_feature(dev, DRIVER_LEGACY)) - return 0; - - dev->buf_use = 0; - atomic_set(&dev->buf_alloc, 0); - - dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL); - if (!dev->dma) - return -ENOMEM; - - for (i = 0; i <= DRM_MAX_ORDER; i++) - memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0])); - - return 0; -} - -/** - * drm_legacy_dma_takedown() - Cleanup the DMA resources. - * - * @dev: DRM device. - * - * Free all pages associated with DMA buffers, the buffers and pages lists, and - * finally the drm_device::dma structure itself. - */ -void drm_legacy_dma_takedown(struct drm_device *dev) -{ - struct drm_device_dma *dma = dev->dma; - drm_dma_handle_t *dmah; - int i, j; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) || - !drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - if (!dma) - return; - - /* Clear dma buffers */ - for (i = 0; i <= DRM_MAX_ORDER; i++) { - if (dma->bufs[i].seg_count) { - DRM_DEBUG("order %d: buf_count = %d," - " seg_count = %d\n", - i, - dma->bufs[i].buf_count, - dma->bufs[i].seg_count); - for (j = 0; j < dma->bufs[i].seg_count; j++) { - if (dma->bufs[i].seglist[j]) { - dmah = dma->bufs[i].seglist[j]; - dma_free_coherent(dev->dev, - dmah->size, - dmah->vaddr, - dmah->busaddr); - kfree(dmah); - } - } - kfree(dma->bufs[i].seglist); - } - if (dma->bufs[i].buf_count) { - for (j = 0; j < dma->bufs[i].buf_count; j++) { - kfree(dma->bufs[i].buflist[j].dev_private); - } - kfree(dma->bufs[i].buflist); - } - } - - kfree(dma->buflist); - kfree(dma->pagelist); - kfree(dev->dma); - dev->dma = NULL; -} - -/** - * drm_legacy_free_buffer() - Free a buffer. - * - * @dev: DRM device. - * @buf: buffer to free. - * - * Resets the fields of \p buf. 
- */ -void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf) -{ - if (!buf) - return; - - buf->waiting = 0; - buf->pending = 0; - buf->file_priv = NULL; - buf->used = 0; -} - -/** - * drm_legacy_reclaim_buffers() - Reclaim the buffers. - * - * @dev: DRM device. - * @file_priv: DRM file private. - * - * Frees each buffer associated with \p file_priv not already on the hardware. - */ -void drm_legacy_reclaim_buffers(struct drm_device *dev, - struct drm_file *file_priv) -{ - struct drm_device_dma *dma = dev->dma; - int i; - - if (!dma) - return; - for (i = 0; i < dma->buf_count; i++) { - if (dma->buflist[i]->file_priv == file_priv) { - switch (dma->buflist[i]->list) { - case DRM_LIST_NONE: - drm_legacy_free_buffer(dev, dma->buflist[i]); - break; - case DRM_LIST_WAIT: - dma->buflist[i]->list = DRM_LIST_RECLAIM; - break; - default: - /* Buffer already on hardware. */ - break; - } - } - } -} diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 3c835c99da..243cacb357 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -48,7 +48,6 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -#include "drm_legacy.h" MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"); MODULE_DESCRIPTION("DRM shared core routines"); @@ -585,8 +584,6 @@ static void drm_fs_inode_free(struct inode *inode) static void drm_dev_init_release(struct drm_device *dev, void *res) { - drm_legacy_ctxbitmap_cleanup(dev); - drm_legacy_remove_map_hash(dev); drm_fs_inode_free(dev->anon_inode); put_device(dev->dev); @@ -597,7 +594,6 @@ static void drm_dev_init_release(struct drm_device *dev, void *res) mutex_destroy(&dev->clientlist_mutex); mutex_destroy(&dev->filelist_mutex); mutex_destroy(&dev->struct_mutex); - drm_legacy_destroy_members(dev); } static int drm_dev_init(struct drm_device *dev, @@ -632,7 +628,6 @@ static int drm_dev_init(struct drm_device *dev, return -EINVAL; } - drm_legacy_init_members(dev); INIT_LIST_HEAD(&dev->filelist); INIT_LIST_HEAD(&dev->filelist_internal); INIT_LIST_HEAD(&dev->clientlist); @@ -673,12 +668,6 @@ static int drm_dev_init(struct drm_device *dev, goto err; } - ret = drm_legacy_create_map_hash(dev); - if (ret) - goto err; - - drm_legacy_ctxbitmap_init(dev); - if (drm_core_check_feature(dev, DRIVER_GEM)) { ret = drm_gem_init(dev); if (ret) { @@ -996,9 +985,6 @@ EXPORT_SYMBOL(drm_dev_register); */ void drm_dev_unregister(struct drm_device *dev) { - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - drm_lastclose(dev); - dev->registered = false; drm_client_dev_unregister(dev); @@ -1009,9 +995,6 @@ void drm_dev_unregister(struct drm_device *dev) if (dev->driver->unload) dev->driver->unload(dev); - drm_legacy_pci_agp_destroy(dev); - drm_legacy_rmmaps(dev); - remove_compat_control_link(dev); drm_minor_unregister(dev, DRM_MINOR_ACCEL); drm_minor_unregister(dev, DRM_MINOR_PRIMARY); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 3b40650998..69c6880402 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -41,10 +41,12 @@ #include #include #include +#include #include #include #include "drm_crtc_internal.h" +#include "drm_internal.h" static int oui(u8 first, u8 second, u8 third) { @@ -5510,6 +5512,27 @@ static void clear_eld(struct drm_connector *connector) connector->audio_latency[1] = 0; } +/* + * Get 3-byte SAD buffer from struct cea_sad. 
+ */ +void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad) +{ + sad[0] = cta_sad->format << 3 | cta_sad->channels; + sad[1] = cta_sad->freq; + sad[2] = cta_sad->byte2; +} + +/* + * Set struct cea_sad from 3-byte SAD buffer. + */ +void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad) +{ + cta_sad->format = (sad[0] & 0x78) >> 3; + cta_sad->channels = sad[0] & 0x07; + cta_sad->freq = sad[1] & 0x7f; + cta_sad->byte2 = sad[2]; +} + /* * drm_edid_to_eld - build ELD from EDID * @connector: connector corresponding to the HDMI/DP sink @@ -5594,7 +5617,7 @@ static void drm_edid_to_eld(struct drm_connector *connector, } static int _drm_edid_to_sad(const struct drm_edid *drm_edid, - struct cea_sad **sads) + struct cea_sad **psads) { const struct cea_db *db; struct cea_db_iter iter; @@ -5603,20 +5626,16 @@ static int _drm_edid_to_sad(const struct drm_edid *drm_edid, cea_db_iter_edid_begin(drm_edid, &iter); cea_db_iter_for_each(db, &iter) { if (cea_db_tag(db) == CTA_DB_AUDIO) { - int j; + struct cea_sad *sads; + int i; count = cea_db_payload_len(db) / 3; /* SAD is 3B */ - *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL); - if (!*sads) + sads = kcalloc(count, sizeof(*sads), GFP_KERNEL); + *psads = sads; + if (!sads) return -ENOMEM; - for (j = 0; j < count; j++) { - const u8 *sad = &db->data[j * 3]; - - (*sads)[j].format = (sad[0] & 0x78) >> 3; - (*sads)[j].channels = sad[0] & 0x7; - (*sads)[j].freq = sad[1] & 0x7F; - (*sads)[j].byte2 = sad[2]; - } + for (i = 0; i < count; i++) + drm_edid_cta_sad_set(&sads[i], &db->data[i * 3]); break; } } diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 5d9ef267eb..60fcb80bce 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -23,22 +23,6 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644); MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob " "from built-in data or /lib/firmware instead. 
"); -/* Use only for backward compatibility with drm_kms_helper.edid_firmware */ -int __drm_set_edid_firmware_path(const char *path) -{ - scnprintf(edid_firmware, sizeof(edid_firmware), "%s", path); - - return 0; -} -EXPORT_SYMBOL(__drm_set_edid_firmware_path); - -/* Use only for backward compatibility with drm_kms_helper.edid_firmware */ -int __drm_get_edid_firmware_path(char *buf, size_t bufsize) -{ - return scnprintf(buf, bufsize, "%s", edid_firmware); -} -EXPORT_SYMBOL(__drm_get_edid_firmware_path); - #define GENERIC_EDIDS 6 static const char * const generic_edid_name[GENERIC_EDIDS] = { "edid/800x600.bin", diff --git a/drivers/gpu/drm/drm_eld.c b/drivers/gpu/drm/drm_eld.c new file mode 100644 index 0000000000..5177991aa2 --- /dev/null +++ b/drivers/gpu/drm/drm_eld.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include +#include + +#include "drm_internal.h" + +/** + * drm_eld_sad_get - get SAD from ELD to struct cea_sad + * @eld: ELD buffer + * @sad_index: SAD index + * @cta_sad: destination struct cea_sad + * + * @return: 0 on success, or negative on errors + */ +int drm_eld_sad_get(const u8 *eld, int sad_index, struct cea_sad *cta_sad) +{ + const u8 *sad; + + if (sad_index >= drm_eld_sad_count(eld)) + return -EINVAL; + + sad = eld + DRM_ELD_CEA_SAD(drm_eld_mnl(eld), sad_index); + + drm_edid_cta_sad_set(cta_sad, sad); + + return 0; +} +EXPORT_SYMBOL(drm_eld_sad_get); + +/** + * drm_eld_sad_set - set SAD to ELD from struct cea_sad + * @eld: ELD buffer + * @sad_index: SAD index + * @cta_sad: source struct cea_sad + * + * @return: 0 on success, or negative on errors + */ +int drm_eld_sad_set(u8 *eld, int sad_index, const struct cea_sad *cta_sad) +{ + u8 *sad; + + if (sad_index >= drm_eld_sad_count(eld)) + return -EINVAL; + + sad = eld + DRM_ELD_CEA_SAD(drm_eld_mnl(eld), sad_index); + + drm_edid_cta_sad_get(cta_sad, sad); + + return 0; +} +EXPORT_SYMBOL(drm_eld_sad_set); diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c index 1143bc7f32..8f2bc6a284 100644 --- a/drivers/gpu/drm/drm_encoder.c +++ b/drivers/gpu/drm/drm_encoder.c @@ -30,6 +30,7 @@ #include #include "drm_crtc_internal.h" +#include "drm_internal.h" /** * DOC: overview @@ -74,6 +75,8 @@ int drm_encoder_register_all(struct drm_device *dev) int ret = 0; drm_for_each_encoder(encoder, dev) { + drm_debugfs_encoder_add(encoder); + if (encoder->funcs && encoder->funcs->late_register) ret = encoder->funcs->late_register(encoder); if (ret) @@ -90,6 +93,7 @@ void drm_encoder_unregister_all(struct drm_device *dev) drm_for_each_encoder(encoder, dev) { if (encoder->funcs && encoder->funcs->early_unregister) encoder->funcs->early_unregister(encoder); + drm_debugfs_encoder_remove(encoder); } } diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c index 5d2809de45..48ee851b61 100644 --- a/drivers/gpu/drm/drm_exec.c +++ b/drivers/gpu/drm/drm_exec.c @@ -69,16 +69,23 @@ static void drm_exec_unlock_all(struct drm_exec *exec) * drm_exec_init - initialize a drm_exec object * @exec: the drm_exec object to initialize * @flags: controls locking behavior, see DRM_EXEC_* defines + * @nr: the initial # of objects * * Initialize the object and make sure that we can track locked objects. + * + * If nr is non-zero then it is used as the initial objects table size. + * In either case, the table will grow (be re-allocated) on demand. 
*/ -void drm_exec_init(struct drm_exec *exec, uint32_t flags) +void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr) { + if (!nr) + nr = PAGE_SIZE / sizeof(void *); + exec->flags = flags; - exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL); + exec->objects = kvmalloc_array(nr, sizeof(void *), GFP_KERNEL); /* If allocation here fails, just delay that till the first use */ - exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0; + exec->max_objects = exec->objects ? nr : 0; exec->num_objects = 0; exec->contended = DRM_EXEC_DUMMY; exec->prelocked = NULL; diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 54a7103c1c..8c87287c3e 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -47,21 +47,12 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -#include "drm_legacy.h" /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); bool drm_dev_needs_global_mutex(struct drm_device *dev) { - /* - * Legacy drivers rely on all kinds of BKL locking semantics, don't - * bother. They also still need BKL locking for their ioctls, so better - * safe than sorry. - */ - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - return true; - /* * The deprecated ->load callback must be called after the driver is * already registered. This means such drivers rely on the BKL to make @@ -107,9 +98,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev) * drm_send_event() as the main starting points. * * The memory mapping implementation will vary depending on how the driver - * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap() - * function, modern drivers should use one of the provided memory-manager - * specific implementations. For GEM-based drivers this is drm_gem_mmap(). + * manages memory. For GEM-based drivers this is drm_gem_mmap(). * * No other file operations are supported by the DRM userspace API. 
Overall the * following is an example &file_operations structure:: @@ -254,18 +243,6 @@ void drm_file_free(struct drm_file *file) (long)old_encode_dev(file->minor->kdev->devt), atomic_read(&dev->open_count)); -#ifdef CONFIG_DRM_LEGACY - if (drm_core_check_feature(dev, DRIVER_LEGACY) && - dev->driver->preclose) - dev->driver->preclose(dev, file); -#endif - - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - drm_legacy_lock_release(dev, file->filp); - - if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) - drm_legacy_reclaim_buffers(dev, file); - drm_events_release(file); if (drm_core_check_feature(dev, DRIVER_MODESET)) { @@ -279,8 +256,6 @@ void drm_file_free(struct drm_file *file) if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_release(dev, file); - drm_legacy_ctxbitmap_flush(dev, file); - if (drm_is_primary_client(file)) drm_master_release(file); @@ -367,29 +342,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor) list_add(&priv->lhead, &dev->filelist); mutex_unlock(&dev->filelist_mutex); -#ifdef CONFIG_DRM_LEGACY -#ifdef __alpha__ - /* - * Default the hose - */ - if (!dev->hose) { - struct pci_dev *pci_dev; - - pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); - if (pci_dev) { - dev->hose = pci_dev->sysdata; - pci_dev_put(pci_dev); - } - if (!dev->hose) { - struct pci_bus *b = list_entry(pci_root_buses.next, - struct pci_bus, node); - if (b) - dev->hose = b->sysdata; - } - } -#endif -#endif - return 0; } @@ -411,7 +363,6 @@ int drm_open(struct inode *inode, struct file *filp) struct drm_device *dev; struct drm_minor *minor; int retcode; - int need_setup = 0; minor = drm_minor_acquire(iminor(inode)); if (IS_ERR(minor)) @@ -421,8 +372,7 @@ int drm_open(struct inode *inode, struct file *filp) if (drm_dev_needs_global_mutex(dev)) mutex_lock(&drm_global_mutex); - if (!atomic_fetch_inc(&dev->open_count)) - need_setup = 1; + atomic_fetch_inc(&dev->open_count); /* share address_space across all char-devs of a single device */ filp->f_mapping = dev->anon_inode->i_mapping; @@ -430,13 +380,6 @@ int drm_open(struct inode *inode, struct file *filp) retcode = drm_open_helper(filp, minor); if (retcode) goto err_undo; - if (need_setup) { - retcode = drm_legacy_setup(dev); - if (retcode) { - drm_close_helper(filp); - goto err_undo; - } - } if (drm_dev_needs_global_mutex(dev)) mutex_unlock(&drm_global_mutex); @@ -460,9 +403,6 @@ void drm_lastclose(struct drm_device * dev) dev->driver->lastclose(dev); drm_dbg_core(dev, "driver lastclose completed\n"); - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - drm_legacy_dev_reinit(dev); - drm_client_dev_restore(dev); } @@ -913,7 +853,7 @@ static void print_size(struct drm_printer *p, const char *stat, unsigned u; for (u = 0; u < ARRAY_SIZE(units) - 1; u++) { - if (sz < SZ_1K) + if (sz == 0 || !IS_ALIGNED(sz, SZ_1K)) break; sz = div_u64(sz, SZ_1K); } diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c index 060b753881..8c6090a90d 100644 --- a/drivers/gpu/drm/drm_flip_work.c +++ b/drivers/gpu/drm/drm_flip_work.c @@ -27,14 +27,12 @@ #include #include -/** - * drm_flip_work_allocate_task - allocate a flip-work task - * @data: data associated to the task - * @flags: allocator flags - * - * Allocate a drm_flip_task object and attach private data to it. 
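With the task allocation and queueing helpers made private below, the remaining public flip-work surface is init/queue/commit. A brief sketch of the usual pattern for deferred framebuffer unreferencing (unref_work is an illustrative name):

#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>

static struct drm_flip_work unref_work;

/* runs later on a workqueue, once per queued framebuffer */
static void release_fb(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_put(val);
}

/* init:     drm_flip_work_init(&unref_work, "fb_unref", release_fb);	*/
/* producer: drm_flip_work_queue(&unref_work, fb);			*/
/* vblank:   drm_flip_work_commit(&unref_work, system_unbound_wq);	*/
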
- */ -struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags) +struct drm_flip_task { + struct list_head node; + void *data; +}; + +static struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags) { struct drm_flip_task *task; @@ -44,18 +42,8 @@ struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags) return task; } -EXPORT_SYMBOL(drm_flip_work_allocate_task); -/** - * drm_flip_work_queue_task - queue a specific task - * @work: the flip-work - * @task: the task to handle - * - * Queues task, that will later be run (passed back to drm_flip_func_t - * func) on a work queue after drm_flip_work_commit() is called. - */ -void drm_flip_work_queue_task(struct drm_flip_work *work, - struct drm_flip_task *task) +static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task) { unsigned long flags; @@ -63,7 +51,6 @@ void drm_flip_work_queue_task(struct drm_flip_work *work, list_add_tail(&task->node, &work->queued); spin_unlock_irqrestore(&work->lock, flags); } -EXPORT_SYMBOL(drm_flip_work_queue_task); /** * drm_flip_work_queue - queue work diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index f93a4efcee..b1be458ed4 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -20,6 +20,97 @@ #include #include +/** + * drm_format_conv_state_init - Initialize format-conversion state + * @state: The state to initialize + * + * Clears all fields in struct drm_format_conv_state. The state will + * be empty with no preallocated resources. + */ +void drm_format_conv_state_init(struct drm_format_conv_state *state) +{ + state->tmp.mem = NULL; + state->tmp.size = 0; + state->tmp.preallocated = false; +} +EXPORT_SYMBOL(drm_format_conv_state_init); + +/** + * drm_format_conv_state_copy - Copy format-conversion state + * @state: Destination state + * @old_state: Source state + * + * Copies format-conversion state from @old_state to @state; except for + * temporary storage. + */ +void drm_format_conv_state_copy(struct drm_format_conv_state *state, + const struct drm_format_conv_state *old_state) +{ + /* + * So far, there's only temporary storage here, which we don't + * duplicate. Just clear the fields. + */ + state->tmp.mem = NULL; + state->tmp.size = 0; + state->tmp.preallocated = false; +} +EXPORT_SYMBOL(drm_format_conv_state_copy); + +/** + * drm_format_conv_state_reserve - Allocates storage for format conversion + * @state: The format-conversion state + * @new_size: The minimum allocation size + * @flags: Flags for kmalloc() + * + * Allocates at least @new_size bytes and returns a pointer to the memory + * range. After calling this function, previously returned memory blocks + * are invalid. It's best to collect all memory requirements of a format + * conversion and call this function once to allocate the range. + * + * Returns: + * A pointer to the allocated memory range, or NULL otherwise. 
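The reserve helper documented here is meant to be bracketed by init/release over a longer lifetime, so scratch memory persists across conversions instead of being reallocated per update. A minimal sketch, assuming a caller that owns its own state object:

#include <linux/errno.h>
#include <linux/slab.h>
#include <drm/drm_format_helper.h>

static int convert_once(size_t len)
{
	struct drm_format_conv_state conv;
	void *buf;

	drm_format_conv_state_init(&conv);

	/* grows on demand; later calls reuse the same allocation */
	buf = drm_format_conv_state_reserve(&conv, len, GFP_KERNEL);
	if (!buf) {
		drm_format_conv_state_release(&conv);
		return -ENOMEM;
	}

	/* ... run a drm_fb_*() conversion that takes &conv ... */

	drm_format_conv_state_release(&conv);
	return 0;
}
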
+ */ +void *drm_format_conv_state_reserve(struct drm_format_conv_state *state, + size_t new_size, gfp_t flags) +{ + void *mem; + + if (new_size <= state->tmp.size) + goto out; + else if (state->tmp.preallocated) + return NULL; + + mem = krealloc(state->tmp.mem, new_size, flags); + if (!mem) + return NULL; + + state->tmp.mem = mem; + state->tmp.size = new_size; + +out: + return state->tmp.mem; +} +EXPORT_SYMBOL(drm_format_conv_state_reserve); + +/** + * drm_format_conv_state_release - Releases a format-conversion storage + * @state: The format-conversion state + * + * Releases the memory range referenced by the format-conversion state. + * After this call, all pointers to the memory are invalid. Prefer + * drm_format_conv_state_init() for cleaning up and unloading a driver. + */ +void drm_format_conv_state_release(struct drm_format_conv_state *state) +{ + if (state->tmp.preallocated) + return; + + kfree(state->tmp.mem); + state->tmp.mem = NULL; + state->tmp.size = 0; +} +EXPORT_SYMBOL(drm_format_conv_state_release); + static unsigned int clip_offset(const struct drm_rect *clip, unsigned int pitch, unsigned int cpp) { return clip->y1 * pitch + clip->x1 * cpp; @@ -45,6 +136,7 @@ EXPORT_SYMBOL(drm_fb_clip_offset); static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pixsize, const void *vaddr, const struct drm_framebuffer *fb, const struct drm_rect *clip, bool vaddr_cached_hint, + struct drm_format_conv_state *state, void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels)) { unsigned long linepixels = drm_rect_width(clip); @@ -60,7 +152,7 @@ static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_p * one line at a time. */ if (!vaddr_cached_hint) { - stmp = kmalloc(sbuf_len, GFP_KERNEL); + stmp = drm_format_conv_state_reserve(state, sbuf_len, GFP_KERNEL); if (!stmp) return -ENOMEM; } @@ -79,8 +171,6 @@ static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_p dst += dst_pitch; } - kfree(stmp); - return 0; } @@ -88,6 +178,7 @@ static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_p static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsigned long dst_pixsize, const void *vaddr, const struct drm_framebuffer *fb, const struct drm_rect *clip, bool vaddr_cached_hint, + struct drm_format_conv_state *state, void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels)) { unsigned long linepixels = drm_rect_width(clip); @@ -101,9 +192,9 @@ static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsign void *dbuf; if (vaddr_cached_hint) { - dbuf = kmalloc(dbuf_len, GFP_KERNEL); + dbuf = drm_format_conv_state_reserve(state, dbuf_len, GFP_KERNEL); } else { - dbuf = kmalloc(stmp_off + sbuf_len, GFP_KERNEL); + dbuf = drm_format_conv_state_reserve(state, stmp_off + sbuf_len, GFP_KERNEL); stmp = dbuf + stmp_off; } if (!dbuf) @@ -124,8 +215,6 @@ static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsign dst += dst_pitch; } - kfree(dbuf); - return 0; } @@ -134,6 +223,7 @@ static int drm_fb_xfrm(struct iosys_map *dst, const unsigned int *dst_pitch, const u8 *dst_pixsize, const struct iosys_map *src, const struct drm_framebuffer *fb, const struct drm_rect *clip, bool vaddr_cached_hint, + struct drm_format_conv_state *state, void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels)) { static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
/* TODO: handle src in I/O memory here */ if (dst[0].is_iomem) return __drm_fb_xfrm_toio(dst[0].vaddr_iomem, dst_pitch[0], dst_pixsize[0], - src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line); + src[0].vaddr, fb, clip, vaddr_cached_hint, state, + xfrm_line); else return __drm_fb_xfrm(dst[0].vaddr, dst_pitch[0], dst_pixsize[0], - src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line); + src[0].vaddr, fb, clip, vaddr_cached_hint, state, + xfrm_line); } /** @@ -235,6 +327,7 @@ static void drm_fb_swab32_line(void *dbuf, const void *sbuf, unsigned int pixels * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * @cached: Source buffer is mapped cached (eg. not write-combined) + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and swaps per-pixel * bytes during the process. Destination and framebuffer formats must match. The @@ -249,7 +342,8 @@ static void drm_fb_swab32_line(void *dbuf, const void *sbuf, unsigned int pixels */ void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, bool cached) + const struct drm_rect *clip, bool cached, + struct drm_format_conv_state *state) { const struct drm_format_info *format = fb->format; u8 cpp = DIV_ROUND_UP(drm_format_info_bpp(format, 0), 8); @@ -268,7 +362,7 @@ void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch, return; } - drm_fb_xfrm(dst, dst_pitch, &cpp, src, fb, clip, cached, swab_line); + drm_fb_xfrm(dst, dst_pitch, &cpp, src, fb, clip, cached, state, swab_line); } EXPORT_SYMBOL(drm_fb_swab); @@ -295,6 +389,7 @@ static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigne * @src: Array of XRGB8888 source buffers * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. Destination and framebuffer formats must match. 
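As a reminder of what the rgb332 line converter computes: each 32-bit XRGB pixel keeps only the top 3/3/2 bits per channel, packed as RRRGGGBB. An equivalent per-pixel sketch (the masking style differs from the in-tree helper, the result is the same):

#include <linux/types.h>

/* Keep the 3 MSBs of R and G and the 2 MSBs of B. */
static inline u8 xrgb8888_to_rgb332(u32 pix)
{
	u8 r = (pix >> 16) & 0xff;
	u8 g = (pix >> 8) & 0xff;
	u8 b = pix & 0xff;

	return (r & 0xe0) | ((g & 0xe0) >> 3) | ((b & 0xc0) >> 6);
}
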
The @@ -309,13 +404,13 @@ static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigne */ void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 1, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_rgb332_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332); @@ -364,6 +459,7 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf, * @src: Array of XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * @swab: Swap bytes * * This function copies parts of a framebuffer to display memory and converts the @@ -379,7 +475,8 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf, */ void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, bool swab) + const struct drm_rect *clip, struct drm_format_conv_state *state, + bool swab) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 2, @@ -392,7 +489,7 @@ void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pi else xfrm_line = drm_fb_xrgb8888_to_rgb565_line; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, xfrm_line); + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, xfrm_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565); @@ -421,6 +518,7 @@ static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsig * @src: Array of XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts * the color format during the process. The parameters @dst, @dst_pitch and @@ -436,13 +534,13 @@ static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsig */ void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 2, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_xrgb1555_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555); @@ -473,6 +571,7 @@ static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsig * @src: Array of XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts * the color format during the process. 
The parameters @dst, @dst_pitch and @@ -488,13 +587,13 @@ static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsig */ void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 2, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_argb1555_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555); @@ -525,6 +624,7 @@ static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsig * @src: Array of XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts * the color format during the process. The parameters @dst, @dst_pitch and @@ -540,13 +640,13 @@ static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsig */ void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 2, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_rgba5551_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551); @@ -575,6 +675,7 @@ static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigne * @src: Array of XRGB8888 source buffers * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. Destination and framebuffer formats must match. The @@ -590,13 +691,13 @@ static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigne */ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 3, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_rgb888_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888); @@ -623,6 +724,7 @@ static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsig * @src: Array of XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. 
The parameters @dst, @dst_pitch and @src refer @@ -638,13 +740,13 @@ static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsig */ void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 4, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_argb8888_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888); @@ -669,13 +771,14 @@ static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsig static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, + struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 4, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_abgr8888_line); } @@ -699,13 +802,14 @@ static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsig static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, + struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 4, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_xbgr8888_line); } @@ -735,6 +839,7 @@ static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, un * @src: Array of XRGB8888 source buffers * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. Destination and framebuffer formats must match. The @@ -750,13 +855,14 @@ static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, un */ void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, + struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 4, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_xrgb2101010_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010); @@ -788,6 +894,7 @@ static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, un * @src: Array of XRGB8888 source buffers * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts * the color format during the process. 
The parameters @dst, @dst_pitch and @@ -803,13 +910,14 @@ static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, un */ void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, + struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 4, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_argb2101010_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010); @@ -839,6 +947,7 @@ static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned * @src: Array of XRGB8888 source buffers * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. Destination and framebuffer formats must match. The @@ -858,13 +967,13 @@ static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned */ void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 1, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_gray8_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8); @@ -878,6 +987,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8); * @src: The framebuffer memory to copy from * @fb: The framebuffer to copy from * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory. 
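Taken together with the shadow-plane changes later in this series, a conversion state now typically reaches drm_fb_blit() through struct drm_shadow_plane_state. A hedged sketch (blit_damage and the single-plane pitch handling are illustrative, not a kernel helper):

#include <drm/drm_format_helper.h>
#include <drm/drm_gem_atomic_helper.h>

static int blit_damage(struct drm_plane_state *plane_state,
		       struct iosys_map *dst, unsigned int dst_pitch,
		       u32 dst_format, struct drm_rect *damage)
{
	struct drm_shadow_plane_state *shadow =
		to_drm_shadow_plane_state(plane_state);

	/* fmtcnv_state lives in the plane state and is reused per commit */
	return drm_fb_blit(dst, &dst_pitch, dst_format, shadow->data,
			   plane_state->fb, damage, &shadow->fmtcnv_state);
}
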
If the * formats of the display and the framebuffer mismatch, the blit function @@ -896,7 +1006,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8); */ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { uint32_t fb_format = fb->format->format; @@ -904,44 +1014,44 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d drm_fb_memcpy(dst, dst_pitch, src, fb, clip); return 0; } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false); + drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); return 0; } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false); + drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); return 0; } else if (fb_format == DRM_FORMAT_XRGB8888) { if (dst_format == DRM_FORMAT_RGB565) { - drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, false); + drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state, false); return 0; } else if (dst_format == DRM_FORMAT_XRGB1555) { - drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_ARGB1555) { - drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_RGBA5551) { - drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_RGB888) { - drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_ARGB8888) { - drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_XBGR8888) { - drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_ABGR8888) { - drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_XRGB2101010) { - drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_ARGB2101010) { - drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_BGRX8888) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false); + drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); return 0; } } @@ -978,6 +1088,7 @@ static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int * @src: Array of XRGB8888 source buffers * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. Destination and framebuffer formats must match. 
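The mono path first reduces XRGB8888 to 8-bit grayscale before thresholding. The weighting is the usual integer approximation of ITU BT.601 luma; a per-pixel sketch (illustrative, not the exact in-tree masking):

#include <linux/types.h>

/* Approximate Y = 0.299 R + 0.587 G + 0.114 B with integer math. */
static inline u8 xrgb8888_to_gray8(u32 pix)
{
	u8 r = (pix >> 16) & 0xff;
	u8 g = (pix >> 8) & 0xff;
	u8 b = pix & 0xff;

	return (3 * r + 6 * g + b) / 10;
}
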
The @@ -1002,7 +1113,7 @@ static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int */ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = { 0, 0, 0, 0 @@ -1042,7 +1153,7 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc * Allocate a buffer to be used for both copying from the cma * memory and to store the intermediate grayscale line pixels. */ - src32 = kmalloc(len_src32 + linepixels, GFP_KERNEL); + src32 = drm_format_conv_state_reserve(state, len_src32 + linepixels, GFP_KERNEL); if (!src32) return; @@ -1056,8 +1167,6 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc vaddr += fb->pitches[0]; mono += dst_pitch_0; } - - kfree(src32); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono); diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 36c5629af7..888aadb6a4 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -394,6 +394,31 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w) } } +static int drm_mode_closefb(struct drm_framebuffer *fb, + struct drm_file *file_priv) +{ + struct drm_framebuffer *fbl; + bool found = false; + + mutex_lock(&file_priv->fbs_lock); + list_for_each_entry(fbl, &file_priv->fbs, filp_head) + if (fb == fbl) + found = true; + + if (!found) { + mutex_unlock(&file_priv->fbs_lock); + return -ENOENT; + } + + list_del_init(&fb->filp_head); + mutex_unlock(&file_priv->fbs_lock); + + /* Drop the reference that was stored in the fbs list */ + drm_framebuffer_put(fb); + + return 0; +} + /** * drm_mode_rmfb - remove an FB from the configuration * @dev: drm device @@ -410,9 +435,8 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w) int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, struct drm_file *file_priv) { - struct drm_framebuffer *fb = NULL; - struct drm_framebuffer *fbl = NULL; - int found = 0; + struct drm_framebuffer *fb; + int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; @@ -421,24 +445,13 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, if (!fb) return -ENOENT; - mutex_lock(&file_priv->fbs_lock); - list_for_each_entry(fbl, &file_priv->fbs, filp_head) - if (fb == fbl) - found = 1; - if (!found) { - mutex_unlock(&file_priv->fbs_lock); - goto fail_unref; + ret = drm_mode_closefb(fb, file_priv); + if (ret != 0) { + drm_framebuffer_put(fb); + return ret; } - list_del_init(&fb->filp_head); - mutex_unlock(&file_priv->fbs_lock); - - /* drop the reference we picked up in framebuffer lookup */ - drm_framebuffer_put(fb); - /* - * we now own the reference that was stored in the fbs list - * * drm_framebuffer_remove may fail with -EINTR on pending signals, * so run this in a separate stack as there's no way to correctly * handle this after the fb is already removed from the lookup table. 
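For comparison with RMFB above: the new CLOSEFB ioctl only drops this file's reference and leaves any active scanout untouched. A hedged userspace sketch, assuming uapi headers that ship DRM_IOCTL_MODE_CLOSEFB and libdrm's drmIoctl():

#include <stdint.h>
#include <drm/drm.h>	/* DRM_IOCTL_MODE_CLOSEFB, struct drm_mode_closefb */
#include <xf86drm.h>	/* drmIoctl() */

static int closefb(int fd, uint32_t fb_id)
{
	/* .pad must be zero, matching the r->pad check in the ioctl */
	struct drm_mode_closefb req = { .fb_id = fb_id, .pad = 0 };

	return drmIoctl(fd, DRM_IOCTL_MODE_CLOSEFB, &req);
}
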
@@ -448,6 +461,7 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn); INIT_LIST_HEAD(&arg.fbs); + drm_WARN_ON(dev, !list_empty(&fb->filp_head)); list_add_tail(&fb->filp_head, &arg.fbs); schedule_work(&arg.work); @@ -457,10 +471,6 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, drm_framebuffer_put(fb); return 0; - -fail_unref: - drm_framebuffer_put(fb); - return -ENOENT; } int drm_mode_rmfb_ioctl(struct drm_device *dev, @@ -471,6 +481,28 @@ int drm_mode_rmfb_ioctl(struct drm_device *dev, return drm_mode_rmfb(dev, *fb_id, file_priv); } +int drm_mode_closefb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_closefb *r = data; + struct drm_framebuffer *fb; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + if (r->pad) + return -EINVAL; + + fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id); + if (!fb) + return -ENOENT; + + ret = drm_mode_closefb(fb, file_priv); + drm_framebuffer_put(fb); + return ret; +} + /** * drm_mode_getfb - get FB info * @dev: drm device for the ioctl @@ -796,6 +828,8 @@ void drm_framebuffer_free(struct kref *kref) container_of(kref, struct drm_framebuffer, base.refcount); struct drm_device *dev = fb->dev; + drm_WARN_ON(dev, !list_empty(&fb->filp_head)); + /* * The lookup idr holds a weak reference, which has not necessarily been * removed at this point. Check for that. @@ -1088,7 +1122,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) dev = fb->dev; - WARN_ON(!list_empty(&fb->filp_head)); + drm_WARN_ON(dev, !list_empty(&fb->filp_head)); /* * drm ABI mandates that we remove any deleted framebuffers from active diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index 5d4b9cd077..93337543aa 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -218,7 +218,14 @@ void __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane, struct drm_shadow_plane_state *new_shadow_plane_state) { + struct drm_plane_state *plane_state = plane->state; + struct drm_shadow_plane_state *shadow_plane_state = + to_drm_shadow_plane_state(plane_state); + __drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base); + + drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state, + &shadow_plane_state->fmtcnv_state); } EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state); @@ -266,6 +273,7 @@ EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state); */ void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state) { + drm_format_conv_state_release(&shadow_plane_state->fmtcnv_state); __drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base); } EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state); @@ -302,6 +310,7 @@ void __drm_gem_reset_shadow_plane(struct drm_plane *plane, struct drm_shadow_plane_state *shadow_plane_state) { __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); + drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); } EXPORT_SYMBOL(__drm_gem_reset_shadow_plane); diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index b80d4e1cc9..f9eb56f24b 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -61,6 +61,42 @@ * contained within struct drm_gpuva already. Hence, for inserting &drm_gpuva * entries from within dma-fence signalling critical sections it is enough to * pre-allocate the &drm_gpuva structures. 
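The matching userspace side of the new ioctl: unlike RMFB, CLOSEFB only drops the calling file's reference, so the framebuffer can stay on screen across, e.g., a compositor handover. A minimal sketch (assumes an already-open DRM fd):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Drop this DRM file's reference to fb_id without disabling the CRTC it
 * may still be scanned out on (which DRM_IOCTL_MODE_RMFB would do). */
static int close_fb(int drm_fd, uint32_t fb_id)
{
	struct drm_mode_closefb req = {
		.fb_id = fb_id,
		.pad = 0,	/* must be zero, the kernel rejects anything else */
	};

	return ioctl(drm_fd, DRM_IOCTL_MODE_CLOSEFB, &req);
}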
+ * + * &drm_gem_objects which are private to a single VM can share a common + * &dma_resv in order to improve locking efficiency (e.g. with &drm_exec). + * For this purpose drivers must pass a &drm_gem_object to drm_gpuvm_init(), in + * the following called 'resv object', which serves as the container of the + * GPUVM's shared &dma_resv. This resv object can be a driver specific + * &drm_gem_object, such as the &drm_gem_object containing the root page table, + * but it can also be a 'dummy' object, which can be allocated with + * drm_gpuvm_resv_object_alloc(). + * + * In order to connect a struct drm_gpuva to its backing &drm_gem_object each + * &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each + * &drm_gpuvm_bo contains a list of &drm_gpuva structures. + * + * A &drm_gpuvm_bo is an abstraction that represents a combination of a + * &drm_gpuvm and a &drm_gem_object. Every such combination should be unique. + * This is ensured by the API through drm_gpuvm_bo_obtain() and + * drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding + * &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this + * particular combination. If no such instance exists, a new one is created and + * linked to the &drm_gem_object. + * + * &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used + * as entry for the &drm_gpuvm's lists of external and evicted objects. Those + * lists are maintained in order to accelerate locking of dma-resv locks and + * validation of evicted objects bound in a &drm_gpuvm. For instance, all + * &drm_gem_object's &dma_resv of a given &drm_gpuvm can be locked by calling + * drm_gpuvm_exec_lock(). Once locked, drivers can call drm_gpuvm_validate() in + * order to validate all evicted &drm_gem_objects. It is also possible to lock + * additional &drm_gem_objects by providing the corresponding parameters to + * drm_gpuvm_exec_lock() as well as open code the &drm_exec loop while making + * use of helper functions such as drm_gpuvm_prepare_range() or + * drm_gpuvm_prepare_objects(). + * + * Every bound &drm_gem_object is treated as an external object when its + * &dma_resv structure differs from the &drm_gpuvm's common &dma_resv + * structure. */ /** @@ -386,21 +422,42 @@ /** * DOC: Locking * - * Generally, the GPU VA manager does not take care of locking itself, it is - * the drivers responsibility to take care about locking. Drivers might want to - * protect the following operations: inserting, removing and iterating - * &drm_gpuva objects as well as generating all kinds of operations, such as - * split / merge or prefetch. - * - * The GPU VA manager also does not take care of the locking of the backing - * &drm_gem_object buffers GPU VA lists by itself; drivers are responsible to - * enforce mutual exclusion using either the GEMs dma_resv lock or alternatively - * a driver specific external lock. For the latter see also - * drm_gem_gpuva_set_lock(). - * - * However, the GPU VA manager contains lockdep checks to ensure callers of its - * API hold the corresponding lock whenever the &drm_gem_objects GPU VA list is - * accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink(). + * In terms of managing &drm_gpuva entries DRM GPUVM does not take care of + * locking itself; it is the driver's responsibility to take care of locking.
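A sketch of what VM setup looks like for a driver adopting the resv-object scheme described above; struct my_vm, the ops, and the VA range are assumptions. The vm_free hook is mandatory because the GPUVM is reference counted from this series on (drm_gpuvm_free() below warns when it is missing):

#include <linux/err.h>
#include <linux/slab.h>
#include <drm/drm_gpuvm.h>

struct my_vm {
	struct drm_gpuvm gpuvm;
};

static void my_vm_free(struct drm_gpuvm *gpuvm)
{
	kfree(container_of(gpuvm, struct my_vm, gpuvm));
}

static const struct drm_gpuvm_ops my_gpuvm_ops = {
	.vm_free = my_vm_free,
};

static struct my_vm *my_vm_create(struct drm_device *drm)
{
	struct drm_gem_object *r_obj;
	struct my_vm *vm;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return ERR_PTR(-ENOMEM);

	/* Dummy GEM object providing the dma_resv shared by all BOs
	 * private to this VM. */
	r_obj = drm_gpuvm_resv_object_alloc(drm);
	if (!r_obj) {
		kfree(vm);
		return ERR_PTR(-ENOMEM);
	}

	drm_gpuvm_init(&vm->gpuvm, "my-vm", 0, drm, r_obj,
		       0, 1ull << 48,	/* managed VA space, assumed */
		       0, 0,		/* no kernel reserved region */
		       &my_gpuvm_ops);

	/* drm_gpuvm_init() took its own reference on the resv object. */
	drm_gem_object_put(r_obj);
	return vm;
}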
+ * Drivers might want to protect the following operations: inserting, removing + * and iterating &drm_gpuva objects as well as generating all kinds of + * operations, such as split / merge or prefetch. + * + * DRM GPUVM also does not take care of the locking of the backing + * &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by + * itself; drivers are responsible for enforcing mutual exclusion using either + * the GEMs dma_resv lock or alternatively a driver specific external lock. For + * the latter see also drm_gem_gpuva_set_lock(). + * + * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold + * the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed + * by functions such as drm_gpuva_link() or drm_gpuva_unlink(), but also + * drm_gpuvm_bo_obtain() and drm_gpuvm_bo_put(). + * + * The latter is required since on creation and destruction of a &drm_gpuvm_bo + * the &drm_gpuvm_bo is attached to / removed from the &drm_gem_objects gpuva + * list. Subsequent calls to drm_gpuvm_bo_obtain() for the same &drm_gpuvm and + * &drm_gem_object must be able to observe previous creations and destructions + * of &drm_gpuvm_bos in order to keep instances unique. + * + * The &drm_gpuvm's lists for keeping track of external and evicted objects are + * protected against concurrent insertion / removal and iteration internally. + * + * However, drivers still need to protect concurrent calls to functions + * iterating those lists, namely drm_gpuvm_prepare_objects() and + * drm_gpuvm_validate(). + * + * Alternatively, drivers can set the &DRM_GPUVM_RESV_PROTECTED flag to indicate + * that the corresponding &dma_resv locks are held in order to protect the + * lists. If &DRM_GPUVM_RESV_PROTECTED is set, internal locking is disabled and + * the corresponding lockdep checks are enabled. This is an optimization for + * drivers which are capable of taking the corresponding &dma_resv locks and + * hence do not require internal locking.
*/ /** @@ -430,6 +487,7 @@ * { * struct drm_gpuva_ops *ops; * struct drm_gpuva_op *op; + * struct drm_gpuvm_bo *vm_bo; * * driver_lock_va_space(); * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, @@ -437,6 +495,10 @@ * if (IS_ERR(ops)) * return PTR_ERR(ops); * + * vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj); + * if (IS_ERR(vm_bo)) + * return PTR_ERR(vm_bo); + * * drm_gpuva_for_each_op(op, ops) { * struct drm_gpuva *va; * @@ -449,7 +511,7 @@ * * driver_vm_map(); * drm_gpuva_map(gpuvm, va, &op->map); - * drm_gpuva_link(va); + * drm_gpuva_link(va, vm_bo); * * break; * case DRM_GPUVA_OP_REMAP: { @@ -476,11 +538,11 @@ * driver_vm_remap(); * drm_gpuva_remap(prev, next, &op->remap); * - * drm_gpuva_unlink(va); * if (prev) - * drm_gpuva_link(prev); + * drm_gpuva_link(prev, va->vm_bo); * if (next) - * drm_gpuva_link(next); + * drm_gpuva_link(next, va->vm_bo); + * drm_gpuva_unlink(va); * * break; * } @@ -496,6 +558,7 @@ * break; * } * } + * drm_gpuvm_bo_put(vm_bo); * driver_unlock_va_space(); * * return 0; @@ -505,6 +568,7 @@ * * struct driver_context { * struct drm_gpuvm *gpuvm; + * struct drm_gpuvm_bo *vm_bo; * struct drm_gpuva *new_va; * struct drm_gpuva *prev_va; * struct drm_gpuva *next_va; @@ -525,6 +589,7 @@ * struct drm_gem_object *obj, u64 offset) * { * struct driver_context ctx; + * struct drm_gpuvm_bo *vm_bo; * struct drm_gpuva_ops *ops; * struct drm_gpuva_op *op; * int ret = 0; @@ -534,16 +599,23 @@ * ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL); * ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL); * ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL); - * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) { + * ctx.vm_bo = drm_gpuvm_bo_create(gpuvm, obj); + * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va || !ctx.vm_bo) { * ret = -ENOMEM; * goto out; * } * + * // Typically protected with a driver specific GEM gpuva lock + * // used in the fence signaling path for drm_gpuva_link() and + * // drm_gpuva_unlink(), hence pre-allocate. + * ctx.vm_bo = drm_gpuvm_bo_obtain_prealloc(ctx.vm_bo); + * * driver_lock_va_space(); * ret = drm_gpuvm_sm_map(gpuvm, &ctx, addr, range, obj, offset); * driver_unlock_va_space(); * * out: + * drm_gpuvm_bo_put(ctx.vm_bo); * kfree(ctx.new_va); * kfree(ctx.prev_va); * kfree(ctx.next_va); @@ -556,7 +628,7 @@ * * drm_gpuva_map(ctx->vm, ctx->new_va, &op->map); * - * drm_gpuva_link(ctx->new_va); + * drm_gpuva_link(ctx->new_va, ctx->vm_bo); * * // prevent the new GPUVA from being freed in * // driver_mapping_create() @@ -568,22 +640,23 @@ * int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx) * { * struct driver_context *ctx = __ctx; + * struct drm_gpuva *va = op->remap.unmap->va; * * drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap); * - * drm_gpuva_unlink(op->remap.unmap->va); - * kfree(op->remap.unmap->va); - * * if (op->remap.prev) { - * drm_gpuva_link(ctx->prev_va); + * drm_gpuva_link(ctx->prev_va, va->vm_bo); * ctx->prev_va = NULL; * } * * if (op->remap.next) { - * drm_gpuva_link(ctx->next_va); + * drm_gpuva_link(ctx->next_va, va->vm_bo); * ctx->next_va = NULL; * } * + * drm_gpuva_unlink(va); + * kfree(va); + * * return 0; * } * @@ -597,6 +670,201 @@ * } */ +/** + * get_next_vm_bo_from_list() - get the next vm_bo element + * @__gpuvm: the &drm_gpuvm + * @__list_name: the name of the list we're iterating on + * @__local_list: a pointer to the local list used to store already iterated items + * @__prev_vm_bo: the previous element we got from get_next_vm_bo_from_list() + * + * This helper is here to provide lockless list iteration.
Lockless as in, the + * iterator releases the lock immediately after picking the first element from + * the list, so list insertion and deletion can happen concurrently. + * + * Elements popped from the original list are kept in a local list, so removal + * and is_empty checks can still happen while we're iterating the list. + */ +#define get_next_vm_bo_from_list(__gpuvm, __list_name, __local_list, __prev_vm_bo) \ + ({ \ + struct drm_gpuvm_bo *__vm_bo = NULL; \ + \ + drm_gpuvm_bo_put(__prev_vm_bo); \ + \ + spin_lock(&(__gpuvm)->__list_name.lock); \ + if (!(__gpuvm)->__list_name.local_list) \ + (__gpuvm)->__list_name.local_list = __local_list; \ + else \ + drm_WARN_ON((__gpuvm)->drm, \ + (__gpuvm)->__list_name.local_list != __local_list); \ + \ + while (!list_empty(&(__gpuvm)->__list_name.list)) { \ + __vm_bo = list_first_entry(&(__gpuvm)->__list_name.list, \ + struct drm_gpuvm_bo, \ + list.entry.__list_name); \ + if (kref_get_unless_zero(&__vm_bo->kref)) { \ + list_move_tail(&(__vm_bo)->list.entry.__list_name, \ + __local_list); \ + break; \ + } else { \ + list_del_init(&(__vm_bo)->list.entry.__list_name); \ + __vm_bo = NULL; \ + } \ + } \ + spin_unlock(&(__gpuvm)->__list_name.lock); \ + \ + __vm_bo; \ + }) + +/** + * for_each_vm_bo_in_list() - internal vm_bo list iterator + * @__gpuvm: the &drm_gpuvm + * @__list_name: the name of the list we're iterating on + * @__local_list: a pointer to the local list used to store already iterated items + * @__vm_bo: the struct drm_gpuvm_bo to assign in each iteration step + * + * This helper is here to provide lockless list iteration. Lockless as in, the + * iterator releases the lock immediately after picking the first element from the + * list, hence list insertion and deletion can happen concurrently. + * + * It is not allowed to re-assign the vm_bo pointer from inside this loop. + * + * Typical use: + * + * struct drm_gpuvm_bo *vm_bo; + * LIST_HEAD(my_local_list); + * + * ret = 0; + * for_each_vm_bo_in_list(gpuvm, <list_name>, &my_local_list, vm_bo) { + * ret = do_something_with_vm_bo(..., vm_bo); + * if (ret) + * break; + * } + * // Drop ref in case we break out of the loop. + * drm_gpuvm_bo_put(vm_bo); + * restore_vm_bo_list(gpuvm, <list_name>); + * + * + * Only used for internal list iterations, not meant to be exposed to the outside + * world. + */ +#define for_each_vm_bo_in_list(__gpuvm, __list_name, __local_list, __vm_bo) \ + for (__vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name, \ + __local_list, NULL); \ + __vm_bo; \ + __vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name, \ + __local_list, __vm_bo)) + +static void +__restore_vm_bo_list(struct drm_gpuvm *gpuvm, spinlock_t *lock, + struct list_head *list, struct list_head **local_list) +{ + /* Merge back the two lists, moving local list elements to the + * head to preserve previous ordering, in case it matters. + */ + spin_lock(lock); + if (*local_list) { + list_splice(*local_list, list); + *local_list = NULL; + } + spin_unlock(lock); +} + +/** + * restore_vm_bo_list() - move vm_bo elements back to their original list + * @__gpuvm: the &drm_gpuvm + * @__list_name: the name of the list we're iterating on + * + * When we're done iterating a vm_bo list, we should call restore_vm_bo_list() + * to restore the original state and let new iterations take place.
+ */ +#define restore_vm_bo_list(__gpuvm, __list_name) \ + __restore_vm_bo_list((__gpuvm), &(__gpuvm)->__list_name.lock, \ + &(__gpuvm)->__list_name.list, \ + &(__gpuvm)->__list_name.local_list) + +static void +cond_spin_lock(spinlock_t *lock, bool cond) +{ + if (cond) + spin_lock(lock); +} + +static void +cond_spin_unlock(spinlock_t *lock, bool cond) +{ + if (cond) + spin_unlock(lock); +} + +static void +__drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock, + struct list_head *entry, struct list_head *list) +{ + cond_spin_lock(lock, !!lock); + if (list_empty(entry)) + list_add_tail(entry, list); + cond_spin_unlock(lock, !!lock); +} + +/** + * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list + * @__vm_bo: the &drm_gpuvm_bo + * @__list_name: the name of the list to insert into + * @__lock: whether to lock with the internal spinlock + * + * Inserts the given @__vm_bo into the list specified by @__list_name. + */ +#define drm_gpuvm_bo_list_add(__vm_bo, __list_name, __lock) \ + __drm_gpuvm_bo_list_add((__vm_bo)->vm, \ + __lock ? &(__vm_bo)->vm->__list_name.lock : \ + NULL, \ + &(__vm_bo)->list.entry.__list_name, \ + &(__vm_bo)->vm->__list_name.list) + +static void +__drm_gpuvm_bo_list_del(struct drm_gpuvm *gpuvm, spinlock_t *lock, + struct list_head *entry, bool init) +{ + cond_spin_lock(lock, !!lock); + if (init) { + if (!list_empty(entry)) + list_del_init(entry); + } else { + list_del(entry); + } + cond_spin_unlock(lock, !!lock); +} + +/** + * drm_gpuvm_bo_list_del_init() - remove a vm_bo from the given list + * @__vm_bo: the &drm_gpuvm_bo + * @__list_name: the name of the list to remove from + * @__lock: whether to lock with the internal spinlock + * + * Removes the given @__vm_bo from the list specified by @__list_name. + */ +#define drm_gpuvm_bo_list_del_init(__vm_bo, __list_name, __lock) \ + __drm_gpuvm_bo_list_del((__vm_bo)->vm, \ + __lock ? &(__vm_bo)->vm->__list_name.lock : \ + NULL, \ + &(__vm_bo)->list.entry.__list_name, \ + true) + +/** + * drm_gpuvm_bo_list_del() - remove a vm_bo from the given list + * @__vm_bo: the &drm_gpuvm_bo + * @__list_name: the name of the list to remove from + * @__lock: whether to lock with the internal spinlock + * + * Removes the given @__vm_bo from the list specified by @__list_name. + */ +#define drm_gpuvm_bo_list_del(__vm_bo, __list_name, __lock) \ + __drm_gpuvm_bo_list_del((__vm_bo)->vm, \ + __lock ?
&(__vm_bo)->vm->__list_name.lock : \ + NULL, \ + &(__vm_bo)->list.entry.__list_name, \ + false) + #define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node) #define GPUVA_START(node) ((node)->va.addr) @@ -618,8 +886,14 @@ drm_gpuvm_check_overflow(u64 addr, u64 range) { u64 end; - return WARN(check_add_overflow(addr, range, &end), - "GPUVA address limited to %zu bytes.\n", sizeof(end)); + return check_add_overflow(addr, range, &end); +} + +static bool +drm_gpuvm_warn_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range) +{ + return drm_WARN(gpuvm->drm, drm_gpuvm_check_overflow(addr, range), + "GPUVA address limited to %zu bytes.\n", sizeof(addr)); } static bool @@ -643,7 +917,18 @@ drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range) return krange && addr < kend && kstart < end; } -static bool +/** + * drm_gpuvm_range_valid() - checks whether the given range is valid for the + * given &drm_gpuvm + * @gpuvm: the GPUVM to check the range for + * @addr: the base address + * @range: the range starting from the base address + * + * Checks whether the range is within the GPUVM's managed boundaries. + * + * Returns: true for a valid range, false otherwise + */ +bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range) { @@ -651,11 +936,52 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, drm_gpuvm_in_mm_range(gpuvm, addr, range) && !drm_gpuvm_in_kernel_node(gpuvm, addr, range); } +EXPORT_SYMBOL_GPL(drm_gpuvm_range_valid); + +static void +drm_gpuvm_gem_object_free(struct drm_gem_object *obj) +{ + drm_gem_object_release(obj); + kfree(obj); +} + +static const struct drm_gem_object_funcs drm_gpuvm_object_funcs = { + .free = drm_gpuvm_gem_object_free, +}; + +/** + * drm_gpuvm_resv_object_alloc() - allocate a dummy &drm_gem_object + * @drm: the drivers &drm_device + * + * Allocates a dummy &drm_gem_object which can be passed to drm_gpuvm_init() in + * order to serve as root GEM object providing the &drm_resv shared across + * &drm_gem_objects local to a single GPUVM. + * + * Returns: the &drm_gem_object on success, NULL on failure + */ +struct drm_gem_object * +drm_gpuvm_resv_object_alloc(struct drm_device *drm) +{ + struct drm_gem_object *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return NULL; + + obj->funcs = &drm_gpuvm_object_funcs; + drm_gem_private_object_init(drm, obj, 0); + + return obj; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_resv_object_alloc); /** * drm_gpuvm_init() - initialize a &drm_gpuvm * @gpuvm: pointer to the &drm_gpuvm to initialize * @name: the name of the GPU VA space + * @flags: the &drm_gpuvm_flags for this GPUVM + * @drm: the &drm_device this VM resides in + * @r_obj: the resv &drm_gem_object providing the GPUVM's common &dma_resv * @start_offset: the start offset of the GPU VA space * @range: the size of the GPU VA space * @reserve_offset: the start of the kernel reserved GPU VA area @@ -668,8 +994,10 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, * &name is expected to be managed by the surrounding driver structures. 
*/ void -drm_gpuvm_init(struct drm_gpuvm *gpuvm, - const char *name, +drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, + enum drm_gpuvm_flags flags, + struct drm_device *drm, + struct drm_gem_object *r_obj, u64 start_offset, u64 range, u64 reserve_offset, u64 reserve_range, const struct drm_gpuvm_ops *ops) @@ -677,45 +1005,713 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, gpuvm->rb.tree = RB_ROOT_CACHED; INIT_LIST_HEAD(&gpuvm->rb.list); - drm_gpuvm_check_overflow(start_offset, range); - gpuvm->mm_start = start_offset; - gpuvm->mm_range = range; + INIT_LIST_HEAD(&gpuvm->extobj.list); + spin_lock_init(&gpuvm->extobj.lock); + + INIT_LIST_HEAD(&gpuvm->evict.list); + spin_lock_init(&gpuvm->evict.lock); + + kref_init(&gpuvm->kref); gpuvm->name = name ? name : "unknown"; + gpuvm->flags = flags; gpuvm->ops = ops; + gpuvm->drm = drm; + gpuvm->r_obj = r_obj; - memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva)); + drm_gem_object_get(r_obj); + + drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range); + gpuvm->mm_start = start_offset; + gpuvm->mm_range = range; + memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva)); if (reserve_range) { gpuvm->kernel_alloc_node.va.addr = reserve_offset; gpuvm->kernel_alloc_node.va.range = reserve_range; - if (likely(!drm_gpuvm_check_overflow(reserve_offset, - reserve_range))) + if (likely(!drm_gpuvm_warn_check_overflow(gpuvm, reserve_offset, + reserve_range))) __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node); } } EXPORT_SYMBOL_GPL(drm_gpuvm_init); +static void +drm_gpuvm_fini(struct drm_gpuvm *gpuvm) +{ + gpuvm->name = NULL; + + if (gpuvm->kernel_alloc_node.va.range) + __drm_gpuva_remove(&gpuvm->kernel_alloc_node); + + drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root), + "GPUVA tree is not empty, potentially leaking memory.\n"); + + drm_WARN(gpuvm->drm, !list_empty(&gpuvm->extobj.list), + "Extobj list should be empty.\n"); + drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list), + "Evict list should be empty.\n"); + + drm_gem_object_put(gpuvm->r_obj); +} + +static void +drm_gpuvm_free(struct kref *kref) +{ + struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref); + + drm_gpuvm_fini(gpuvm); + + if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free)) + return; + + gpuvm->ops->vm_free(gpuvm); +} + /** - * drm_gpuvm_destroy() - cleanup a &drm_gpuvm - * @gpuvm: pointer to the &drm_gpuvm to clean up + * drm_gpuvm_put() - drop a struct drm_gpuvm reference + * @gpuvm: the &drm_gpuvm to release the reference of + * + * This releases a reference to @gpuvm. * - * Note that it is a bug to call this function on a manager that still - * holds GPU VA mappings. + * This function may be called from atomic context. */ void -drm_gpuvm_destroy(struct drm_gpuvm *gpuvm) +drm_gpuvm_put(struct drm_gpuvm *gpuvm) { - gpuvm->name = NULL; + if (gpuvm) + kref_put(&gpuvm->kref, drm_gpuvm_free); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_put); - if (gpuvm->kernel_alloc_node.va.range) - __drm_gpuva_remove(&gpuvm->kernel_alloc_node); +static int +exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj, + unsigned int num_fences) +{ + return num_fences ? 
drm_exec_prepare_obj(exec, obj, num_fences) : + drm_exec_lock_obj(exec, obj); +} - WARN(!RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root), - "GPUVA tree is not empty, potentially leaking memory."); +/** + * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv + * @gpuvm: the &drm_gpuvm + * @exec: the &drm_exec context + * @num_fences: the amount of &dma_fences to reserve + * + * Calls drm_exec_prepare_obj() for the GPUVMs dummy &drm_gem_object; if + * @num_fences is zero drm_exec_lock_obj() is called instead. + * + * Using this function directly, it is the drivers responsibility to call + * drm_exec_init() and drm_exec_fini() accordingly. + * + * Returns: 0 on success, negative error code on failure. + */ +int +drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences) +{ + return exec_prepare_obj(exec, gpuvm->r_obj, num_fences); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_vm); + +static int +__drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences) +{ + struct drm_gpuvm_bo *vm_bo; + LIST_HEAD(extobjs); + int ret = 0; + + for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) { + ret = exec_prepare_obj(exec, vm_bo->obj, num_fences); + if (ret) + break; + } + /* Drop ref in case we break out of the loop. */ + drm_gpuvm_bo_put(vm_bo); + restore_vm_bo_list(gpuvm, extobj); + + return ret; } -EXPORT_SYMBOL_GPL(drm_gpuvm_destroy); + +static int +drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences) +{ + struct drm_gpuvm_bo *vm_bo; + int ret = 0; + + drm_gpuvm_resv_assert_held(gpuvm); + list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) { + ret = exec_prepare_obj(exec, vm_bo->obj, num_fences); + if (ret) + break; + + if (vm_bo->evicted) + drm_gpuvm_bo_list_add(vm_bo, evict, false); + } + + return ret; +} + +/** + * drm_gpuvm_prepare_objects() - prepare all assoiciated BOs + * @gpuvm: the &drm_gpuvm + * @exec: the &drm_exec locking context + * @num_fences: the amount of &dma_fences to reserve + * + * Calls drm_exec_prepare_obj() for all &drm_gem_objects the given + * &drm_gpuvm contains mappings of; if @num_fences is zero drm_exec_lock_obj() + * is called instead. + * + * Using this function directly, it is the drivers responsibility to call + * drm_exec_init() and drm_exec_fini() accordingly. + * + * Note: This function is safe against concurrent insertion and removal of + * external objects, however it is not safe against concurrent usage itself. + * + * Drivers need to make sure to protect this case with either an outer VM lock + * or by calling drm_gpuvm_prepare_vm() before this function within the + * drm_exec_until_all_locked() loop, such that the GPUVM's dma-resv lock ensures + * mutual exclusion. + * + * Returns: 0 on success, negative error code on failure. 
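The protection scheme this comment prescribes looks as follows in driver code — essentially what drm_gpuvm_exec_lock() below wraps. A sketch with an assumed fence count of one:

#include <drm/drm_exec.h>
#include <drm/drm_gpuvm.h>

static int my_lock_vm_bos(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
{
	int ret;

	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);

	drm_exec_until_all_locked(exec) {
		/* Lock the VM's common dma-resv first, so that the extobj
		 * walk below runs under it. */
		ret = drm_gpuvm_prepare_vm(gpuvm, exec, 1);
		drm_exec_retry_on_contention(exec);
		if (ret)
			goto err;

		ret = drm_gpuvm_prepare_objects(gpuvm, exec, 1);
		drm_exec_retry_on_contention(exec);
		if (ret)
			goto err;
	}

	return 0;

err:
	drm_exec_fini(exec);
	return ret;
}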
+ */ +int +drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences) +{ + if (drm_gpuvm_resv_protected(gpuvm)) + return drm_gpuvm_prepare_objects_locked(gpuvm, exec, + num_fences); + else + return __drm_gpuvm_prepare_objects(gpuvm, exec, num_fences); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects); + +/** + * drm_gpuvm_prepare_range() - prepare all BOs mapped within a given range + * @gpuvm: the &drm_gpuvm + * @exec: the &drm_exec locking context + * @addr: the start address within the VA space + * @range: the range to iterate within the VA space + * @num_fences: the amount of &dma_fences to reserve + * + * Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr + * and @addr + @range; if @num_fences is zero drm_exec_lock_obj() is called + * instead. + * + * Returns: 0 on success, negative error code on failure. + */ +int +drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec, + u64 addr, u64 range, unsigned int num_fences) +{ + struct drm_gpuva *va; + u64 end = addr + range; + int ret; + + drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) { + struct drm_gem_object *obj = va->gem.obj; + + ret = exec_prepare_obj(exec, obj, num_fences); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range); + +/** + * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs + * @vm_exec: the &drm_gpuvm_exec wrapper + * + * Acquires all dma-resv locks of all &drm_gem_objects the given + * &drm_gpuvm contains mappings of. + * + * Additionally, when calling this function with struct drm_gpuvm_exec::extra + * set, the driver receives the given @fn callback to lock additional + * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers + * would call drm_exec_prepare_obj() from within this callback. + * + * Returns: 0 on success, negative error code on failure. + */ +int +drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec) +{ + struct drm_gpuvm *gpuvm = vm_exec->vm; + struct drm_exec *exec = &vm_exec->exec; + unsigned int num_fences = vm_exec->num_fences; + int ret; + + drm_exec_init(exec, vm_exec->flags, 0); + + drm_exec_until_all_locked(exec) { + ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences); + drm_exec_retry_on_contention(exec); + if (ret) + goto err; + + ret = drm_gpuvm_prepare_objects(gpuvm, exec, num_fences); + drm_exec_retry_on_contention(exec); + if (ret) + goto err; + + if (vm_exec->extra.fn) { + ret = vm_exec->extra.fn(vm_exec); + drm_exec_retry_on_contention(exec); + if (ret) + goto err; + } + } + + return 0; + +err: + drm_exec_fini(exec); + return ret; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock); + +static int +fn_lock_array(struct drm_gpuvm_exec *vm_exec) +{ + struct { + struct drm_gem_object **objs; + unsigned int num_objs; + } *args = vm_exec->extra.priv; + + return drm_exec_prepare_array(&vm_exec->exec, args->objs, + args->num_objs, vm_exec->num_fences); +} + +/** + * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs + * @vm_exec: the &drm_gpuvm_exec wrapper + * @objs: additional &drm_gem_objects to lock + * @num_objs: the number of additional &drm_gem_objects to lock + * + * Acquires all dma-resv locks of all &drm_gem_objects the given &drm_gpuvm + * contains mappings of, plus the ones given through @objs. + * + * Returns: 0 on success, negative error code on failure.
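Driver-side use of the wrapper, sketched end to end; job_fence and the chosen dma-resv usages are assumptions, and drm_gpuvm_validate() and drm_gpuvm_resv_add_fence() are the helpers introduced further below:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <drm/drm_exec.h>
#include <drm/drm_gpuvm.h>

static int my_submit(struct drm_gpuvm *gpuvm, struct dma_fence *job_fence)
{
	struct drm_gpuvm_exec vm_exec = {
		.vm = gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = 1,
	};
	int ret;

	ret = drm_gpuvm_exec_lock(&vm_exec);
	if (ret)
		return ret;

	/* Everything mapped in the VM is locked; bring evicted BOs back. */
	ret = drm_gpuvm_validate(gpuvm, &vm_exec.exec);
	if (ret)
		goto out;

	/* ... push the job to the hardware here ... */

	drm_gpuvm_resv_add_fence(gpuvm, &vm_exec.exec, job_fence,
				 DMA_RESV_USAGE_BOOKKEEP, /* VM-private BOs */
				 DMA_RESV_USAGE_WRITE);   /* external BOs */
out:
	drm_exec_fini(&vm_exec.exec);
	return ret;
}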
+ */ +int +drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec, + struct drm_gem_object **objs, + unsigned int num_objs) +{ + struct { + struct drm_gem_object **objs; + unsigned int num_objs; + } args; + + args.objs = objs; + args.num_objs = num_objs; + + vm_exec->extra.fn = fn_lock_array; + vm_exec->extra.priv = &args; + + return drm_gpuvm_exec_lock(vm_exec); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_array); + +/** + * drm_gpuvm_exec_lock_range() - prepare all BOs mapped within a given range + * @vm_exec: the &drm_gpuvm_exec wrapper + * @addr: the start address within the VA space + * @range: the range to iterate within the VA space + * + * Acquires all dma-resv locks of all &drm_gem_objects mapped between @addr and + * @addr + @range. + * + * Returns: 0 on success, negative error code on failure. + */ +int +drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec, + u64 addr, u64 range) +{ + struct drm_gpuvm *gpuvm = vm_exec->vm; + struct drm_exec *exec = &vm_exec->exec; + int ret; + + drm_exec_init(exec, vm_exec->flags, 0); + + drm_exec_until_all_locked(exec) { + ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range, + vm_exec->num_fences); + drm_exec_retry_on_contention(exec); + if (ret) + goto err; + } + + return ret; + +err: + drm_exec_fini(exec); + return ret; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_range); + +static int +__drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec) +{ + const struct drm_gpuvm_ops *ops = gpuvm->ops; + struct drm_gpuvm_bo *vm_bo; + LIST_HEAD(evict); + int ret = 0; + + for_each_vm_bo_in_list(gpuvm, evict, &evict, vm_bo) { + ret = ops->vm_bo_validate(vm_bo, exec); + if (ret) + break; + } + /* Drop ref in case we break out of the loop. */ + drm_gpuvm_bo_put(vm_bo); + restore_vm_bo_list(gpuvm, evict); + + return ret; +} + +static int +drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec) +{ + const struct drm_gpuvm_ops *ops = gpuvm->ops; + struct drm_gpuvm_bo *vm_bo, *next; + int ret = 0; + + drm_gpuvm_resv_assert_held(gpuvm); + + list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list, + list.entry.evict) { + ret = ops->vm_bo_validate(vm_bo, exec); + if (ret) + break; + + dma_resv_assert_held(vm_bo->obj->resv); + if (!vm_bo->evicted) + drm_gpuvm_bo_list_del_init(vm_bo, evict, false); + } + + return ret; +} + +/** + * drm_gpuvm_validate() - validate all BOs marked as evicted + * @gpuvm: the &drm_gpuvm to validate evicted BOs + * @exec: the &drm_exec instance used for locking the GPUVM + * + * Calls the &drm_gpuvm_ops::vm_bo_validate callback for all evicted buffer + * objects being mapped in the given &drm_gpuvm. + * + * Returns: 0 on success, negative error code on failure. 
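drm_gpuvm_validate() only dispatches to the driver's per-BO callback; for a TTM-based driver such a hook might look like the following sketch (my_placement() and the TTM embedding are assumptions, and the evict-list bookkeeping is handled by the GPUVM helpers around this call):

#include <drm/drm_gpuvm.h>
#include <drm/ttm/ttm_bo.h>

/* Hypothetical: pick a GPU-accessible placement for the evicted BO. */
struct ttm_placement *my_placement(struct ttm_buffer_object *bo);

static int my_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{
	struct ttm_buffer_object *bo =
		container_of(vm_bo->obj, struct ttm_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
	};

	/* Move the BO back into a GPU-accessible placement. */
	return ttm_bo_validate(bo, my_placement(bo), &ctx);
}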
+ */ +int +drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec) +{ + const struct drm_gpuvm_ops *ops = gpuvm->ops; + + if (unlikely(!ops || !ops->vm_bo_validate)) + return -EOPNOTSUPP; + + if (drm_gpuvm_resv_protected(gpuvm)) + return drm_gpuvm_validate_locked(gpuvm, exec); + else + return __drm_gpuvm_validate(gpuvm, exec); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_validate); + +/** + * drm_gpuvm_resv_add_fence - add fence to private and all extobj + * dma-resv + * @gpuvm: the &drm_gpuvm to add a fence to + * @exec: the &drm_exec locking context + * @fence: fence to add + * @private_usage: private dma-resv usage + * @extobj_usage: extobj dma-resv usage + */ +void +drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + struct dma_fence *fence, + enum dma_resv_usage private_usage, + enum dma_resv_usage extobj_usage) +{ + struct drm_gem_object *obj; + unsigned long index; + + drm_exec_for_each_locked_object(exec, index, obj) { + dma_resv_assert_held(obj->resv); + dma_resv_add_fence(obj->resv, fence, + drm_gpuvm_is_extobj(gpuvm, obj) ? + extobj_usage : private_usage); + } +} +EXPORT_SYMBOL_GPL(drm_gpuvm_resv_add_fence); + +/** + * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo + * @gpuvm: The &drm_gpuvm the @obj is mapped in. + * @obj: The &drm_gem_object being mapped in the @gpuvm. + * + * If provided by the driver, this function uses the &drm_gpuvm_ops + * vm_bo_alloc() callback to allocate. + * + * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure + */ +struct drm_gpuvm_bo * +drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj) +{ + const struct drm_gpuvm_ops *ops = gpuvm->ops; + struct drm_gpuvm_bo *vm_bo; + + if (ops && ops->vm_bo_alloc) + vm_bo = ops->vm_bo_alloc(); + else + vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL); + + if (unlikely(!vm_bo)) + return NULL; + + vm_bo->vm = drm_gpuvm_get(gpuvm); + vm_bo->obj = obj; + drm_gem_object_get(obj); + + kref_init(&vm_bo->kref); + INIT_LIST_HEAD(&vm_bo->list.gpuva); + INIT_LIST_HEAD(&vm_bo->list.entry.gem); + + INIT_LIST_HEAD(&vm_bo->list.entry.extobj); + INIT_LIST_HEAD(&vm_bo->list.entry.evict); + + return vm_bo; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create); + +static void +drm_gpuvm_bo_destroy(struct kref *kref) +{ + struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo, + kref); + struct drm_gpuvm *gpuvm = vm_bo->vm; + const struct drm_gpuvm_ops *ops = gpuvm->ops; + struct drm_gem_object *obj = vm_bo->obj; + bool lock = !drm_gpuvm_resv_protected(gpuvm); + + if (!lock) + drm_gpuvm_resv_assert_held(gpuvm); + + drm_gpuvm_bo_list_del(vm_bo, extobj, lock); + drm_gpuvm_bo_list_del(vm_bo, evict, lock); + + drm_gem_gpuva_assert_lock_held(obj); + list_del(&vm_bo->list.entry.gem); + + if (ops && ops->vm_bo_free) + ops->vm_bo_free(vm_bo); + else + kfree(vm_bo); + + drm_gpuvm_put(gpuvm); + drm_gem_object_put(obj); +} + +/** + * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference + * @vm_bo: the &drm_gpuvm_bo to release the reference of + * + * This releases a reference to @vm_bo. + * + * If the reference count drops to zero, the &gpuvm_bo is destroyed, which + * includes removing it from the GEMs gpuva list. Hence, if a call to this + * function can potentially let the reference count drop to zero the caller must + * hold the dma-resv or driver specific GEM gpuva lock. + * + * This function may only be called from non-atomic context. + * + * Returns: true if vm_bo was destroyed, false otherwise. 
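Drivers that want to embed their own state into the VM/BO pair implement the allocation hooks that drm_gpuvm_bo_create() consults; a sketch (struct my_vm_bo and its member are assumptions):

#include <linux/slab.h>
#include <drm/drm_gpuvm.h>

struct my_vm_bo {
	struct drm_gpuvm_bo base;
	struct list_head bind_ops;	/* hypothetical driver state */
};

static struct drm_gpuvm_bo *my_vm_bo_alloc(void)
{
	struct my_vm_bo *vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL);

	if (!vm_bo)
		return NULL;

	INIT_LIST_HEAD(&vm_bo->bind_ops);
	return &vm_bo->base;	/* base is initialized by the caller */
}

static void my_vm_bo_free(struct drm_gpuvm_bo *base)
{
	kfree(container_of(base, struct my_vm_bo, base));
}

static const struct drm_gpuvm_ops my_gpuvm_bo_ops = {
	.vm_bo_alloc = my_vm_bo_alloc,
	.vm_bo_free = my_vm_bo_free,
};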
+ */ +bool +drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo) +{ + might_sleep(); + + if (vm_bo) + return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy); + + return false; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put); + +static struct drm_gpuvm_bo * +__drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj) +{ + struct drm_gpuvm_bo *vm_bo; + + drm_gem_gpuva_assert_lock_held(obj); + drm_gem_for_each_gpuvm_bo(vm_bo, obj) + if (vm_bo->vm == gpuvm) + return vm_bo; + + return NULL; +} + +/** + * drm_gpuvm_bo_find() - find the &drm_gpuvm_bo for the given + * &drm_gpuvm and &drm_gem_object + * @gpuvm: The &drm_gpuvm the @obj is mapped in. + * @obj: The &drm_gem_object being mapped in the @gpuvm. + * + * Find the &drm_gpuvm_bo representing the combination of the given + * &drm_gpuvm and &drm_gem_object. If found, increases the reference + * count of the &drm_gpuvm_bo accordingly. + * + * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure + */ +struct drm_gpuvm_bo * +drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj) +{ + struct drm_gpuvm_bo *vm_bo = __drm_gpuvm_bo_find(gpuvm, obj); + + return vm_bo ? drm_gpuvm_bo_get(vm_bo) : NULL; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find); + +/** + * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the + * given &drm_gpuvm and &drm_gem_object + * @gpuvm: The &drm_gpuvm the @obj is mapped in. + * @obj: The &drm_gem_object being mapped in the @gpuvm. + * + * Find the &drm_gpuvm_bo representing the combination of the given + * &drm_gpuvm and &drm_gem_object. If found, increases the reference + * count of the &drm_gpuvm_bo accordingly. If not found, allocates a new + * &drm_gpuvm_bo. + * + * A new &drm_gpuvm_bo is added to the GEMs gpuva list. + * + * Returns: a pointer to the &drm_gpuvm_bo on success, an ERR_PTR on failure + */ +struct drm_gpuvm_bo * +drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj) +{ + struct drm_gpuvm_bo *vm_bo; + + vm_bo = drm_gpuvm_bo_find(gpuvm, obj); + if (vm_bo) + return vm_bo; + + vm_bo = drm_gpuvm_bo_create(gpuvm, obj); + if (!vm_bo) + return ERR_PTR(-ENOMEM); + + drm_gem_gpuva_assert_lock_held(obj); + list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list); + + return vm_bo; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain); + +/** + * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo + * for the given &drm_gpuvm and &drm_gem_object + * @__vm_bo: A pre-allocated struct drm_gpuvm_bo. + * + * Find the &drm_gpuvm_bo representing the combination of the given + * &drm_gpuvm and &drm_gem_object. If found, increases the reference + * count of the found &drm_gpuvm_bo accordingly, while the @__vm_bo reference + * count is decreased. If not found, @__vm_bo is returned without further + * increase of the reference count. + * + * A new &drm_gpuvm_bo is added to the GEMs gpuva list.
+ * + * Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing + * &drm_gpuvm_bo was found + */ +struct drm_gpuvm_bo * +drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo) +{ + struct drm_gpuvm *gpuvm = __vm_bo->vm; + struct drm_gem_object *obj = __vm_bo->obj; + struct drm_gpuvm_bo *vm_bo; + + vm_bo = drm_gpuvm_bo_find(gpuvm, obj); + if (vm_bo) { + drm_gpuvm_bo_put(__vm_bo); + return vm_bo; + } + + drm_gem_gpuva_assert_lock_held(obj); + list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list); + + return __vm_bo; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain_prealloc); + +/** + * drm_gpuvm_bo_extobj_add() - adds the &drm_gpuvm_bo to its &drm_gpuvm's + * extobj list + * @vm_bo: The &drm_gpuvm_bo to add to its &drm_gpuvm's extobj list. + * + * Adds the given @vm_bo to its &drm_gpuvm's extobj list if it is not on the + * list already and the corresponding &drm_gem_object actually is an external + * object. + */ +void +drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo) +{ + struct drm_gpuvm *gpuvm = vm_bo->vm; + bool lock = !drm_gpuvm_resv_protected(gpuvm); + + if (!lock) + drm_gpuvm_resv_assert_held(gpuvm); + + if (drm_gpuvm_is_extobj(gpuvm, vm_bo->obj)) + drm_gpuvm_bo_list_add(vm_bo, extobj, lock); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add); + +/** + * drm_gpuvm_bo_evict() - add / remove a &drm_gpuvm_bo to / from the &drm_gpuvm's + * evicted list + * @vm_bo: the &drm_gpuvm_bo to add or remove + * @evict: indicates whether the object is evicted + * + * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list. + */ +void +drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict) +{ + struct drm_gpuvm *gpuvm = vm_bo->vm; + struct drm_gem_object *obj = vm_bo->obj; + bool lock = !drm_gpuvm_resv_protected(gpuvm); + + dma_resv_assert_held(obj->resv); + vm_bo->evicted = evict; + + /* Can't add external objects to the evicted list directly if not using + * internal spinlocks, since in this case the evicted list is protected + * with the VM's common dma-resv lock. + */ + if (drm_gpuvm_is_extobj(gpuvm, obj) && !lock) + return; + + if (evict) + drm_gpuvm_bo_list_add(vm_bo, evict, lock); + else + drm_gpuvm_bo_list_del_init(vm_bo, evict, lock); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_evict); static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm, @@ -764,11 +1760,21 @@ drm_gpuva_insert(struct drm_gpuvm *gpuvm, { u64 addr = va->va.addr; u64 range = va->va.range; + int ret; if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range))) return -EINVAL; - return __drm_gpuva_insert(gpuvm, va); + ret = __drm_gpuva_insert(gpuvm, va); + if (likely(!ret)) + /* Take a reference of the GPUVM for the successfully inserted + * drm_gpuva. We can't take the reference in + * __drm_gpuva_insert() itself, since we don't want to increase + * the reference count for the GPUVM's kernel_alloc_node. + */ + drm_gpuvm_get(gpuvm); + + return ret; } EXPORT_SYMBOL_GPL(drm_gpuva_insert); @@ -795,35 +1801,46 @@ drm_gpuva_remove(struct drm_gpuva *va) struct drm_gpuvm *gpuvm = va->vm; if (unlikely(va == &gpuvm->kernel_alloc_node)) { - WARN(1, "Can't destroy kernel reserved node.\n"); + drm_WARN(gpuvm->drm, 1, + "Can't destroy kernel reserved node.\n"); return; } __drm_gpuva_remove(va); + drm_gpuvm_put(va->vm); } EXPORT_SYMBOL_GPL(drm_gpuva_remove); /** * drm_gpuva_link() - link a &drm_gpuva * @va: the &drm_gpuva to link + * @vm_bo: the &drm_gpuvm_bo to add the &drm_gpuva to * - * This adds the given &va to the GPU VA list of the &drm_gem_object it is - * associated with.
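The expected caller of drm_gpuvm_bo_evict() is the driver's BO move/eviction path, with the object's dma-resv held as the helper asserts; a sketch with an assumed BO type:

#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

struct my_bo {
	struct drm_gem_object gem;
};

/* Called on BO moves, e.g. from a TTM move notification; the BO's
 * dma-resv is held here, which also protects the gpuvm_bo list walk. */
static void my_bo_move_notify(struct my_bo *bo, bool evicted)
{
	struct drm_gpuvm_bo *vm_bo;

	drm_gem_for_each_gpuvm_bo(vm_bo, &bo->gem)
		drm_gpuvm_bo_evict(vm_bo, evicted);
}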
+ * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the + * &drm_gpuvm_bo to the &drm_gem_object it is associated with. + * + * For every &drm_gpuva entry added to the &drm_gpuvm_bo an additional + * reference of the latter is taken. * * This function expects the caller to protect the GEM's GPUVA list against - * concurrent access using the GEMs dma_resv lock. + * concurrent access using either the GEMs dma_resv lock or a driver specific + * lock set through drm_gem_gpuva_set_lock(). */ void -drm_gpuva_link(struct drm_gpuva *va) +drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo) { struct drm_gem_object *obj = va->gem.obj; + struct drm_gpuvm *gpuvm = va->vm; if (unlikely(!obj)) return; - drm_gem_gpuva_assert_lock_held(obj); + drm_WARN_ON(gpuvm->drm, obj != vm_bo->obj); - list_add_tail(&va->gem.entry, &obj->gpuva.list); + va->vm_bo = drm_gpuvm_bo_get(vm_bo); + + drm_gem_gpuva_assert_lock_held(obj); + list_add_tail(&va->gem.entry, &vm_bo->list.gpuva); } EXPORT_SYMBOL_GPL(drm_gpuva_link); @@ -834,20 +1851,31 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link); * This removes the given &va from the GPU VA list of the &drm_gem_object it is * associated with. * + * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and + * the &drm_gpuvm_bo from the &drm_gem_object it is associated with in case + * this call unlinks the last &drm_gpuva from the &drm_gpuvm_bo. + * + * For every &drm_gpuva entry removed from the &drm_gpuvm_bo a reference of + * the latter is dropped. + * * This function expects the caller to protect the GEM's GPUVA list against - * concurrent access using the GEMs dma_resv lock. + * concurrent access using either the GEMs dma_resv lock or a driver specific + * lock set through drm_gem_gpuva_set_lock(). */ void drm_gpuva_unlink(struct drm_gpuva *va) { struct drm_gem_object *obj = va->gem.obj; + struct drm_gpuvm_bo *vm_bo = va->vm_bo; if (unlikely(!obj)) return; drm_gem_gpuva_assert_lock_held(obj); - list_del_init(&va->gem.entry); + + va->vm_bo = NULL; + drm_gpuvm_bo_put(vm_bo); } EXPORT_SYMBOL_GPL(drm_gpuva_unlink); @@ -992,10 +2020,10 @@ drm_gpuva_remap(struct drm_gpuva *prev, struct drm_gpuva *next, struct drm_gpuva_op_remap *op) { - struct drm_gpuva *curr = op->unmap->va; - struct drm_gpuvm *gpuvm = curr->vm; + struct drm_gpuva *va = op->unmap->va; + struct drm_gpuvm *gpuvm = va->vm; - drm_gpuva_remove(curr); + drm_gpuva_remove(va); if (op->prev) { drm_gpuva_init_from_op(prev, op->prev); @@ -1637,9 +2665,8 @@ err_free_ops: EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create); /** - * drm_gpuvm_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM - * @gpuvm: the &drm_gpuvm representing the GPU VA space - * @obj: the &drm_gem_object to unmap + * drm_gpuvm_bo_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM + * @vm_bo: the &drm_gpuvm_bo abstraction * * This function creates a list of operations to perform unmapping for every * GPUVA attached to a GEM. 
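With the rename, the unmap-ops helper now starts from the VM/BO pair instead of the raw GEM object; a sketch of tearing down everything one BO has mapped in one VM (my_unmap_one() is an assumed driver callback):

#include <linux/err.h>
#include <drm/drm_gpuvm.h>

int my_unmap_one(struct drm_gpuvm *gpuvm, struct drm_gpuva_op *op);

static int my_unmap_bo(struct drm_gpuvm_bo *vm_bo)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;
	int ret = 0;

	/* The caller holds the GEM's gpuva lock, as the helper asserts. */
	ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		ret = my_unmap_one(vm_bo->vm, op);
		if (ret)
			break;
	}

	drm_gpuva_ops_free(vm_bo->vm, ops);
	return ret;
}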
@@ -1656,15 +2683,14 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create); * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure */ struct drm_gpuva_ops * -drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm, - struct drm_gem_object *obj) +drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo) { struct drm_gpuva_ops *ops; struct drm_gpuva_op *op; struct drm_gpuva *va; int ret; - drm_gem_gpuva_assert_lock_held(obj); + drm_gem_gpuva_assert_lock_held(vm_bo->obj); ops = kzalloc(sizeof(*ops), GFP_KERNEL); if (!ops) @@ -1672,8 +2698,8 @@ drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm, INIT_LIST_HEAD(&ops->list); - drm_gem_for_each_gpuva(va, obj) { - op = gpuva_op_alloc(gpuvm); + drm_gpuvm_bo_for_each_va(va, vm_bo) { + op = gpuva_op_alloc(vm_bo->vm); if (!op) { ret = -ENOMEM; goto err_free_ops; @@ -1687,10 +2713,10 @@ drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm, return ops; err_free_ops: - drm_gpuva_ops_free(gpuvm, ops); + drm_gpuva_ops_free(vm_bo->vm, ops); return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(drm_gpuvm_gem_unmap_ops_create); +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_unmap_ops_create); /** * drm_gpuva_ops_free() - free the given &drm_gpuva_ops diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c deleted file mode 100644 index 60afa18655..0000000000 --- a/drivers/gpu/drm/drm_hashtab.c +++ /dev/null @@ -1,203 +0,0 @@ -/************************************************************************** - * - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * - **************************************************************************/ -/* - * Simple open hash tab implementation. 
- * - * Authors: - * Thomas Hellström - */ - -#include -#include -#include -#include -#include - -#include - -#include "drm_legacy.h" - -int drm_ht_create(struct drm_open_hash *ht, unsigned int order) -{ - unsigned int size = 1 << order; - - ht->order = order; - ht->table = NULL; - if (size <= PAGE_SIZE / sizeof(*ht->table)) - ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL); - else - ht->table = vzalloc(array_size(size, sizeof(*ht->table))); - if (!ht->table) { - DRM_ERROR("Out of memory for hash table\n"); - return -ENOMEM; - } - return 0; -} - -void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - unsigned int hashed_key; - int count = 0; - - hashed_key = hash_long(key, ht->order); - DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry(entry, h_list, head) - DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); -} - -static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, - unsigned long key) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - unsigned int hashed_key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry(entry, h_list, head) { - if (entry->key == key) - return &entry->head; - if (entry->key > key) - break; - } - return NULL; -} - -static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht, - unsigned long key) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - unsigned int hashed_key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - hlist_for_each_entry_rcu(entry, h_list, head) { - if (entry->key == key) - return &entry->head; - if (entry->key > key) - break; - } - return NULL; -} - -int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) -{ - struct drm_hash_item *entry; - struct hlist_head *h_list; - struct hlist_node *parent; - unsigned int hashed_key; - unsigned long key = item->key; - - hashed_key = hash_long(key, ht->order); - h_list = &ht->table[hashed_key]; - parent = NULL; - hlist_for_each_entry(entry, h_list, head) { - if (entry->key == key) - return -EINVAL; - if (entry->key > key) - break; - parent = &entry->head; - } - if (parent) { - hlist_add_behind_rcu(&item->head, parent); - } else { - hlist_add_head_rcu(&item->head, h_list); - } - return 0; -} - -/* - * Just insert an item and return any "bits" bit key that hasn't been - * used before. 
- */ -int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, - unsigned long seed, int bits, int shift, - unsigned long add) -{ - int ret; - unsigned long mask = (1UL << bits) - 1; - unsigned long first, unshifted_key; - - unshifted_key = hash_long(seed, bits); - first = unshifted_key; - do { - item->key = (unshifted_key << shift) + add; - ret = drm_ht_insert_item(ht, item); - if (ret) - unshifted_key = (unshifted_key + 1) & mask; - } while(ret && (unshifted_key != first)); - - if (ret) { - DRM_ERROR("Available key bit space exhausted\n"); - return -EINVAL; - } - return 0; -} - -int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, - struct drm_hash_item **item) -{ - struct hlist_node *list; - - list = drm_ht_find_key_rcu(ht, key); - if (!list) - return -EINVAL; - - *item = hlist_entry(list, struct drm_hash_item, head); - return 0; -} - -int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) -{ - struct hlist_node *list; - - list = drm_ht_find_key(ht, key); - if (list) { - hlist_del_init_rcu(list); - return 0; - } - return -EINVAL; -} - -int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) -{ - hlist_del_init_rcu(&item->head); - return 0; -} - -void drm_ht_remove(struct drm_open_hash *ht) -{ - if (ht->table) { - kvfree(ht->table); - ht->table = NULL; - } -} diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 8462b657c3..8e4faf0a28 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -22,6 +22,7 @@ */ #include +#include #include #include @@ -31,6 +32,7 @@ #define DRM_IF_VERSION(maj, min) (maj << 16 | min) +struct cea_sad; struct dentry; struct dma_buf; struct iosys_map; @@ -115,17 +117,10 @@ void drm_handle_vblank_works(struct drm_vblank_crtc *vblank); /* IOCTLS */ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); -int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); /* drm_irq.c */ /* IOCTLS */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_irq_control(struct drm_device *dev, void *data, - struct drm_file *file_priv); -#endif - int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); @@ -192,6 +187,8 @@ void drm_debugfs_connector_remove(struct drm_connector *connector); void drm_debugfs_crtc_add(struct drm_crtc *crtc); void drm_debugfs_crtc_remove(struct drm_crtc *crtc); void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc); +void drm_debugfs_encoder_add(struct drm_encoder *encoder); +void drm_debugfs_encoder_remove(struct drm_encoder *encoder); #else static inline void drm_debugfs_dev_fini(struct drm_device *dev) { @@ -229,6 +226,14 @@ static inline void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc) { } +static inline void drm_debugfs_encoder_add(struct drm_encoder *encoder) +{ +} + +static inline void drm_debugfs_encoder_remove(struct drm_encoder *encoder) +{ +} + #endif drm_ioctl_t drm_version; @@ -267,3 +272,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data, void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent, const struct drm_framebuffer *fb); void drm_framebuffer_debugfs_init(struct drm_device *dev); + +/* drm_edid.c */ +void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad); +void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 025dc558c9..129e2b91db 100644 --- 
a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -31,12 +31,12 @@ #include #include +#include #include #include #include "drm_crtc_internal.h" #include "drm_internal.h" -#include "drm_legacy.h" #define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t) #define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t) @@ -163,92 +163,6 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, return -EINVAL; } -#if IS_ENABLED(CONFIG_DRM_LEGACY) -typedef struct drm_map32 { - u32 offset; /* Requested physical address (0 for SAREA) */ - u32 size; /* Requested physical size (bytes) */ - enum drm_map_type type; /* Type of memory to map */ - enum drm_map_flags flags; /* Flags */ - u32 handle; /* User-space: "Handle" to pass to mmap() */ - int mtrr; /* MTRR slot used */ -} drm_map32_t; - -static int compat_drm_getmap(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_map32_t __user *argp = (void __user *)arg; - drm_map32_t m32; - struct drm_map map; - int err; - - if (copy_from_user(&m32, argp, sizeof(m32))) - return -EFAULT; - - map.offset = m32.offset; - err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0); - if (err) - return err; - - m32.offset = map.offset; - m32.size = map.size; - m32.type = map.type; - m32.flags = map.flags; - m32.handle = ptr_to_compat((void __user *)map.handle); - m32.mtrr = map.mtrr; - if (copy_to_user(argp, &m32, sizeof(m32))) - return -EFAULT; - return 0; - -} - -static int compat_drm_addmap(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_map32_t __user *argp = (void __user *)arg; - drm_map32_t m32; - struct drm_map map; - int err; - - if (copy_from_user(&m32, argp, sizeof(m32))) - return -EFAULT; - - map.offset = m32.offset; - map.size = m32.size; - map.type = m32.type; - map.flags = m32.flags; - - err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - if (err) - return err; - - m32.offset = map.offset; - m32.mtrr = map.mtrr; - m32.handle = ptr_to_compat((void __user *)map.handle); - if (map.handle != compat_ptr(m32.handle)) - pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n", - map.handle, m32.type, m32.offset); - - if (copy_to_user(argp, &m32, sizeof(m32))) - return -EFAULT; - - return 0; -} - -static int compat_drm_rmmap(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_map32_t __user *argp = (void __user *)arg; - struct drm_map map; - u32 handle; - - if (get_user(handle, &argp->handle)) - return -EFAULT; - map.handle = compat_ptr(handle); - return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH); -} -#endif - typedef struct drm_client32 { int idx; /* Which client desired? */ int auth; /* Is client authenticated? 
*/ @@ -308,501 +222,6 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd, return 0; } -#if IS_ENABLED(CONFIG_DRM_LEGACY) -typedef struct drm_buf_desc32 { - int count; /* Number of buffers of this size */ - int size; /* Size in bytes */ - int low_mark; /* Low water mark */ - int high_mark; /* High water mark */ - int flags; - u32 agp_start; /* Start address in the AGP aperture */ -} drm_buf_desc32_t; - -static int compat_drm_addbufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_desc32_t __user *argp = (void __user *)arg; - drm_buf_desc32_t desc32; - struct drm_buf_desc desc; - int err; - - if (copy_from_user(&desc32, argp, sizeof(drm_buf_desc32_t))) - return -EFAULT; - - desc = (struct drm_buf_desc){ - desc32.count, desc32.size, desc32.low_mark, desc32.high_mark, - desc32.flags, desc32.agp_start - }; - - err = drm_ioctl_kernel(file, drm_legacy_addbufs, &desc, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - if (err) - return err; - - desc32 = (drm_buf_desc32_t){ - desc.count, desc.size, desc.low_mark, desc.high_mark, - desc.flags, desc.agp_start - }; - if (copy_to_user(argp, &desc32, sizeof(drm_buf_desc32_t))) - return -EFAULT; - - return 0; -} - -static int compat_drm_markbufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_desc32_t b32; - drm_buf_desc32_t __user *argp = (void __user *)arg; - struct drm_buf_desc buf; - - if (copy_from_user(&b32, argp, sizeof(b32))) - return -EFAULT; - - buf.size = b32.size; - buf.low_mark = b32.low_mark; - buf.high_mark = b32.high_mark; - - return drm_ioctl_kernel(file, drm_legacy_markbufs, &buf, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -typedef struct drm_buf_info32 { - int count; /**< Entries in list */ - u32 list; -} drm_buf_info32_t; - -static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from) -{ - drm_buf_info32_t *request = data; - drm_buf_desc32_t __user *to = compat_ptr(request->list); - drm_buf_desc32_t v = {.count = from->buf_count, - .size = from->buf_size, - .low_mark = from->low_mark, - .high_mark = from->high_mark}; - - if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags))) - return -EFAULT; - return 0; -} - -static int drm_legacy_infobufs32(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - drm_buf_info32_t *request = data; - - return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32); -} - -static int compat_drm_infobufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_info32_t req32; - drm_buf_info32_t __user *argp = (void __user *)arg; - int err; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - if (req32.count < 0) - req32.count = 0; - - err = drm_ioctl_kernel(file, drm_legacy_infobufs32, &req32, DRM_AUTH); - if (err) - return err; - - if (put_user(req32.count, &argp->count)) - return -EFAULT; - - return 0; -} - -typedef struct drm_buf_pub32 { - int idx; /**< Index into the master buffer list */ - int total; /**< Buffer size */ - int used; /**< Amount of buffer in use (for DMA) */ - u32 address; /**< Address of buffer */ -} drm_buf_pub32_t; - -typedef struct drm_buf_map32 { - int count; /**< Length of the buffer list */ - u32 virtual; /**< Mmap'd area in user-virtual */ - u32 list; /**< Buffer information */ -} drm_buf_map32_t; - -static int map_one_buf32(void *data, int idx, unsigned long virtual, - struct drm_buf *buf) -{ - drm_buf_map32_t *request = data; - drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx; - drm_buf_pub32_t v; - - v.idx = buf->idx; - 
v.total = buf->total; - v.used = 0; - v.address = virtual + buf->offset; - if (copy_to_user(to, &v, sizeof(v))) - return -EFAULT; - return 0; -} - -static int drm_legacy_mapbufs32(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - drm_buf_map32_t *request = data; - void __user *v; - int err = __drm_legacy_mapbufs(dev, data, &request->count, - &v, map_one_buf32, - file_priv); - request->virtual = ptr_to_compat(v); - return err; -} - -static int compat_drm_mapbufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_map32_t __user *argp = (void __user *)arg; - drm_buf_map32_t req32; - int err; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - if (req32.count < 0) - return -EINVAL; - - err = drm_ioctl_kernel(file, drm_legacy_mapbufs32, &req32, DRM_AUTH); - if (err) - return err; - - if (put_user(req32.count, &argp->count) - || put_user(req32.virtual, &argp->virtual)) - return -EFAULT; - - return 0; -} - -typedef struct drm_buf_free32 { - int count; - u32 list; -} drm_buf_free32_t; - -static int compat_drm_freebufs(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_buf_free32_t req32; - struct drm_buf_free request; - drm_buf_free32_t __user *argp = (void __user *)arg; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - request.count = req32.count; - request.list = compat_ptr(req32.list); - return drm_ioctl_kernel(file, drm_legacy_freebufs, &request, DRM_AUTH); -} - -typedef struct drm_ctx_priv_map32 { - unsigned int ctx_id; /**< Context requesting private mapping */ - u32 handle; /**< Handle of map */ -} drm_ctx_priv_map32_t; - -static int compat_drm_setsareactx(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_ctx_priv_map32_t req32; - struct drm_ctx_priv_map request; - drm_ctx_priv_map32_t __user *argp = (void __user *)arg; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - request.ctx_id = req32.ctx_id; - request.handle = compat_ptr(req32.handle); - return drm_ioctl_kernel(file, drm_legacy_setsareactx, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -static int compat_drm_getsareactx(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct drm_ctx_priv_map req; - drm_ctx_priv_map32_t req32; - drm_ctx_priv_map32_t __user *argp = (void __user *)arg; - int err; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - req.ctx_id = req32.ctx_id; - err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH); - if (err) - return err; - - req32.handle = ptr_to_compat((void __user *)req.handle); - if (copy_to_user(argp, &req32, sizeof(req32))) - return -EFAULT; - - return 0; -} - -typedef struct drm_ctx_res32 { - int count; - u32 contexts; -} drm_ctx_res32_t; - -static int compat_drm_resctx(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_ctx_res32_t __user *argp = (void __user *)arg; - drm_ctx_res32_t res32; - struct drm_ctx_res res; - int err; - - if (copy_from_user(&res32, argp, sizeof(res32))) - return -EFAULT; - - res.count = res32.count; - res.contexts = compat_ptr(res32.contexts); - err = drm_ioctl_kernel(file, drm_legacy_resctx, &res, DRM_AUTH); - if (err) - return err; - - res32.count = res.count; - if (copy_to_user(argp, &res32, sizeof(res32))) - return -EFAULT; - - return 0; -} - -typedef struct drm_dma32 { - int context; /**< Context handle */ - int send_count; /**< Number of buffers to send */ - u32 send_indices; /**< List of handles to buffers */ - u32 send_sizes; /**< Lengths of data 
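[annotation] copy_one_buf32() and map_one_buf32() above illustrate the design used for the infobufs/mapbufs paths: one ABI-agnostic core (__drm_legacy_infobufs() / __drm_legacy_mapbufs()) walks the buffer list and delegates the user-visible layout to a per-ABI callback. A sketch of the same split, with hypothetical names and printf() standing in for copy_to_user():

        #include <stdio.h>

        struct buf_entry { int count; int size; };

        /* hypothetical core, shared by the native and compat paths */
        static int list_bufs(const struct buf_entry *e, int n, void *ctx,
                             int (*copy_one)(void *ctx, int idx,
                                             const struct buf_entry *e))
        {
                int i, err;

                for (i = 0; i < n; i++) {
                        err = copy_one(ctx, i, &e[i]); /* ABI-specific marshalling */
                        if (err)
                                return err;
                }
                return 0;
        }

        static int copy_one_32(void *ctx, int idx, const struct buf_entry *e)
        {
                (void)ctx;
                /* a real compat callback narrows to a packed 32-bit struct
                 * and copy_to_user()s it at list + idx */
                printf("buf %d: count=%d size=%d\n", idx, e->count, e->size);
                return 0;
        }

        int main(void)
        {
                struct buf_entry e[] = { { 4, 4096 }, { 2, 65536 } };

                return list_bufs(e, 2, NULL, copy_one_32);
        }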
to send */ - enum drm_dma_flags flags; /**< Flags */ - int request_count; /**< Number of buffers requested */ - int request_size; /**< Desired size for buffers */ - u32 request_indices; /**< Buffer information */ - u32 request_sizes; - int granted_count; /**< Number of buffers granted */ -} drm_dma32_t; - -static int compat_drm_dma(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_dma32_t d32; - drm_dma32_t __user *argp = (void __user *)arg; - struct drm_dma d; - int err; - - if (copy_from_user(&d32, argp, sizeof(d32))) - return -EFAULT; - - d.context = d32.context; - d.send_count = d32.send_count; - d.send_indices = compat_ptr(d32.send_indices); - d.send_sizes = compat_ptr(d32.send_sizes); - d.flags = d32.flags; - d.request_count = d32.request_count; - d.request_indices = compat_ptr(d32.request_indices); - d.request_sizes = compat_ptr(d32.request_sizes); - err = drm_ioctl_kernel(file, drm_legacy_dma_ioctl, &d, DRM_AUTH); - if (err) - return err; - - if (put_user(d.request_size, &argp->request_size) - || put_user(d.granted_count, &argp->granted_count)) - return -EFAULT; - - return 0; -} -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -#if IS_ENABLED(CONFIG_AGP) -typedef struct drm_agp_mode32 { - u32 mode; /**< AGP mode */ -} drm_agp_mode32_t; - -static int compat_drm_agp_enable(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_mode32_t __user *argp = (void __user *)arg; - struct drm_agp_mode mode; - - if (get_user(mode.mode, &argp->mode)) - return -EFAULT; - - return drm_ioctl_kernel(file, drm_legacy_agp_enable_ioctl, &mode, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -typedef struct drm_agp_info32 { - int agp_version_major; - int agp_version_minor; - u32 mode; - u32 aperture_base; /* physical address */ - u32 aperture_size; /* bytes */ - u32 memory_allowed; /* bytes */ - u32 memory_used; - - /* PCI information */ - unsigned short id_vendor; - unsigned short id_device; -} drm_agp_info32_t; - -static int compat_drm_agp_info(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_info32_t __user *argp = (void __user *)arg; - drm_agp_info32_t i32; - struct drm_agp_info info; - int err; - - err = drm_ioctl_kernel(file, drm_legacy_agp_info_ioctl, &info, DRM_AUTH); - if (err) - return err; - - i32.agp_version_major = info.agp_version_major; - i32.agp_version_minor = info.agp_version_minor; - i32.mode = info.mode; - i32.aperture_base = info.aperture_base; - i32.aperture_size = info.aperture_size; - i32.memory_allowed = info.memory_allowed; - i32.memory_used = info.memory_used; - i32.id_vendor = info.id_vendor; - i32.id_device = info.id_device; - if (copy_to_user(argp, &i32, sizeof(i32))) - return -EFAULT; - - return 0; -} - -typedef struct drm_agp_buffer32 { - u32 size; /**< In bytes -- will round to page boundary */ - u32 handle; /**< Used for binding / unbinding */ - u32 type; /**< Type of memory to allocate */ - u32 physical; /**< Physical used by i810 */ -} drm_agp_buffer32_t; - -static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_buffer32_t __user *argp = (void __user *)arg; - drm_agp_buffer32_t req32; - struct drm_agp_buffer request; - int err; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - request.size = req32.size; - request.type = req32.type; - err = drm_ioctl_kernel(file, drm_legacy_agp_alloc_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - if (err) - return err; - - req32.handle = request.handle; - req32.physical = request.physical; - if 
(copy_to_user(argp, &req32, sizeof(req32))) { - drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - return -EFAULT; - } - - return 0; -} - -static int compat_drm_agp_free(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_buffer32_t __user *argp = (void __user *)arg; - struct drm_agp_buffer request; - - if (get_user(request.handle, &argp->handle)) - return -EFAULT; - - return drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -typedef struct drm_agp_binding32 { - u32 handle; /**< From drm_agp_buffer */ - u32 offset; /**< In bytes -- will round to page boundary */ -} drm_agp_binding32_t; - -static int compat_drm_agp_bind(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_binding32_t __user *argp = (void __user *)arg; - drm_agp_binding32_t req32; - struct drm_agp_binding request; - - if (copy_from_user(&req32, argp, sizeof(req32))) - return -EFAULT; - - request.handle = req32.handle; - request.offset = req32.offset; - return drm_ioctl_kernel(file, drm_legacy_agp_bind_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} - -static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_agp_binding32_t __user *argp = (void __user *)arg; - struct drm_agp_binding request; - - if (get_user(request.handle, &argp->handle)) - return -EFAULT; - - return drm_ioctl_kernel(file, drm_legacy_agp_unbind_ioctl, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} -#endif /* CONFIG_AGP */ - -typedef struct drm_scatter_gather32 { - u32 size; /**< In bytes -- will round to page boundary */ - u32 handle; /**< Used for mapping / unmapping */ -} drm_scatter_gather32_t; - -static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_scatter_gather32_t __user *argp = (void __user *)arg; - struct drm_scatter_gather request; - int err; - - if (get_user(request.size, &argp->size)) - return -EFAULT; - - err = drm_ioctl_kernel(file, drm_legacy_sg_alloc, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); - if (err) - return err; - - /* XXX not sure about the handle conversion here... 
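[annotation] The scatter-gather thunks that follow squeeze a page-aligned native handle into a u32 by shifting PAGE_SHIFT bits out on the way to userspace and back in on return, which is what the "XXX" comment is second-guessing. The round trip, sketched under the assumption of 4 KiB pages:

        #include <assert.h>
        #include <stdint.h>

        #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

        static uint32_t encode_handle(unsigned long handle)
        {
                /* valid only for page-aligned handles below 2^(32 + PAGE_SHIFT) */
                return (uint32_t)(handle >> PAGE_SHIFT);
        }

        static unsigned long decode_handle(uint32_t h32)
        {
                return (unsigned long)h32 << PAGE_SHIFT;
        }

        int main(void)
        {
                unsigned long h = 0x1234UL << PAGE_SHIFT;  /* page aligned */

                assert(decode_handle(encode_handle(h)) == h);
                return 0;
        }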
*/ - if (put_user(request.handle >> PAGE_SHIFT, &argp->handle)) - return -EFAULT; - - return 0; -} - -static int compat_drm_sg_free(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_scatter_gather32_t __user *argp = (void __user *)arg; - struct drm_scatter_gather request; - unsigned long x; - - if (get_user(x, &argp->handle)) - return -EFAULT; - request.handle = x << PAGE_SHIFT; - return drm_ioctl_kernel(file, drm_legacy_sg_free, &request, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); -} -#endif #if defined(CONFIG_X86) typedef struct drm_update_draw32 { drm_drawable_t handle; @@ -854,7 +273,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, req.request.type = req32.request.type; req.request.sequence = req32.request.sequence; req.request.signal = req32.request.signal; - err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED); + err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, 0); req32.reply.type = req.reply.type; req32.reply.sequence = req.reply.sequence; @@ -914,37 +333,9 @@ static struct { #define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n} DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version), DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique), -#if IS_ENABLED(CONFIG_DRM_LEGACY) - DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap), -#endif DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient), DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats), DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique), -#if IS_ENABLED(CONFIG_DRM_LEGACY) - DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap), - DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs), - DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs), - DRM_IOCTL32_DEF(DRM_IOCTL_INFO_BUFS, compat_drm_infobufs), - DRM_IOCTL32_DEF(DRM_IOCTL_MAP_BUFS, compat_drm_mapbufs), - DRM_IOCTL32_DEF(DRM_IOCTL_FREE_BUFS, compat_drm_freebufs), - DRM_IOCTL32_DEF(DRM_IOCTL_RM_MAP, compat_drm_rmmap), - DRM_IOCTL32_DEF(DRM_IOCTL_SET_SAREA_CTX, compat_drm_setsareactx), - DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx), - DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx), - DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma), -#if IS_ENABLED(CONFIG_AGP) - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ALLOC, compat_drm_agp_alloc), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_FREE, compat_drm_agp_free), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind), - DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind), -#endif -#endif -#if IS_ENABLED(CONFIG_DRM_LEGACY) - DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc), - DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free), -#endif #if defined(CONFIG_X86) DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw), #endif diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 77590b0f38..e368fc084c 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -42,7 +42,6 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -#include "drm_legacy.h" /** * DOC: getunique and setversion story @@ -301,6 +300,10 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ case DRM_CAP_CRTC_IN_VBLANK_EVENT: req->value = 1; break; + case DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP: + req->value = drm_core_check_feature(dev, DRIVER_ATOMIC) && + dev->mode_config.async_page_flip; + break; default: return -EINVAL; } @@ 
-361,6 +364,15 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) return -EINVAL; file_priv->writeback_connectors = req->value; break; + case DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT: + if (!drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT)) + return -EOPNOTSUPP; + if (!file_priv->atomic) + return -EINVAL; + if (req->value > 1) + return -EINVAL; + file_priv->supports_virtualized_cursor_plane = req->value; + break; default: return -EINVAL; } @@ -559,21 +571,11 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv) .name = #ioctl \ } -#if IS_ENABLED(CONFIG_DRM_LEGACY) -#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, _func, _flags) -#else -#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags) -#endif - /* Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid, - DRM_MASTER|DRM_ROOT_ONLY), - - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), @@ -586,63 +588,15 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_MASTER), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH), - - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH), - 
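[annotation] For context on the two uapi additions above: DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP is read through the ordinary GET_CAP ioctl, and DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT is opted into per open file, after atomic has been enabled, on drivers advertising DRIVER_CURSOR_HOTSPOT. A sketch of the userspace side using only stock uapi ioctls; the fallback constants mirror this kernel's headers and should be treated as assumptions on older systems:

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        #include <drm/drm.h>

        #ifndef DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
        #define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15   /* assumed value */
        #endif
        #ifndef DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
        #define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6 /* assumed value */
        #endif

        int main(void)
        {
                int fd = open("/dev/dri/card0", O_RDWR);
                struct drm_get_cap cap = {
                        .capability = DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP,
                };
                struct drm_set_client_cap atomic_cap = {
                        .capability = DRM_CLIENT_CAP_ATOMIC, .value = 1,
                };
                struct drm_set_client_cap hotspot_cap = {
                        .capability = DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT,
                        .value = 1,   /* drm_setclientcap() rejects values > 1 */
                };

                if (fd < 0)
                        return 1;
                if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0)
                        printf("async atomic flip: %llu\n",
                               (unsigned long long)cap.value);

                /* atomic must be on first, else the kernel returns -EINVAL */
                ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &atomic_cap);
                ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &hotspot_cap);
                close(fd);
                return 0;
        }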
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - -#if IS_ENABLED(CONFIG_AGP) - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_legacy_agp_acquire_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_legacy_agp_release_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_legacy_agp_enable_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_legacy_agp_info_ioctl, DRM_AUTH), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_legacy_agp_alloc_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_legacy_agp_free_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_legacy_agp_bind_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_legacy_agp_unbind_ioctl, - DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), -#endif - - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - - DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED), - - DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -675,6 +629,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CLOSEFB, drm_mode_closefb_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0), @@ -774,7 +729,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata, { struct drm_file *file_priv = file->private_data; struct drm_device *dev = file_priv->minor->dev; - int retcode; + int ret; /* Update drm_file owner if fd was passed along. */ drm_file_update_pid(file_priv); @@ -782,20 +737,11 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata, if (drm_dev_is_unplugged(dev)) return -ENODEV; - retcode = drm_ioctl_permit(flags, file_priv); - if (unlikely(retcode)) - return retcode; - - /* Enforce sane locking for modern driver ioctls. */ - if (likely(!drm_core_check_feature(dev, DRIVER_LEGACY)) || - (flags & DRM_UNLOCKED)) - retcode = func(dev, kdata, file_priv); - else { - mutex_lock(&drm_global_mutex); - retcode = func(dev, kdata, file_priv); - mutex_unlock(&drm_global_mutex); - } - return retcode; + ret = drm_ioctl_permit(flags, file_priv); + if (unlikely(ret)) + return ret; + + return func(dev, kdata, file_priv); } EXPORT_SYMBOL(drm_ioctl_kernel); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c deleted file mode 100644 index d327638e15..0000000000 --- a/drivers/gpu/drm/drm_irq.c +++ /dev/null @@ -1,204 +0,0 @@ -/* - * drm_irq.c IRQ and vblank support - * - * \author Rickard E. 
(Rik) Faith - * \author Gareth Hughes - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com - * - * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - - -#include -#include /* For task queue support */ -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "drm_internal.h" - -static int drm_legacy_irq_install(struct drm_device *dev, int irq) -{ - int ret; - unsigned long sh_flags = 0; - - if (irq == 0) - return -EINVAL; - - if (dev->irq_enabled) - return -EBUSY; - dev->irq_enabled = true; - - DRM_DEBUG("irq=%d\n", irq); - - /* Before installing handler */ - if (dev->driver->irq_preinstall) - dev->driver->irq_preinstall(dev); - - /* PCI devices require shared interrupts. 
*/ - if (dev_is_pci(dev->dev)) - sh_flags = IRQF_SHARED; - - ret = request_irq(irq, dev->driver->irq_handler, - sh_flags, dev->driver->name, dev); - - if (ret < 0) { - dev->irq_enabled = false; - return ret; - } - - /* After installing handler */ - if (dev->driver->irq_postinstall) - ret = dev->driver->irq_postinstall(dev); - - if (ret < 0) { - dev->irq_enabled = false; - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - vga_client_unregister(to_pci_dev(dev->dev)); - free_irq(irq, dev); - } else { - dev->irq = irq; - } - - return ret; -} - -int drm_legacy_irq_uninstall(struct drm_device *dev) -{ - unsigned long irqflags; - bool irq_enabled; - int i; - - irq_enabled = dev->irq_enabled; - dev->irq_enabled = false; - - /* - * Wake up any waiters so they don't hang. This is just to paper over - * issues for UMS drivers which aren't in full control of their - * vblank/irq handling. KMS drivers must ensure that vblanks are all - * disabled when uninstalling the irq handler. - */ - if (drm_dev_has_vblank(dev)) { - spin_lock_irqsave(&dev->vbl_lock, irqflags); - for (i = 0; i < dev->num_crtcs; i++) { - struct drm_vblank_crtc *vblank = &dev->vblank[i]; - - if (!vblank->enabled) - continue; - - WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET)); - - drm_vblank_disable_and_save(dev, i); - wake_up(&vblank->queue); - } - spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - } - - if (!irq_enabled) - return -EINVAL; - - DRM_DEBUG("irq=%d\n", dev->irq); - - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - vga_client_unregister(to_pci_dev(dev->dev)); - - if (dev->driver->irq_uninstall) - dev->driver->irq_uninstall(dev); - - free_irq(dev->irq, dev); - - return 0; -} -EXPORT_SYMBOL(drm_legacy_irq_uninstall); - -int drm_legacy_irq_control(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_control *ctl = data; - int ret = 0, irq; - struct pci_dev *pdev; - - /* if we haven't irq we fallback for compatibility reasons - - * this used to be a separate function in drm_dma.h - */ - - if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) - return 0; - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return 0; - /* UMS was only ever supported on pci devices. 
*/ - if (WARN_ON(!dev_is_pci(dev->dev))) - return -EINVAL; - - switch (ctl->func) { - case DRM_INST_HANDLER: - pdev = to_pci_dev(dev->dev); - irq = pdev->irq; - - if (dev->if_version < DRM_IF_VERSION(1, 2) && - ctl->irq != irq) - return -EINVAL; - mutex_lock(&dev->struct_mutex); - ret = drm_legacy_irq_install(dev, irq); - mutex_unlock(&dev->struct_mutex); - - return ret; - case DRM_UNINST_HANDLER: - mutex_lock(&dev->struct_mutex); - ret = drm_legacy_irq_uninstall(dev); - mutex_unlock(&dev->struct_mutex); - - return ret; - default: - return -EINVAL; - } -} diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c index 0bf0fc1abf..0c7550c046 100644 --- a/drivers/gpu/drm/drm_kms_helper_common.c +++ b/drivers/gpu/drm/drm_kms_helper_common.c @@ -27,38 +27,6 @@ #include -#include -#include - -#include "drm_crtc_helper_internal.h" - MODULE_AUTHOR("David Airlie, Jesse Barnes"); MODULE_DESCRIPTION("DRM KMS helper"); MODULE_LICENSE("GPL and additional rights"); - -#if IS_ENABLED(CONFIG_DRM_LOAD_EDID_FIRMWARE) - -/* Backward compatibility for drm_kms_helper.edid_firmware */ -static int edid_firmware_set(const char *val, const struct kernel_param *kp) -{ - DRM_NOTE("drm_kms_helper.edid_firmware is deprecated, please use drm.edid_firmware instead.\n"); - - return __drm_set_edid_firmware_path(val); -} - -static int edid_firmware_get(char *buffer, const struct kernel_param *kp) -{ - return __drm_get_edid_firmware_path(buffer, PAGE_SIZE); -} - -static const struct kernel_param_ops edid_firmware_ops = { - .set = edid_firmware_set, - .get = edid_firmware_get, -}; - -module_param_cb(edid_firmware, &edid_firmware_ops, NULL, 0644); -__MODULE_PARM_TYPE(edid_firmware, "charp"); -MODULE_PARM_DESC(edid_firmware, - "DEPRECATED. Use drm.edid_firmware module parameter instead."); - -#endif diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h deleted file mode 100644 index 70c9dba114..0000000000 --- a/drivers/gpu/drm/drm_legacy.h +++ /dev/null @@ -1,290 +0,0 @@ -#ifndef __DRM_LEGACY_H__ -#define __DRM_LEGACY_H__ - -/* - * Copyright (c) 2014 David Herrmann - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * This file contains legacy interfaces that modern drm drivers - * should no longer be using. They cannot be removed as legacy - * drivers use them, and removing them are API breaks. 
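[annotation] drm_legacy_irq_install(), deleted above, is a textbook request_irq() wrapper: quiesce the hardware, pick IRQF_SHARED for PCI since PCI lines can be shared, install the handler, then let a post-install hook unmask, rolling back symmetrically on failure. A kernel-style sketch of that shape; the device struct, hooks, and names are hypothetical:

        #include <linux/interrupt.h>
        #include <linux/pci.h>

        struct my_dev {
                struct device *base;
                void (*pre_install)(struct my_dev *mdev);
                int (*post_install)(struct my_dev *mdev);
        };

        static irqreturn_t my_handler(int irq, void *arg)
        {
                return IRQ_HANDLED;  /* a real handler checks its hw status */
        }

        static int my_irq_install(struct my_dev *mdev, int irq)
        {
                unsigned long flags = 0;
                int ret = 0;

                if (!irq)
                        return -EINVAL;

                if (mdev->pre_install)
                        mdev->pre_install(mdev); /* quiesce hw before wiring up */

                if (dev_is_pci(mdev->base))
                        flags = IRQF_SHARED;     /* PCI lines can be shared */

                ret = request_irq(irq, my_handler, flags, "my-drv", mdev);
                if (ret)
                        return ret;

                if (mdev->post_install)
                        ret = mdev->post_install(mdev); /* unmask in hw */
                if (ret)
                        free_irq(irq, mdev);            /* roll back */

                return ret;
        }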
- */ -#include - -#include -#include -#include - -struct agp_memory; -struct drm_buf_desc; -struct drm_device; -struct drm_file; -struct drm_hash_item; -struct drm_open_hash; - -/* - * Hash-table Support - */ - -#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) - -/* drm_hashtab.c */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_ht_create(struct drm_open_hash *ht, unsigned int order); -int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); -int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, - unsigned long seed, int bits, int shift, - unsigned long add); -int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); - -void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); -int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); -int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); -void drm_ht_remove(struct drm_open_hash *ht); -#endif - -/* - * RCU-safe interface - * - * The user of this API needs to make sure that two or more instances of the - * hash table manipulation functions are never run simultaneously. - * The lookup function drm_ht_find_item_rcu may, however, run simultaneously - * with any of the manipulation functions as long as it's called from within - * an RCU read-locked section. - */ -#define drm_ht_insert_item_rcu drm_ht_insert_item -#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please -#define drm_ht_remove_key_rcu drm_ht_remove_key -#define drm_ht_remove_item_rcu drm_ht_remove_item -#define drm_ht_find_item_rcu drm_ht_find_item - -/* - * Generic DRM Contexts - */ - -#define DRM_KERNEL_CONTEXT 0 -#define DRM_RESERVED_CONTEXTS 1 - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_ctxbitmap_init(struct drm_device *dev); -void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev); -void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file); -#else -static inline void drm_legacy_ctxbitmap_init(struct drm_device *dev) {} -static inline void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev) {} -static inline void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) {} -#endif - -void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle); - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f); - -int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f); -#endif - -/* - * Generic Buffer Management - */ - -#define DRM_MAP_HASH_OFFSET 0x10000000 - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -static inline int drm_legacy_create_map_hash(struct drm_device *dev) -{ - return drm_ht_create(&dev->map_hash, 12); -} - -static inline void drm_legacy_remove_map_hash(struct drm_device *dev) -{ - drm_ht_remove(&dev->map_hash); -} -#else -static inline int drm_legacy_create_map_hash(struct drm_device *dev) -{ - return 0; -} - -static inline void drm_legacy_remove_map_hash(struct drm_device *dev) {} -#endif - - -#if IS_ENABLED(CONFIG_DRM_LEGACY) 
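[annotation] The RCU comment above states the contract the removed hashtab relied on: modifiers must be serialized externally, while lookups may run concurrently inside an RCU read-side section. The same contract, restated against the stock hlist RCU primitives; the object type and single bucket are illustrative:

        #include <linux/rculist.h>
        #include <linux/spinlock.h>
        #include <linux/types.h>

        struct obj {
                unsigned long key;
                struct hlist_node node;
        };

        static HLIST_HEAD(bucket);
        static DEFINE_SPINLOCK(writer_lock); /* serializes all modifiers */

        static void obj_insert(struct obj *o)
        {
                spin_lock(&writer_lock);     /* "never run simultaneously" */
                hlist_add_head_rcu(&o->node, &bucket);
                spin_unlock(&writer_lock);
        }

        static bool obj_exists(unsigned long key)
        {
                struct obj *o;
                bool found = false;

                rcu_read_lock();             /* lookup may race with a writer */
                hlist_for_each_entry_rcu(o, &bucket, node) {
                        if (o->key == key) {
                                found = true;
                                break;
                        }
                }
                rcu_read_unlock();
                return found;
        }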
-int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f); - -int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f); -#endif - -int __drm_legacy_infobufs(struct drm_device *, void *, int *, - int (*)(void *, int, struct drm_buf_entry *)); -int __drm_legacy_mapbufs(struct drm_device *, void *, int *, - void __user **, - int (*)(void *, int, unsigned long, struct drm_buf *), - struct drm_file *); - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_master_rmmaps(struct drm_device *dev, - struct drm_master *master); -void drm_legacy_rmmaps(struct drm_device *dev); -#else -static inline void drm_legacy_master_rmmaps(struct drm_device *dev, - struct drm_master *master) {} -static inline void drm_legacy_rmmaps(struct drm_device *dev) {} -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_vma_flush(struct drm_device *d); -#else -static inline void drm_legacy_vma_flush(struct drm_device *d) -{ - /* do nothing */ -} -#endif - -/* - * AGP Support - */ - -struct drm_agp_mem { - unsigned long handle; - struct agp_memory *memory; - unsigned long bound; - int pages; - struct list_head head; -}; - -/* drm_agpsupport.c */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP) -void drm_legacy_agp_clear(struct drm_device *dev); - -int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -#else -static inline void drm_legacy_agp_clear(struct drm_device *dev) {} -#endif - -/* drm_lock.c */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f); -int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f); -void drm_legacy_lock_release(struct drm_device *dev, struct file *filp); -#else -static inline void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) {} -#endif - -/* DMA support */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -int drm_legacy_dma_setup(struct drm_device *dev); -void drm_legacy_dma_takedown(struct drm_device *dev); -#else -static inline int drm_legacy_dma_setup(struct drm_device *dev) -{ - return 0; -} -#endif - -void drm_legacy_free_buffer(struct drm_device *dev, - struct drm_buf * buf); -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_reclaim_buffers(struct drm_device *dev, - struct 
drm_file *filp); -#else -static inline void drm_legacy_reclaim_buffers(struct drm_device *dev, - struct drm_file *filp) {} -#endif - -/* Scatter Gather Support */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_sg_cleanup(struct drm_device *dev); -int drm_legacy_sg_alloc(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_legacy_sg_free(struct drm_device *dev, void *data, - struct drm_file *file_priv); -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_init_members(struct drm_device *dev); -void drm_legacy_destroy_members(struct drm_device *dev); -void drm_legacy_dev_reinit(struct drm_device *dev); -int drm_legacy_setup(struct drm_device * dev); -#else -static inline void drm_legacy_init_members(struct drm_device *dev) {} -static inline void drm_legacy_destroy_members(struct drm_device *dev) {} -static inline void drm_legacy_dev_reinit(struct drm_device *dev) {} -static inline int drm_legacy_setup(struct drm_device * dev) { return 0; } -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master); -#else -static inline void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) {} -#endif - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_master_legacy_init(struct drm_master *master); -#else -static inline void drm_master_legacy_init(struct drm_master *master) {} -#endif - -/* drm_pci.c */ -#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_PCI) -int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv); -void drm_legacy_pci_agp_destroy(struct drm_device *dev); -#else -static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return -EINVAL; -} - -static inline void drm_legacy_pci_agp_destroy(struct drm_device *dev) {} -#endif - -#endif /* __DRM_LEGACY_H__ */ diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c deleted file mode 100644 index d4c5434062..0000000000 --- a/drivers/gpu/drm/drm_legacy_misc.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * \file drm_legacy_misc.c - * Misc legacy support functions. - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - */ - -/* - * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include -#include - -#include "drm_internal.h" -#include "drm_legacy.h" - -void drm_legacy_init_members(struct drm_device *dev) -{ - INIT_LIST_HEAD(&dev->ctxlist); - INIT_LIST_HEAD(&dev->vmalist); - INIT_LIST_HEAD(&dev->maplist); - spin_lock_init(&dev->buf_lock); - mutex_init(&dev->ctxlist_mutex); -} - -void drm_legacy_destroy_members(struct drm_device *dev) -{ - mutex_destroy(&dev->ctxlist_mutex); -} - -int drm_legacy_setup(struct drm_device * dev) -{ - int ret; - - if (dev->driver->firstopen && - drm_core_check_feature(dev, DRIVER_LEGACY)) { - ret = dev->driver->firstopen(dev); - if (ret != 0) - return ret; - } - - ret = drm_legacy_dma_setup(dev); - if (ret < 0) - return ret; - - - DRM_DEBUG("\n"); - return 0; -} - -void drm_legacy_dev_reinit(struct drm_device *dev) -{ - if (dev->irq_enabled) - drm_legacy_irq_uninstall(dev); - - mutex_lock(&dev->struct_mutex); - - drm_legacy_agp_clear(dev); - - drm_legacy_sg_cleanup(dev); - drm_legacy_vma_flush(dev); - drm_legacy_dma_takedown(dev); - - mutex_unlock(&dev->struct_mutex); - - dev->sigdata.lock = NULL; - - dev->context_flag = 0; - dev->last_context = 0; - dev->if_version = 0; - - DRM_DEBUG("lastclose completed\n"); -} - -void drm_master_legacy_init(struct drm_master *master) -{ - spin_lock_init(&master->lock.spinlock); - init_waitqueue_head(&master->lock.lock_queue); -} diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c deleted file mode 100644 index 1efbd5389d..0000000000 --- a/drivers/gpu/drm/drm_lock.c +++ /dev/null @@ -1,373 +0,0 @@ -/* - * \file drm_lock.c - * IOCTLs for locking - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - */ - -/* - * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include - -#include -#include -#include -#include - -#include "drm_internal.h" -#include "drm_legacy.h" - -static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); - -/* - * Take the heavyweight lock. 
- * - * \param lock lock pointer. - * \param context locking context. - * \return one if the lock is held, or zero otherwise. - * - * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. - */ -static -int drm_lock_take(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - do { - old = *lock; - if (old & _DRM_LOCK_HELD) - new = old | _DRM_LOCK_CONT; - else { - new = context | _DRM_LOCK_HELD | - ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? - _DRM_LOCK_CONT : 0); - } - prev = cmpxchg(lock, old, new); - } while (prev != old); - spin_unlock_bh(&lock_data->spinlock); - - if (_DRM_LOCKING_CONTEXT(old) == context) { - if (old & _DRM_LOCK_HELD) { - if (context != DRM_KERNEL_CONTEXT) { - DRM_ERROR("%d holds heavyweight lock\n", - context); - } - return 0; - } - } - - if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { - /* Have lock */ - return 1; - } - return 0; -} - -/* - * This takes a lock forcibly and hands it to context. Should ONLY be used - * inside *_unlock to give lock to kernel before calling *_dma_schedule. - * - * \param dev DRM device. - * \param lock lock pointer. - * \param context locking context. - * \return always one. - * - * Resets the lock file pointer. - * Marks the lock as held by the given context, via the \p cmpxchg instruction. - */ -static int drm_lock_transfer(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - lock_data->file_priv = NULL; - do { - old = *lock; - new = context | _DRM_LOCK_HELD; - prev = cmpxchg(lock, old, new); - } while (prev != old); - return 1; -} - -static int drm_legacy_lock_free(struct drm_lock_data *lock_data, - unsigned int context) -{ - unsigned int old, new, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - if (lock_data->kernel_waiters != 0) { - drm_lock_transfer(lock_data, 0); - lock_data->idle_has_lock = 1; - spin_unlock_bh(&lock_data->spinlock); - return 1; - } - spin_unlock_bh(&lock_data->spinlock); - - do { - old = *lock; - new = _DRM_LOCKING_CONTEXT(old); - prev = cmpxchg(lock, old, new); - } while (prev != old); - - if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { - DRM_ERROR("%d freed heavyweight lock held by %d\n", - context, _DRM_LOCKING_CONTEXT(old)); - return 1; - } - wake_up_interruptible(&lock_data->lock_queue); - return 0; -} - -/* - * Lock ioctl. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument, pointing to a drm_lock structure. - * \return zero on success or negative number on failure. - * - * Add the current task to the lock wait queue, and attempt to take to lock. 
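[annotation] drm_lock_take() is a classic compare-and-swap retry loop: read the lock word, compute the desired successor (new owner bits, or a contention flag), and cmpxchg until the transition sticks. The same loop modelled with C11 atomics; the flag layout mirrors _DRM_LOCK_HELD/_DRM_LOCK_CONT, but the constants here are illustrative:

        #include <stdatomic.h>
        #include <stdbool.h>

        #define LOCK_HELD 0x80000000u
        #define LOCK_CONT 0x40000000u
        #define CTX(x)    ((x) & 0x3fffffffu)

        static _Atomic unsigned int hw_lock;

        static bool lock_take(unsigned int context)
        {
                unsigned int old, new;

                old = atomic_load(&hw_lock);
                do {
                        if (old & LOCK_HELD)
                                new = old | LOCK_CONT;     /* mark contended */
                        else
                                new = context | LOCK_HELD; /* try to own it */
                        /* on CAS failure, 'old' is reloaded automatically */
                } while (!atomic_compare_exchange_weak(&hw_lock, &old, new));

                /* we hold it only if the word we installed names us as owner */
                return (new & LOCK_HELD) && CTX(new) == context;
        }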
- */ -int drm_legacy_lock(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - DECLARE_WAITQUEUE(entry, current); - struct drm_lock *lock = data; - struct drm_master *master = file_priv->master; - int ret = 0; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - ++file_priv->lock_count; - - if (lock->context == DRM_KERNEL_CONTEXT) { - DRM_ERROR("Process %d using kernel context %d\n", - task_pid_nr(current), lock->context); - return -EINVAL; - } - - DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", - lock->context, task_pid_nr(current), - master->lock.hw_lock ? master->lock.hw_lock->lock : -1, - lock->flags); - - add_wait_queue(&master->lock.lock_queue, &entry); - spin_lock_bh(&master->lock.spinlock); - master->lock.user_waiters++; - spin_unlock_bh(&master->lock.spinlock); - - for (;;) { - __set_current_state(TASK_INTERRUPTIBLE); - if (!master->lock.hw_lock) { - /* Device has been unregistered */ - send_sig(SIGTERM, current, 0); - ret = -EINTR; - break; - } - if (drm_lock_take(&master->lock, lock->context)) { - master->lock.file_priv = file_priv; - master->lock.lock_time = jiffies; - break; /* Got lock */ - } - - /* Contention */ - mutex_unlock(&drm_global_mutex); - schedule(); - mutex_lock(&drm_global_mutex); - if (signal_pending(current)) { - ret = -EINTR; - break; - } - } - spin_lock_bh(&master->lock.spinlock); - master->lock.user_waiters--; - spin_unlock_bh(&master->lock.spinlock); - __set_current_state(TASK_RUNNING); - remove_wait_queue(&master->lock.lock_queue, &entry); - - DRM_DEBUG("%d %s\n", lock->context, - ret ? "interrupted" : "has lock"); - if (ret) return ret; - - /* don't set the block all signals on the master process for now - * really probably not the correct answer but lets us debug xkb - * xserver for now */ - if (!drm_is_current_master(file_priv)) { - dev->sigdata.context = lock->context; - dev->sigdata.lock = master->lock.hw_lock; - } - - if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) - { - if (dev->driver->dma_quiescent(dev)) { - DRM_DEBUG("%d waiting for DMA quiescent\n", - lock->context); - return -EBUSY; - } - } - - return 0; -} - -/* - * Unlock ioctl. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument, pointing to a drm_lock structure. - * \return zero on success or negative number on failure. - * - * Transfer and free the lock. - */ -int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - struct drm_lock *lock = data; - struct drm_master *master = file_priv->master; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (lock->context == DRM_KERNEL_CONTEXT) { - DRM_ERROR("Process %d using kernel context %d\n", - task_pid_nr(current), lock->context); - return -EINVAL; - } - - if (drm_legacy_lock_free(&master->lock, lock->context)) { - /* FIXME: Should really bail out here. */ - } - - return 0; -} - -/* - * This function returns immediately and takes the hw lock - * with the kernel context if it is free, otherwise it gets the highest priority when and if - * it is eventually released. - * - * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held - * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause - * a deadlock, which is why the "idlelock" was invented). - * - * This should be sufficient to wait for GPU idle without - * having to worry about starvation. 
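[annotation] The lock ioctl above is the standard interruptible wait-queue acquisition loop: register on the queue, retry the take, sleep on contention, bail with -EINTR on a signal. A POSIX analogue of the control flow, assuming the lock_take() helper sketched earlier (declared extern here, so this is a sketch of shape rather than a linkable unit):

        #include <errno.h>
        #include <pthread.h>
        #include <stdbool.h>

        extern bool lock_take(unsigned int context); /* cmpxchg helper above */

        static pthread_mutex_t q_mutex = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t  q_cond  = PTHREAD_COND_INITIALIZER;

        static int lock_wait(unsigned int context, const bool *interrupted)
        {
                int ret = 0;

                pthread_mutex_lock(&q_mutex);       /* kernel: add_wait_queue() */
                while (!lock_take(context)) {       /* kernel: drm_lock_take()  */
                        if (*interrupted) {         /* kernel: signal_pending() */
                                ret = -EINTR;
                                break;
                        }
                        /* releaser must broadcast q_cond, like wake_up() */
                        pthread_cond_wait(&q_cond, &q_mutex); /* schedule() */
                }
                pthread_mutex_unlock(&q_mutex);  /* kernel: remove_wait_queue() */
                return ret;
        }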
- */ -void drm_legacy_idlelock_take(struct drm_lock_data *lock_data) -{ - int ret; - - spin_lock_bh(&lock_data->spinlock); - lock_data->kernel_waiters++; - if (!lock_data->idle_has_lock) { - - spin_unlock_bh(&lock_data->spinlock); - ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT); - spin_lock_bh(&lock_data->spinlock); - - if (ret == 1) - lock_data->idle_has_lock = 1; - } - spin_unlock_bh(&lock_data->spinlock); -} -EXPORT_SYMBOL(drm_legacy_idlelock_take); - -void drm_legacy_idlelock_release(struct drm_lock_data *lock_data) -{ - unsigned int old, prev; - volatile unsigned int *lock = &lock_data->hw_lock->lock; - - spin_lock_bh(&lock_data->spinlock); - if (--lock_data->kernel_waiters == 0) { - if (lock_data->idle_has_lock) { - do { - old = *lock; - prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT); - } while (prev != old); - wake_up_interruptible(&lock_data->lock_queue); - lock_data->idle_has_lock = 0; - } - } - spin_unlock_bh(&lock_data->spinlock); -} -EXPORT_SYMBOL(drm_legacy_idlelock_release); - -static int drm_legacy_i_have_hw_lock(struct drm_device *dev, - struct drm_file *file_priv) -{ - struct drm_master *master = file_priv->master; - - return (file_priv->lock_count && master->lock.hw_lock && - _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && - master->lock.file_priv == file_priv); -} - -void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) -{ - struct drm_file *file_priv = filp->private_data; - - /* if the master has gone away we can't do anything with the lock */ - if (!dev->master) - return; - - if (drm_legacy_i_have_hw_lock(dev, file_priv)) { - DRM_DEBUG("File %p released, freeing lock for context %d\n", - filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); - drm_legacy_lock_free(&file_priv->master->lock, - _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock)); - } -} - -void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) -{ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return; - - /* - * Since the master is disappearing, so is the - * possibility to lock. - */ - mutex_lock(&dev->struct_mutex); - if (master->lock.hw_lock) { - if (dev->sigdata.lock == master->lock.hw_lock) - dev->sigdata.lock = NULL; - master->lock.hw_lock = NULL; - master->lock.file_priv = NULL; - wake_up_interruptible_all(&master->lock.lock_queue); - } - mutex_unlock(&dev->struct_mutex); -} diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c deleted file mode 100644 index d2e1dccd81..0000000000 --- a/drivers/gpu/drm/drm_memory.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * \file drm_memory.c - * Memory management wrappers for DRM - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - */ - -/* - * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include -#include -#include - -#include -#include - -#include "drm_legacy.h" - -#if IS_ENABLED(CONFIG_AGP) - -#ifdef HAVE_PAGE_AGP -# include -#else -# ifdef __powerpc__ -# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL) -# else -# define PAGE_AGP PAGE_KERNEL -# endif -#endif - -static void *agp_remap(unsigned long offset, unsigned long size, - struct drm_device *dev) -{ - unsigned long i, num_pages = - PAGE_ALIGN(size) / PAGE_SIZE; - struct drm_agp_mem *agpmem; - struct page **page_map; - struct page **phys_page_map; - void *addr; - - size = PAGE_ALIGN(size); - -#ifdef __alpha__ - offset -= dev->hose->mem_space->start; -#endif - - list_for_each_entry(agpmem, &dev->agp->memory, head) - if (agpmem->bound <= offset - && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= - (offset + size)) - break; - if (&agpmem->head == &dev->agp->memory) - return NULL; - - /* - * OK, we're mapping AGP space on a chipset/platform on which memory accesses by - * the CPU do not get remapped by the GART. We fix this by using the kernel's - * page-table instead (that's probably faster anyhow...). - */ - /* note: use vmalloc() because num_pages could be large... 
*/ - page_map = vmalloc(array_size(num_pages, sizeof(struct page *))); - if (!page_map) - return NULL; - - phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE); - for (i = 0; i < num_pages; ++i) - page_map[i] = phys_page_map[i]; - addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); - vfree(page_map); - - return addr; -} - -#else /* CONFIG_AGP */ -static inline void *agp_remap(unsigned long offset, unsigned long size, - struct drm_device *dev) -{ - return NULL; -} - -#endif /* CONFIG_AGP */ - -void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev) -{ - if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) - map->handle = agp_remap(map->offset, map->size, dev); - else - map->handle = ioremap(map->offset, map->size); -} -EXPORT_SYMBOL(drm_legacy_ioremap); - -void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) -{ - if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) - map->handle = agp_remap(map->offset, map->size, dev); - else - map->handle = ioremap_wc(map->offset, map->size); -} -EXPORT_SYMBOL(drm_legacy_ioremap_wc); - -void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev) -{ - if (!map->handle || !map->size) - return; - - if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) - vunmap(map->handle); - else - iounmap(map->handle); -} -EXPORT_SYMBOL(drm_legacy_ioremapfree); diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index e90f0bf895..daac649aab 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -197,12 +197,14 @@ EXPORT_SYMBOL(mipi_dbi_command_stackbuf); * @fb: The source framebuffer * @clip: Clipping rectangle of the area to be copied * @swap: When true, swap MSB/LSB of 16-bit values + * @fmtcnv_state: Format-conversion state * * Returns: * Zero on success, negative error code on failure. 
*/ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb, - struct drm_rect *clip, bool swap) + struct drm_rect *clip, bool swap, + struct drm_format_conv_state *fmtcnv_state) { struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); struct iosys_map dst_map = IOSYS_MAP_INIT_VADDR(dst); @@ -215,12 +217,13 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer * switch (fb->format->format) { case DRM_FORMAT_RGB565: if (swap) - drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach); + drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach, + fmtcnv_state); else drm_fb_memcpy(&dst_map, NULL, src, fb, clip); break; case DRM_FORMAT_XRGB8888: - drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, swap); + drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, fmtcnv_state, swap); break; default: drm_err_once(fb->dev, "Format is not supported: %p4cc\n", @@ -252,7 +255,7 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev, } static void mipi_dbi_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb, - struct drm_rect *rect) + struct drm_rect *rect, struct drm_format_conv_state *fmtcnv_state) { struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev); unsigned int height = rect->y2 - rect->y1; @@ -270,7 +273,7 @@ static void mipi_dbi_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb, if (!dbi->dc || !full || swap || fb->format->format == DRM_FORMAT_XRGB8888) { tr = dbidev->tx_buf; - ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap); + ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap, fmtcnv_state); if (ret) goto err_msg; } else { @@ -332,7 +335,8 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe, return; if (drm_atomic_helper_damage_merged(old_state, state, &rect)) - mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect); + mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect, + &shadow_plane_state->fmtcnv_state); drm_dev_exit(idx); } @@ -368,7 +372,8 @@ void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev, if (!drm_dev_enter(&dbidev->drm, &idx)) return; - mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect); + mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect, + &shadow_plane_state->fmtcnv_state); backlight_enable(dbidev->backlight); drm_dev_exit(idx); diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index ac0d2ce3f8..0e8355063e 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -538,7 +538,7 @@ retry: obj_to_connector(obj), prop_value); } else { - ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value); + ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value, false); if (ret) goto out; ret = drm_atomic_commit(state); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index ac9a406250..893f52ee49 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -2617,8 +2617,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, break; } - strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); - out->name[DRM_DISPLAY_MODE_LEN-1] = 0; + strscpy_pad(out->name, in->name, sizeof(out->name)); } /** @@ -2659,8 +2658,7 @@ int drm_mode_convert_umode(struct drm_device *dev, * useful for the kernel->userspace direction anyway. 
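The fmtcnv_state parameter threaded through mipi_dbi_buf_copy() above hands the format helpers a caller-owned scratch buffer instead of having them allocate a temporary one on every conversion. The shadow-plane helpers embed one in their plane state (hence &shadow_plane_state->fmtcnv_state at the call sites), but a driver blitting outside the atomic path can hold its own. A minimal sketch, assuming only the drm_format_conv_state API from <drm/drm_format_helper.h>; the wrapper name is hypothetical:

#include <drm/drm_format_helper.h>
#include <drm/drm_mipi_dbi.h>

static int my_oneshot_copy(void *dst, struct iosys_map *src,
			   struct drm_framebuffer *fb,
			   struct drm_rect *clip, bool swap)
{
	struct drm_format_conv_state fmtcnv_state;
	int ret;

	/* Starts empty; the format helpers grow the buffer on demand. */
	drm_format_conv_state_init(&fmtcnv_state);
	ret = mipi_dbi_buf_copy(dst, src, fb, clip, swap, &fmtcnv_state);
	drm_format_conv_state_release(&fmtcnv_state);

	return ret;
}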
*/ out->type = in->type & DRM_MODE_TYPE_ALL; - strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); - out->name[DRM_DISPLAY_MODE_LEN-1] = 0; + strscpy_pad(out->name, in->name, sizeof(out->name)); /* Clearing picture aspect ratio bits from out flags, * as the aspect-ratio information is not stored in diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c index f858dfedf2..2c582020cb 100644 --- a/drivers/gpu/drm/drm_modeset_helper.c +++ b/drivers/gpu/drm/drm_modeset_helper.c @@ -193,13 +193,22 @@ int drm_mode_config_helper_suspend(struct drm_device *dev) if (!dev) return 0; + /* + * Don't disable polling if it was never initialized + */ + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_disable(dev); - drm_kms_helper_poll_disable(dev); drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); state = drm_atomic_helper_suspend(dev); if (IS_ERR(state)) { drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); - drm_kms_helper_poll_enable(dev); + /* + * Don't enable polling if it was never initialized + */ + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); + return PTR_ERR(state); } @@ -239,7 +248,11 @@ int drm_mode_config_helper_resume(struct drm_device *dev) dev->mode_config.suspend_state = NULL; drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); - drm_kms_helper_poll_enable(dev); + /* + * Don't enable polling if it is not initialized + */ + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); return ret; } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 3d92f66e55..aa93129c33 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -117,6 +117,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_leftside_up = { .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = { + .width = 1080, + .height = 1920, + .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = { .width = 1200, .height = 1920, @@ -279,6 +285,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03") }, .driver_data = (void *)&lcd720x1280_rightside_up, + }, { /* GPD Win Mini */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1617-01") + }, + .driver_data = (void *)&lcd1080x1920_rightside_up, }, { /* I.T.Works TW891 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."), diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 39d35fc3a4..c585f1e880 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -29,18 +29,12 @@ #include #include +#include #include #include #include #include "drm_internal.h" -#include "drm_legacy.h" - -#ifdef CONFIG_DRM_LEGACY -/* List of devices hanging off drivers with stealth attach. 
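Two asides on the hunks above. First, strscpy_pad() guarantees NUL termination even when in->name fills the buffer, which strncpy() did not (hence the old manual out->name[DRM_DISPLAY_MODE_LEN-1] = 0), while still zero-padding the tail so no uninitialized bytes reach userspace. Second, with the poll_enabled guards in place, drivers that never initialize output polling can still route their PM callbacks through these helpers. A minimal, hypothetical hookup:

static int my_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int my_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}

static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_pm_suspend, my_pm_resume);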
*/ -static LIST_HEAD(legacy_dev_list); -static DEFINE_MUTEX(legacy_dev_list_lock); -#endif static int drm_get_pci_domain(struct drm_device *dev) { @@ -71,199 +65,3 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) master->unique_len = strlen(master->unique); return 0; } - -#ifdef CONFIG_DRM_LEGACY - -static int drm_legacy_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) -{ - struct pci_dev *pdev = to_pci_dev(dev->dev); - - if ((p->busnum >> 8) != drm_get_pci_domain(dev) || - (p->busnum & 0xff) != pdev->bus->number || - p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn)) - return -EINVAL; - - p->irq = pdev->irq; - - DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, - p->irq); - return 0; -} - -/** - * drm_legacy_irq_by_busid - Get interrupt from bus ID - * @dev: DRM device - * @data: IOCTL parameter pointing to a drm_irq_busid structure - * @file_priv: DRM file private. - * - * Finds the PCI device with the specified bus id and gets its IRQ number. - * This IOCTL is deprecated, and will now return EINVAL for any busid not equal - * to that of the device that this DRM instance attached to. - * - * Return: 0 on success or a negative error code on failure. - */ -int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_irq_busid *p = data; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - /* UMS was only ever support on PCI devices. */ - if (WARN_ON(!dev_is_pci(dev->dev))) - return -EINVAL; - - if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) - return -EOPNOTSUPP; - - return drm_legacy_pci_irq_by_busid(dev, p); -} - -void drm_legacy_pci_agp_destroy(struct drm_device *dev) -{ - if (dev->agp) { - arch_phys_wc_del(dev->agp->agp_mtrr); - drm_legacy_agp_clear(dev); - kfree(dev->agp); - dev->agp = NULL; - } -} - -static void drm_legacy_pci_agp_init(struct drm_device *dev) -{ - if (drm_core_check_feature(dev, DRIVER_USE_AGP)) { - if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP)) - dev->agp = drm_legacy_agp_init(dev); - if (dev->agp) { - dev->agp->agp_mtrr = arch_phys_wc_add( - dev->agp->agp_info.aper_base, - dev->agp->agp_info.aper_size * - 1024 * 1024); - } - } -} - -static int drm_legacy_get_pci_dev(struct pci_dev *pdev, - const struct pci_device_id *ent, - const struct drm_driver *driver) -{ - struct drm_device *dev; - int ret; - - DRM_DEBUG("\n"); - - dev = drm_dev_alloc(driver, &pdev->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); - - ret = pci_enable_device(pdev); - if (ret) - goto err_free; - -#ifdef __alpha__ - dev->hose = pdev->sysdata; -#endif - - drm_legacy_pci_agp_init(dev); - - ret = drm_dev_register(dev, ent->driver_data); - if (ret) - goto err_agp; - - if (drm_core_check_feature(dev, DRIVER_LEGACY)) { - mutex_lock(&legacy_dev_list_lock); - list_add_tail(&dev->legacy_dev_list, &legacy_dev_list); - mutex_unlock(&legacy_dev_list_lock); - } - - return 0; - -err_agp: - drm_legacy_pci_agp_destroy(dev); - pci_disable_device(pdev); -err_free: - drm_dev_put(dev); - return ret; -} - -/** - * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver - * @driver: DRM device driver - * @pdriver: PCI device driver - * - * This is only used by legacy dri1 drivers and deprecated. - * - * Return: 0 on success or a negative error code on failure. 
- */ -int drm_legacy_pci_init(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ - struct pci_dev *pdev = NULL; - const struct pci_device_id *pid; - int i; - - DRM_DEBUG("\n"); - - if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY))) - return -EINVAL; - - /* If not using KMS, fall back to stealth mode manual scanning. */ - for (i = 0; pdriver->id_table[i].vendor != 0; i++) { - pid = &pdriver->id_table[i]; - - /* Loop around setting up a DRM device for each PCI device - * matching our ID and device class. If we had the internal - * function that pci_get_subsys and pci_get_class used, we'd - * be able to just pass pid in instead of doing a two-stage - * thing. - */ - pdev = NULL; - while ((pdev = - pci_get_subsys(pid->vendor, pid->device, pid->subvendor, - pid->subdevice, pdev)) != NULL) { - if ((pdev->class & pid->class_mask) != pid->class) - continue; - - /* stealth mode requires a manual probe */ - pci_dev_get(pdev); - drm_legacy_get_pci_dev(pdev, pid, driver); - } - } - return 0; -} -EXPORT_SYMBOL(drm_legacy_pci_init); - -/** - * drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver - * @driver: DRM device driver - * @pdriver: PCI device driver - * - * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This - * is deprecated and only used by dri1 drivers. - */ -void drm_legacy_pci_exit(const struct drm_driver *driver, - struct pci_driver *pdriver) -{ - struct drm_device *dev, *tmp; - - DRM_DEBUG("\n"); - - if (!(driver->driver_features & DRIVER_LEGACY)) { - WARN_ON(1); - } else { - mutex_lock(&legacy_dev_list_lock); - list_for_each_entry_safe(dev, tmp, &legacy_dev_list, - legacy_dev_list) { - if (dev->driver == driver) { - list_del(&dev->legacy_dev_list); - drm_put_dev(dev); - } - } - mutex_unlock(&legacy_dev_list_lock); - } - DRM_INFO("Module unloaded\n"); -} -EXPORT_SYMBOL(drm_legacy_pci_exit); - -#endif diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 311e179904..672c655c7a 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -230,6 +230,103 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane return 0; } +/** + * DOC: hotspot properties + * + * HOTSPOT_X: property to set mouse hotspot x offset. + * HOTSPOT_Y: property to set mouse hotspot y offset. + * + * When the plane is being used as a cursor image to display a mouse pointer, + * the "hotspot" is the offset within the cursor image where mouse events + * are expected to go. + * + * Positive values move the hotspot from the top-left corner of the cursor + * plane towards the right and bottom. + * + * Most display drivers do not need this information because the + * hotspot is not actually connected to anything visible on screen. + * However, this is necessary for display drivers like the para-virtualized + * drivers (eg qxl, vbox, virtio, vmwgfx), that are attached to a user console + * with a mouse pointer. Since these consoles are often being remoted over a + * network, they would otherwise have to wait to display the pointer movement to + * the user until a full network round-trip has occurred. New mouse events have + * to be sent from the user's console, over the network to the virtual input + * devices, forwarded to the desktop for processing, and then the cursor plane's + * position can be updated and sent back to the user's console over the network. 
+ * Instead, with the hotspot information, the console can anticipate the new + * location, and draw the mouse cursor there before the confirmation comes in. + * To do that correctly, the user's console must be able to predict how the + * desktop will process mouse events, which normally requires the desktop's + * mouse topology information, i.e. where each CRTC sits in the mouse coordinate + * space. This is typically sent to the para-virtualized drivers using some + * driver-specific method, and the driver then forwards it to the console by + * way of the virtual display device or hypervisor. + * + * The assumption is generally made that there is only one cursor plane being + * used this way at a time, and that the desktop is feeding all mouse devices + * into the same global pointer. Para-virtualized drivers that require this + * should only be exposing a single cursor plane, or find some other way + * to coordinate with a userspace desktop that supports multiple pointers. + * If the hotspot properties are set, the cursor plane is therefore assumed to be + * used only for displaying a mouse cursor image, and the position of the combined + * cursor plane + offset can then be used for coordinating with input from a + * mouse device. + * + * The cursor will then be drawn either at the location of the plane in the CRTC + * console, or as a free-floating cursor plane on the user's console + * corresponding to their desktop mouse position. + * + * DRM clients which would like to work correctly on drivers which expose + * hotspot properties should advertise DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT. + * Setting these properties on drivers which do not special-case + * cursor planes will return EOPNOTSUPP, which can be used by userspace to + * gauge requirements of the hardware/drivers they're running on. Advertising + * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT implies that the userspace client will be + * correctly setting the hotspot properties. + */ + +/** + * drm_plane_create_hotspot_properties - creates the mouse hotspot + * properties and attaches them to the given cursor plane + * + * @plane: drm cursor plane + * + * This function enables the mouse hotspot properties on a given + * cursor plane. Look at the documentation for hotspot properties + * to get a better understanding of what they're used for.
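For the userspace half of the contract described in the DOC section above, a hedged libdrm sketch; it assumes the HOTSPOT_X/HOTSPOT_Y property IDs were already looked up with drmModeObjectGetProperties(), and trims error handling:

#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_cursor_hotspot(int fd, uint32_t cursor_plane_id,
			      uint32_t hotspot_x_prop, uint32_t hotspot_y_prop,
			      int32_t hot_x, int32_t hot_y)
{
	drmModeAtomicReq *req;
	int ret;

	/* Opt in; fails on drivers without cursor-plane hotspot support. */
	ret = drmSetClientCap(fd, DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, 1);
	if (ret)
		return ret;

	req = drmModeAtomicAlloc();
	if (!req)
		return -ENOMEM;

	/* Signed-range properties: negative values sign-extend into u64. */
	drmModeAtomicAddProperty(req, cursor_plane_id, hotspot_x_prop, hot_x);
	drmModeAtomicAddProperty(req, cursor_plane_id, hotspot_y_prop, hot_y);
	ret = drmModeAtomicCommit(fd, req, 0, NULL);
	drmModeAtomicFree(req);

	return ret;
}

In practice the client capability would be set once at startup, before the first commit, rather than per update.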
+ * + * RETURNS: + * Zero for success or -errno + */ +static int drm_plane_create_hotspot_properties(struct drm_plane *plane) +{ + struct drm_property *prop_x; + struct drm_property *prop_y; + + drm_WARN_ON(plane->dev, + !drm_core_check_feature(plane->dev, + DRIVER_CURSOR_HOTSPOT)); + + prop_x = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_X", + INT_MIN, INT_MAX); + if (IS_ERR(prop_x)) + return PTR_ERR(prop_x); + + prop_y = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_Y", + INT_MIN, INT_MAX); + if (IS_ERR(prop_y)) { + drm_property_destroy(plane->dev, prop_x); + return PTR_ERR(prop_y); + } + + drm_object_attach_property(&plane->base, prop_x, 0); + drm_object_attach_property(&plane->base, prop_y, 0); + plane->hotspot_x_property = prop_x; + plane->hotspot_y_property = prop_y; + + return 0; +} + __printf(9, 0) static int __drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, @@ -348,6 +445,10 @@ static int __drm_universal_plane_init(struct drm_device *dev, drm_object_attach_property(&plane->base, config->prop_src_w, 0); drm_object_attach_property(&plane->base, config->prop_src_h, 0); } + if (drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) && + type == DRM_PLANE_TYPE_CURSOR) { + drm_plane_create_hotspot_properties(plane); + } if (format_modifier_count) create_in_format_blob(dev, plane); @@ -1065,8 +1166,10 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, return PTR_ERR(fb); } - fb->hot_x = req->hot_x; - fb->hot_y = req->hot_y; + if (plane->hotspot_x_property && plane->state) + plane->state->hotspot_x = req->hot_x; + if (plane->hotspot_y_property && plane->state) + plane->state->hotspot_y = req->hot_y; } else { fb = NULL; } @@ -1456,6 +1559,36 @@ out: * Drivers implementing damage can use drm_atomic_helper_damage_iter_init() and * drm_atomic_helper_damage_iter_next() helper iterator function to get damage * rectangles clipped to &drm_plane_state.src. + * + * Note that there are two types of damage handling: frame damage and buffer + * damage; the type of damage handling implemented depends on a driver's upload + * target. Drivers implementing a per-plane or per-CRTC upload target need to + * handle frame damage, while drivers implementing a per-buffer upload target + * need to handle buffer damage. + * + * The existing damage helpers only support the frame damage type; there is no + * buffer age support or similar damage accumulation algorithm implemented yet. + * + * Only drivers handling frame damage can use the mentioned damage helpers to + * iterate over the damaged regions. Drivers that handle buffer damage must set + * &drm_plane_state.ignore_damage_clips for drm_atomic_helper_damage_iter_init() + * to know that damage clips should be ignored and return &drm_plane_state.src + * as the damage rectangle, to force a full plane update. + * + * Drivers with a per-buffer upload target could compare the &drm_plane_state.fb + * of the old and new plane states to determine if the framebuffer attached to a + * plane has changed or not since the last plane update. If &drm_plane_state.fb + * has changed, then &drm_plane_state.ignore_damage_clips must be set to true. + * + * That is because drivers with a per-plane upload target expect the backing + * storage buffer to not change for a given plane. If the upload buffer changes + * between page flips, the new upload buffer has to be updated as a whole.
This + can be improved in the future if support for frame damage is added to the DRM + damage helpers, similarly to how user-space already handles this case as it is + explained in the following documents: + * + * https://registry.khronos.org/EGL/extensions/KHR/EGL_KHR_swap_buffers_with_damage.txt + * https://emersion.fr/blog/2019/intro-to-damage-tracking/ */ /** diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 5e95089676..7982be4b03 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -279,35 +279,3 @@ void drm_plane_helper_destroy(struct drm_plane *plane) kfree(plane); } EXPORT_SYMBOL(drm_plane_helper_destroy); - -/** - * drm_plane_helper_atomic_check() - Helper to check plane atomic-state - * @plane: plane to check - * @state: atomic state object - * - * Provides a default plane-state check handler for planes whose atomic-state - * scale and positioning are not expected to change since the plane is always - * a fullscreen scanout buffer. - * - * This is often the case for the primary plane of simple framebuffers. See - * also drm_crtc_helper_atomic_check() for the respective CRTC-state check - * helper function. - * - * RETURNS: - * Zero on success, or an errno code otherwise. - */ -int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) -{ - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); - struct drm_crtc *new_crtc = new_plane_state->crtc; - struct drm_crtc_state *new_crtc_state = NULL; - - if (new_crtc) - new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); - - return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - false, false); -} -EXPORT_SYMBOL(drm_plane_helper_atomic_check); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 7352bde299..03bd3c7bd0 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -582,7 +582,12 @@ int drm_gem_map_attach(struct dma_buf *dma_buf, { struct drm_gem_object *obj = dma_buf->priv; - if (!obj->funcs->get_sg_table) + /* + * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers + * that implement their own ->map_dma_buf() do not. + */ + if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf && + !obj->funcs->get_sg_table) return -ENOSYS; return drm_gem_pin(obj); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 15ed974bcb..36433fa681 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -293,14 +293,17 @@ static void reschedule_output_poll_work(struct drm_device *dev) * Drivers can call this helper from their device resume implementation. It is * not an error to call this even when output polling isn't enabled. * + * If device polling was never initialized before, this call will trigger a + * warning and return. + * * Note that calls to enable and disable polling must be strictly ordered, which * is automatically the case when they're only called from suspend/resume * callbacks.
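To make the two damage models described above concrete, two independent hedged fragments follow; my_hw_upload() and the driver names are hypothetical. A frame-damage driver walks the clipped damage with the helper iterator:

static void my_plane_atomic_update(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		my_hw_upload(plane, &clip);	/* hypothetical upload helper */
}

A buffer-damage driver instead forces the full-plane fallback whenever the framebuffer changed, per the rule above:

static int my_plane_atomic_check(struct drm_plane *plane,
				 struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	/* A new upload buffer invalidates any accumulated damage. */
	if (old_state->fb != new_state->fb)
		new_state->ignore_damage_clips = true;

	return 0;
}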
*/ void drm_kms_helper_poll_enable(struct drm_device *dev) { - if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll || - dev->mode_config.poll_running) + if (drm_WARN_ON_ONCE(dev, !dev->mode_config.poll_enabled) || + !drm_kms_helper_poll || dev->mode_config.poll_running) return; if (drm_kms_helper_enable_hpd(dev) || @@ -626,8 +629,12 @@ retry: 0); } - /* Re-enable polling in case the global poll config changed. */ - drm_kms_helper_poll_enable(dev); + /* + * Re-enable polling in case the global poll config changed but polling + * is still initialized. + */ + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); if (connector->status == connector_status_disconnected) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", @@ -767,9 +774,11 @@ static void output_poll_execute(struct work_struct *work) changed = dev->mode_config.delayed_event; dev->mode_config.delayed_event = false; - if (!drm_kms_helper_poll && dev->mode_config.poll_running) { - drm_kms_helper_disable_hpd(dev); - dev->mode_config.poll_running = false; + if (!drm_kms_helper_poll) { + if (dev->mode_config.poll_running) { + drm_kms_helper_disable_hpd(dev); + dev->mode_config.poll_running = false; + } goto out; } @@ -878,12 +887,18 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker); * not an error to call this even when output polling isn't enabled or already * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable(). * + * If, however, polling was never initialized, this call will trigger a + * warning and return. + * * Note that calls to enable and disable polling must be strictly ordered, which * is automatically the case when they're only called from suspend/resume * callbacks. */ void drm_kms_helper_poll_disable(struct drm_device *dev) { + if (drm_WARN_ON(dev, !dev->mode_config.poll_enabled)) + return; + if (dev->mode_config.poll_running) drm_kms_helper_disable_hpd(dev); diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index dfec479830..596272149a 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include "drm_crtc_internal.h" @@ -751,6 +752,64 @@ bool drm_property_replace_blob(struct drm_property_blob **blob, } EXPORT_SYMBOL(drm_property_replace_blob); +/** + * drm_property_replace_blob_from_id - replace a blob property taking a reference + * @dev: DRM device + * @blob: a pointer to the member blob to be replaced + * @blob_id: the id of the new blob to replace with + * @expected_size: expected size of the blob property + * @expected_elem_size: expected size of an element in the blob property + * @replaced: set if the blob was in fact replaced + * + * Look up the new blob from its id, take a reference on it, check that the + * blob and its elements have the expected sizes, and replace the old blob + * with the new one. @replaced reports whether the replacement took place. + * + * Return: 0 on success, or -EINVAL if the new blob was not found or the + * sizes don't match.
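A hedged sketch of the intended call site, e.g. a plane's atomic_set_property handler swapping a LUT blob; the state wrapper and property pointer are hypothetical, and a non-positive expected_size skips that particular check, as the implementation that follows shows:

static int my_plane_atomic_set_property(struct drm_plane *plane,
					struct drm_plane_state *state,
					struct drm_property *property,
					uint64_t val)
{
	struct my_plane_state *my_state = to_my_plane_state(state);
	bool replaced = false;

	if (property == my_lut_property)
		return drm_property_replace_blob_from_id(plane->dev,
							 &my_state->lut, val,
							 -1, /* any total size */
							 sizeof(struct drm_color_lut),
							 &replaced);

	return -EINVAL;
}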
+ */ +int drm_property_replace_blob_from_id(struct drm_device *dev, + struct drm_property_blob **blob, + uint64_t blob_id, + ssize_t expected_size, + ssize_t expected_elem_size, + bool *replaced) +{ + struct drm_property_blob *new_blob = NULL; + + if (blob_id != 0) { + new_blob = drm_property_lookup_blob(dev, blob_id); + if (new_blob == NULL) { + drm_dbg_atomic(dev, + "cannot find blob ID %llu\n", blob_id); + return -EINVAL; + } + + if (expected_size > 0 && + new_blob->length != expected_size) { + drm_dbg_atomic(dev, + "[BLOB:%d] length %zu different from expected %zu\n", + new_blob->base.id, new_blob->length, expected_size); + drm_property_blob_put(new_blob); + return -EINVAL; + } + if (expected_elem_size > 0 && + new_blob->length % expected_elem_size != 0) { + drm_dbg_atomic(dev, + "[BLOB:%d] length %zu not divisible by element size %zu\n", + new_blob->base.id, new_blob->length, expected_elem_size); + drm_property_blob_put(new_blob); + return -EINVAL; + } + } + + *replaced |= drm_property_replace_blob(blob, new_blob); + drm_property_blob_put(new_blob); + + return 0; +} +EXPORT_SYMBOL(drm_property_replace_blob_from_id); + int drm_mode_getblob_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c deleted file mode 100644 index f4e6184d18..0000000000 --- a/drivers/gpu/drm/drm_scatter.c +++ /dev/null @@ -1,220 +0,0 @@ -/* - * \file drm_scatter.c - * IOCTLs to manage scatter/gather memory - * - * \author Gareth Hughes - */ - -/* - * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com - * - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#include -#include -#include - -#include -#include -#include - -#include "drm_legacy.h" - -#define DEBUG_SCATTER 0 - -static void drm_sg_cleanup(struct drm_sg_mem * entry) -{ - struct page *page; - int i; - - for (i = 0; i < entry->pages; i++) { - page = entry->pagelist[i]; - if (page) - ClearPageReserved(page); - } - - vfree(entry->virtual); - - kfree(entry->busaddr); - kfree(entry->pagelist); - kfree(entry); -} - -void drm_legacy_sg_cleanup(struct drm_device *dev) -{ - if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && - drm_core_check_feature(dev, DRIVER_LEGACY)) { - drm_sg_cleanup(dev->sg); - dev->sg = NULL; - } -} -#ifdef _LP64 -# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) -#else -# define ScatterHandle(x) (unsigned int)(x) -#endif - -int drm_legacy_sg_alloc(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_scatter_gather *request = data; - struct drm_sg_mem *entry; - unsigned long pages, i, j; - - DRM_DEBUG("\n"); - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_SG)) - return -EOPNOTSUPP; - - if (request->size > SIZE_MAX - PAGE_SIZE) - return -EINVAL; - - if (dev->sg) - return -EINVAL; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; - DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages); - - entry->pages = pages; - entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL); - if (!entry->pagelist) { - kfree(entry); - return -ENOMEM; - } - - entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL); - if (!entry->busaddr) { - kfree(entry->pagelist); - kfree(entry); - return -ENOMEM; - } - - entry->virtual = vmalloc_32(pages << PAGE_SHIFT); - if (!entry->virtual) { - kfree(entry->busaddr); - kfree(entry->pagelist); - kfree(entry); - return -ENOMEM; - } - - /* This also forces the mapping of COW pages, so our page list - * will be valid. Please don't remove it... - */ - memset(entry->virtual, 0, pages << PAGE_SHIFT); - - entry->handle = ScatterHandle((unsigned long)entry->virtual); - - DRM_DEBUG("handle = %08lx\n", entry->handle); - DRM_DEBUG("virtual = %p\n", entry->virtual); - - for (i = (unsigned long)entry->virtual, j = 0; j < pages; - i += PAGE_SIZE, j++) { - entry->pagelist[j] = vmalloc_to_page((void *)i); - if (!entry->pagelist[j]) - goto failed; - SetPageReserved(entry->pagelist[j]); - } - - request->handle = entry->handle; - - dev->sg = entry; - -#if DEBUG_SCATTER - /* Verify that each page points to its virtual address, and vice - * versa. 
- */ - { - int error = 0; - - for (i = 0; i < pages; i++) { - unsigned long *tmp; - - tmp = page_address(entry->pagelist[i]); - for (j = 0; - j < PAGE_SIZE / sizeof(unsigned long); - j++, tmp++) { - *tmp = 0xcafebabe; - } - tmp = (unsigned long *)((u8 *) entry->virtual + - (PAGE_SIZE * i)); - for (j = 0; - j < PAGE_SIZE / sizeof(unsigned long); - j++, tmp++) { - if (*tmp != 0xcafebabe && error == 0) { - error = 1; - DRM_ERROR("Scatter allocation error, " - "pagelist does not match " - "virtual mapping\n"); - } - } - tmp = page_address(entry->pagelist[i]); - for (j = 0; - j < PAGE_SIZE / sizeof(unsigned long); - j++, tmp++) { - *tmp = 0; - } - } - if (error == 0) - DRM_ERROR("Scatter allocation matches pagelist\n"); - } -#endif - - return 0; - - failed: - drm_sg_cleanup(entry); - return -ENOMEM; -} - -int drm_legacy_sg_free(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_scatter_gather *request = data; - struct drm_sg_mem *entry; - - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return -EOPNOTSUPP; - - if (!drm_core_check_feature(dev, DRIVER_SG)) - return -EOPNOTSUPP; - - entry = dev->sg; - dev->sg = NULL; - - if (!entry || entry->handle != request->handle) - return -EINVAL; - - DRM_DEBUG("virtual = %p\n", entry->virtual); - - drm_sg_cleanup(entry); - - return 0; -} diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index f0bdcfc0c2..a6c19de462 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -126,6 +126,11 @@ * synchronize between the two. * This requirement is inherited from the Vulkan fence API. * + * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE is set, the ioctl will also set + * a fence deadline hint on the backing fences before waiting, to provide the + * fence signaler with an appropriate sense of urgency. The deadline is + * specified as an absolute &CLOCK_MONOTONIC value in units of ns. + * * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj * handles as well as an array of u64 points and does a host-side wait on all * of syncobj fences at the given points simultaneously. 
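A hedged userspace sketch of the new deadline flag, using the raw ioctl; the absolute CLOCK_MONOTONIC deadline is derived from clock_gettime(), and the ~16.7 ms budget (one 60 Hz frame) is an arbitrary example value:

#include <stdint.h>
#include <string.h>
#include <time.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* or libdrm's copy of the uapi header */

static int wait_with_deadline(int fd, uint32_t handle, int64_t timeout_ns)
{
	struct drm_syncobj_wait wait;
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	memset(&wait, 0, sizeof(wait));
	wait.handles = (uintptr_t)&handle;
	wait.count_handles = 1;
	wait.timeout_nsec = timeout_ns;
	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
		     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
	/* Hint: we would like the fence signaled within about one frame. */
	wait.deadline_nsec = (uint64_t)ts.tv_sec * 1000000000ull +
			     (uint64_t)ts.tv_nsec + 16666667;

	return ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}

Kernel-side, the hunks below fan the hint out with dma_fence_set_deadline() to every backing fence before sleeping; drivers observe it through their &dma_fence_ops.set_deadline callback (the DRM scheduler already propagates it for scheduler fences).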
@@ -1027,7 +1032,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, uint32_t count, uint32_t flags, signed long timeout, - uint32_t *idx) + uint32_t *idx, + ktime_t *deadline) { struct syncobj_wait_entry *entries; struct dma_fence *fence; @@ -1110,6 +1116,15 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]); } + if (deadline) { + for (i = 0; i < count; ++i) { + fence = entries[i].fence; + if (!fence) + continue; + dma_fence_set_deadline(fence, *deadline); + } + } + do { set_current_state(TASK_INTERRUPTIBLE); @@ -1208,7 +1223,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev, struct drm_file *file_private, struct drm_syncobj_wait *wait, struct drm_syncobj_timeline_wait *timeline_wait, - struct drm_syncobj **syncobjs, bool timeline) + struct drm_syncobj **syncobjs, bool timeline, + ktime_t *deadline) { signed long timeout = 0; uint32_t first = ~0; @@ -1219,7 +1235,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev, NULL, wait->count_handles, wait->flags, - timeout, &first); + timeout, &first, + deadline); if (timeout < 0) return timeout; wait->first_signaled = first; @@ -1229,7 +1246,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev, u64_to_user_ptr(timeline_wait->points), timeline_wait->count_handles, timeline_wait->flags, - timeout, &first); + timeout, &first, + deadline); if (timeout < 0) return timeout; timeline_wait->first_signaled = first; @@ -1300,17 +1318,22 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, { struct drm_syncobj_wait *args = data; struct drm_syncobj **syncobjs; + unsigned int possible_flags; + ktime_t t, *tp = NULL; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) return -EOPNOTSUPP; - if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | - DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)) + possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE; + + if (args->flags & ~possible_flags) return -EINVAL; if (args->count_handles == 0) - return -EINVAL; + return 0; ret = drm_syncobj_array_find(file_private, u64_to_user_ptr(args->handles), @@ -1319,8 +1342,13 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, if (ret < 0) return ret; + if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) { + t = ns_to_ktime(args->deadline_nsec); + tp = &t; + } + ret = drm_syncobj_array_wait(dev, file_private, - args, NULL, syncobjs, false); + args, NULL, syncobjs, false, tp); drm_syncobj_array_free(syncobjs, args->count_handles); @@ -1333,18 +1361,23 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data, { struct drm_syncobj_timeline_wait *args = data; struct drm_syncobj **syncobjs; + unsigned int possible_flags; + ktime_t t, *tp = NULL; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) return -EOPNOTSUPP; - if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | - DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | - DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) + possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE; + + if (args->flags & ~possible_flags) return -EINVAL; if (args->count_handles == 0) - return -EINVAL; + return 0; ret = drm_syncobj_array_find(file_private, u64_to_user_ptr(args->handles), @@ -1353,8 +1386,13 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void 
*data, if (ret < 0) return ret; + if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) { + t = ns_to_ktime(args->deadline_nsec); + tp = &t; + } + ret = drm_syncobj_array_wait(dev, file_private, - NULL, args, syncobjs, true); + NULL, args, syncobjs, true, tp); drm_syncobj_array_free(syncobjs, args->count_handles); diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 877e206753..702a12bc93 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -210,11 +210,6 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->get_vblank_counter) return crtc->funcs->get_vblank_counter(crtc); } -#ifdef CONFIG_DRM_LEGACY - else if (dev->driver->get_vblank_counter) { - return dev->driver->get_vblank_counter(dev, pipe); - } -#endif return drm_vblank_no_hw_counter(dev, pipe); } @@ -433,11 +428,6 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->disable_vblank) crtc->funcs->disable_vblank(crtc); } -#ifdef CONFIG_DRM_LEGACY - else { - dev->driver->disable_vblank(dev, pipe); - } -#endif } /* @@ -1151,11 +1141,6 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe) if (crtc->funcs->enable_vblank) return crtc->funcs->enable_vblank(crtc); } -#ifdef CONFIG_DRM_LEGACY - else if (dev->driver->enable_vblank) { - return dev->driver->enable_vblank(dev, pipe); - } -#endif return -EINVAL; } @@ -1574,88 +1559,6 @@ void drm_crtc_vblank_restore(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_vblank_restore); -static void drm_legacy_vblank_pre_modeset(struct drm_device *dev, - unsigned int pipe) -{ - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - - /* vblank is not initialized (IRQ not installed ?), or has been freed */ - if (!drm_dev_has_vblank(dev)) - return; - - if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) - return; - - /* - * To avoid all the problems that might happen if interrupts - * were enabled/disabled around or between these calls, we just - * have the kernel take a reference on the CRTC (just once though - * to avoid corrupting the count if multiple, mismatch calls occur), - * so that interrupts remain enabled in the interim. 
- */ - if (!vblank->inmodeset) { - vblank->inmodeset = 0x1; - if (drm_vblank_get(dev, pipe) == 0) - vblank->inmodeset |= 0x2; - } -} - -static void drm_legacy_vblank_post_modeset(struct drm_device *dev, - unsigned int pipe) -{ - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - - /* vblank is not initialized (IRQ not installed ?), or has been freed */ - if (!drm_dev_has_vblank(dev)) - return; - - if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) - return; - - if (vblank->inmodeset) { - spin_lock_irq(&dev->vbl_lock); - drm_reset_vblank_timestamp(dev, pipe); - spin_unlock_irq(&dev->vbl_lock); - - if (vblank->inmodeset & 0x2) - drm_vblank_put(dev, pipe); - - vblank->inmodeset = 0; - } -} - -int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_modeset_ctl *modeset = data; - unsigned int pipe; - - /* If drm_vblank_init() hasn't been called yet, just no-op */ - if (!drm_dev_has_vblank(dev)) - return 0; - - /* KMS drivers handle this internally */ - if (!drm_core_check_feature(dev, DRIVER_LEGACY)) - return 0; - - pipe = modeset->crtc; - if (pipe >= dev->num_crtcs) - return -EINVAL; - - switch (modeset->cmd) { - case _DRM_PRE_MODESET: - drm_legacy_vblank_pre_modeset(dev, pipe); - break; - case _DRM_POST_MODESET: - drm_legacy_vblank_post_modeset(dev, pipe); - break; - default: - return -EINVAL; - } - - return 0; -} - static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, u64 req_seq, union drm_wait_vblank *vblwait, @@ -1780,10 +1683,6 @@ static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe, static bool drm_wait_vblank_supported(struct drm_device *dev) { -#if IS_ENABLED(CONFIG_DRM_LEGACY) - if (unlikely(drm_core_check_feature(dev, DRIVER_LEGACY))) - return dev->irq_enabled; -#endif return drm_dev_has_vblank(dev); } diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c deleted file mode 100644 index 87c9fe55de..0000000000 --- a/drivers/gpu/drm/drm_vm.c +++ /dev/null @@ -1,665 +0,0 @@ -/* - * \file drm_vm.c - * Memory mapping for DRM - * - * \author Rickard E. (Rik) Faith - * \author Gareth Hughes - */ - -/* - * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include -#include -#include -#include -#include - -#if defined(__ia64__) -#include -#include -#endif -#include - -#include -#include -#include -#include -#include - -#include "drm_internal.h" -#include "drm_legacy.h" - -struct drm_vma_entry { - struct list_head head; - struct vm_area_struct *vma; - pid_t pid; -}; - -static void drm_vm_open(struct vm_area_struct *vma); -static void drm_vm_close(struct vm_area_struct *vma); - -static pgprot_t drm_io_prot(struct drm_local_map *map, - struct vm_area_struct *vma) -{ - pgprot_t tmp = vm_get_page_prot(vma->vm_flags); - -#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \ - defined(__mips__) || defined(__loongarch__) - if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING)) - tmp = pgprot_noncached(tmp); - else - tmp = pgprot_writecombine(tmp); -#elif defined(__ia64__) - if (efi_range_is_wc(vma->vm_start, vma->vm_end - - vma->vm_start)) - tmp = pgprot_writecombine(tmp); - else - tmp = pgprot_noncached(tmp); -#elif defined(__sparc__) || defined(__arm__) - tmp = pgprot_noncached(tmp); -#endif - return tmp; -} - -static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) -{ - pgprot_t tmp = vm_get_page_prot(vma->vm_flags); - -#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) - tmp = pgprot_noncached_wc(tmp); -#endif - return tmp; -} - -/* - * \c fault method for AGP virtual memory. - * - * \param vma virtual memory area. - * \param address access address. - * \return pointer to the page structure. - * - * Find the right map and if it's AGP memory find the real physical page to - * map, get the page, increment the use count and return it. - */ -#if IS_ENABLED(CONFIG_AGP) -static vm_fault_t drm_vm_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_local_map *map = NULL; - struct drm_map_list *r_list; - struct drm_hash_item *hash; - - /* - * Find the right map - */ - if (!dev->agp) - goto vm_fault_error; - - if (!dev->agp || !dev->agp->cant_use_aperture) - goto vm_fault_error; - - if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) - goto vm_fault_error; - - r_list = drm_hash_entry(hash, struct drm_map_list, hash); - map = r_list->map; - - if (map && map->type == _DRM_AGP) { - /* - * Using vm_pgoff as a selector forces us to use this unusual - * addressing scheme. 
- */ - resource_size_t offset = vmf->address - vma->vm_start; - resource_size_t baddr = map->offset + offset; - struct drm_agp_mem *agpmem; - struct page *page; - -#ifdef __alpha__ - /* - * Adjust to a bus-relative address - */ - baddr -= dev->hose->mem_space->start; -#endif - - /* - * It's AGP memory - find the real physical page to map - */ - list_for_each_entry(agpmem, &dev->agp->memory, head) { - if (agpmem->bound <= baddr && - agpmem->bound + agpmem->pages * PAGE_SIZE > baddr) - break; - } - - if (&agpmem->head == &dev->agp->memory) - goto vm_fault_error; - - /* - * Get the page, inc the use count, and return it - */ - offset = (baddr - agpmem->bound) >> PAGE_SHIFT; - page = agpmem->memory->pages[offset]; - get_page(page); - vmf->page = page; - - DRM_DEBUG - ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n", - (unsigned long long)baddr, - agpmem->memory->pages[offset], - (unsigned long long)offset, - page_count(page)); - return 0; - } -vm_fault_error: - return VM_FAULT_SIGBUS; /* Disallow mremap */ -} -#else -static vm_fault_t drm_vm_fault(struct vm_fault *vmf) -{ - return VM_FAULT_SIGBUS; -} -#endif - -/* - * \c nopage method for shared virtual memory. - * - * \param vma virtual memory area. - * \param address access address. - * \return pointer to the page structure. - * - * Get the mapping, find the real physical page to map, get the page, and - * return it. - */ -static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_local_map *map = vma->vm_private_data; - unsigned long offset; - unsigned long i; - struct page *page; - - if (!map) - return VM_FAULT_SIGBUS; /* Nothing allocated */ - - offset = vmf->address - vma->vm_start; - i = (unsigned long)map->handle + offset; - page = vmalloc_to_page((void *)i); - if (!page) - return VM_FAULT_SIGBUS; - get_page(page); - vmf->page = page; - - DRM_DEBUG("shm_fault 0x%lx\n", offset); - return 0; -} - -/* - * \c close method for shared virtual memory. - * - * \param vma virtual memory area. - * - * Deletes map information if we are the last - * person to close a mapping and it's not in the global maplist. - */ -static void drm_vm_shm_close(struct vm_area_struct *vma) -{ - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_vma_entry *pt, *temp; - struct drm_local_map *map; - struct drm_map_list *r_list; - int found_maps = 0; - - DRM_DEBUG("0x%08lx,0x%08lx\n", - vma->vm_start, vma->vm_end - vma->vm_start); - - map = vma->vm_private_data; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { - if (pt->vma->vm_private_data == map) - found_maps++; - if (pt->vma == vma) { - list_del(&pt->head); - kfree(pt); - } - } - - /* We were the only map that was found */ - if (found_maps == 1 && map->flags & _DRM_REMOVABLE) { - /* Check to see if we are in the maplist, if we are not, then - * we delete this mappings information. 
- */ - found_maps = 0; - list_for_each_entry(r_list, &dev->maplist, head) { - if (r_list->map == map) - found_maps++; - } - - if (!found_maps) { - switch (map->type) { - case _DRM_REGISTERS: - case _DRM_FRAME_BUFFER: - arch_phys_wc_del(map->mtrr); - iounmap(map->handle); - break; - case _DRM_SHM: - vfree(map->handle); - break; - case _DRM_AGP: - case _DRM_SCATTER_GATHER: - break; - case _DRM_CONSISTENT: - dma_free_coherent(dev->dev, - map->size, - map->handle, - map->offset); - break; - } - kfree(map); - } - } - mutex_unlock(&dev->struct_mutex); -} - -/* - * \c fault method for DMA virtual memory. - * - * \param address access address. - * \return pointer to the page structure. - * - * Determine the page number from the page offset and get it from drm_device_dma::pagelist. - */ -static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_device_dma *dma = dev->dma; - unsigned long offset; - unsigned long page_nr; - struct page *page; - - if (!dma) - return VM_FAULT_SIGBUS; /* Error */ - if (!dma->pagelist) - return VM_FAULT_SIGBUS; /* Nothing allocated */ - - offset = vmf->address - vma->vm_start; - /* vm_[pg]off[set] should be 0 */ - page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */ - page = virt_to_page((void *)dma->pagelist[page_nr]); - - get_page(page); - vmf->page = page; - - DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr); - return 0; -} - -/* - * \c fault method for scatter-gather virtual memory. - * - * \param address access address. - * \return pointer to the page structure. - * - * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. 
- */ -static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_local_map *map = vma->vm_private_data; - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_sg_mem *entry = dev->sg; - unsigned long offset; - unsigned long map_offset; - unsigned long page_offset; - struct page *page; - - if (!entry) - return VM_FAULT_SIGBUS; /* Error */ - if (!entry->pagelist) - return VM_FAULT_SIGBUS; /* Nothing allocated */ - - offset = vmf->address - vma->vm_start; - map_offset = map->offset - (unsigned long)dev->sg->virtual; - page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); - page = entry->pagelist[page_offset]; - get_page(page); - vmf->page = page; - - return 0; -} - -/** AGP virtual memory operations */ -static const struct vm_operations_struct drm_vm_ops = { - .fault = drm_vm_fault, - .open = drm_vm_open, - .close = drm_vm_close, -}; - -/** Shared virtual memory operations */ -static const struct vm_operations_struct drm_vm_shm_ops = { - .fault = drm_vm_shm_fault, - .open = drm_vm_open, - .close = drm_vm_shm_close, -}; - -/** DMA virtual memory operations */ -static const struct vm_operations_struct drm_vm_dma_ops = { - .fault = drm_vm_dma_fault, - .open = drm_vm_open, - .close = drm_vm_close, -}; - -/** Scatter-gather virtual memory operations */ -static const struct vm_operations_struct drm_vm_sg_ops = { - .fault = drm_vm_sg_fault, - .open = drm_vm_open, - .close = drm_vm_close, -}; - -static void drm_vm_open_locked(struct drm_device *dev, - struct vm_area_struct *vma) -{ - struct drm_vma_entry *vma_entry; - - DRM_DEBUG("0x%08lx,0x%08lx\n", - vma->vm_start, vma->vm_end - vma->vm_start); - - vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL); - if (vma_entry) { - vma_entry->vma = vma; - vma_entry->pid = current->pid; - list_add(&vma_entry->head, &dev->vmalist); - } -} - -static void drm_vm_open(struct vm_area_struct *vma) -{ - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - - mutex_lock(&dev->struct_mutex); - drm_vm_open_locked(dev, vma); - mutex_unlock(&dev->struct_mutex); -} - -static void drm_vm_close_locked(struct drm_device *dev, - struct vm_area_struct *vma) -{ - struct drm_vma_entry *pt, *temp; - - DRM_DEBUG("0x%08lx,0x%08lx\n", - vma->vm_start, vma->vm_end - vma->vm_start); - - list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { - if (pt->vma == vma) { - list_del(&pt->head); - kfree(pt); - break; - } - } -} - -/* - * \c close method for all virtual memory types. - * - * \param vma virtual memory area. - * - * Search the \p vma private data entry in drm_device::vmalist, unlink it, and - * free it. - */ -static void drm_vm_close(struct vm_area_struct *vma) -{ - struct drm_file *priv = vma->vm_file->private_data; - struct drm_device *dev = priv->minor->dev; - - mutex_lock(&dev->struct_mutex); - drm_vm_close_locked(dev, vma); - mutex_unlock(&dev->struct_mutex); -} - -/* - * mmap DMA memory. - * - * \param file_priv DRM file private. - * \param vma virtual memory area. - * \return zero on success or a negative number on failure. - * - * Sets the virtual memory area operations structure to vm_dma_ops, the file - * pointer, and calls vm_open(). 
- */ -static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *priv = filp->private_data; - struct drm_device *dev; - struct drm_device_dma *dma; - unsigned long length = vma->vm_end - vma->vm_start; - - dev = priv->minor->dev; - dma = dev->dma; - DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", - vma->vm_start, vma->vm_end, vma->vm_pgoff); - - /* Length must match exact page count */ - if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { - return -EINVAL; - } - - if (!capable(CAP_SYS_ADMIN) && - (dma->flags & _DRM_DMA_USE_PCI_RO)) { - vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE); -#if defined(__i386__) || defined(__x86_64__) - pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; -#else - /* Ye gads this is ugly. With more thought - we could move this up higher and use - `protection_map' instead. */ - vma->vm_page_prot = - __pgprot(pte_val - (pte_wrprotect - (__pte(pgprot_val(vma->vm_page_prot))))); -#endif - } - - vma->vm_ops = &drm_vm_dma_ops; - - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); - - drm_vm_open_locked(dev, vma); - return 0; -} - -static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) -{ -#ifdef __alpha__ - return dev->hose->dense_mem_base; -#else - return 0; -#endif -} - -/* - * mmap DMA memory. - * - * \param file_priv DRM file private. - * \param vma virtual memory area. - * \return zero on success or a negative number on failure. - * - * If the virtual memory area has no offset associated with it then it's a DMA - * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist, - * checks that the restricted flag is not set, sets the virtual memory operations - * according to the mapping type and remaps the pages. Finally sets the file - * pointer and calls vm_open(). - */ -static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->minor->dev; - struct drm_local_map *map = NULL; - resource_size_t offset = 0; - struct drm_hash_item *hash; - - DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", - vma->vm_start, vma->vm_end, vma->vm_pgoff); - - if (!priv->authenticated) - return -EACCES; - - /* We check for "dma". On Apple's UniNorth, it's valid to have - * the AGP mapped at physical address 0 - * --BenH. - */ - if (!vma->vm_pgoff -#if IS_ENABLED(CONFIG_AGP) - && (!dev->agp - || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE) -#endif - ) - return drm_mmap_dma(filp, vma); - - if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) { - DRM_ERROR("Could not find map\n"); - return -EINVAL; - } - - map = drm_hash_entry(hash, struct drm_map_list, hash)->map; - if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) - return -EPERM; - - /* Check for valid size. */ - if (map->size < vma->vm_end - vma->vm_start) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) { - vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE); -#if defined(__i386__) || defined(__x86_64__) - pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; -#else - /* Ye gads this is ugly. With more thought - we could move this up higher and use - `protection_map' instead. 
*/ - vma->vm_page_prot = - __pgprot(pte_val - (pte_wrprotect - (__pte(pgprot_val(vma->vm_page_prot))))); -#endif - } - - switch (map->type) { -#if !defined(__arm__) - case _DRM_AGP: - if (dev->agp && dev->agp->cant_use_aperture) { - /* - * On some platforms we can't talk to bus dma address from the CPU, so for - * memory of type DRM_AGP, we'll deal with sorting out the real physical - * pages and mappings in fault() - */ -#if defined(__powerpc__) - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -#endif - vma->vm_ops = &drm_vm_ops; - break; - } - fallthrough; /* to _DRM_FRAME_BUFFER... */ -#endif - case _DRM_FRAME_BUFFER: - case _DRM_REGISTERS: - offset = drm_core_get_reg_ofs(dev); - vma->vm_page_prot = drm_io_prot(map, vma); - if (io_remap_pfn_range(vma, vma->vm_start, - (map->offset + offset) >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," - " offset = 0x%llx\n", - map->type, - vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset)); - - vma->vm_ops = &drm_vm_ops; - break; - case _DRM_CONSISTENT: - /* Consistent memory is really like shared memory. But - * it's allocated in a different way, so avoid fault */ - if (remap_pfn_range(vma, vma->vm_start, - page_to_pfn(virt_to_page(map->handle)), - vma->vm_end - vma->vm_start, vma->vm_page_prot)) - return -EAGAIN; - vma->vm_page_prot = drm_dma_prot(map->type, vma); - fallthrough; /* to _DRM_SHM */ - case _DRM_SHM: - vma->vm_ops = &drm_vm_shm_ops; - vma->vm_private_data = (void *)map; - break; - case _DRM_SCATTER_GATHER: - vma->vm_ops = &drm_vm_sg_ops; - vma->vm_private_data = (void *)map; - vma->vm_page_prot = drm_dma_prot(map->type, vma); - break; - default: - return -EINVAL; /* This should never happen. 
*/ - } - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); - - drm_vm_open_locked(dev, vma); - return 0; -} - -int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->minor->dev; - int ret; - - if (drm_dev_is_unplugged(dev)) - return -ENODEV; - - mutex_lock(&dev->struct_mutex); - ret = drm_mmap_locked(filp, vma); - mutex_unlock(&dev->struct_mutex); - - return ret; -} -EXPORT_SYMBOL(drm_legacy_mmap); - -#if IS_ENABLED(CONFIG_DRM_LEGACY) -void drm_legacy_vma_flush(struct drm_device *dev) -{ - struct drm_vma_entry *vma, *vma_temp; - - /* Clear vma list (only needed for legacy drivers) */ - list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { - list_del(&vma->head); - kfree(vma); - } -} -#endif diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index f9bc837e22..9a2965741d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -640,16 +640,14 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) return component_master_add_with_match(dev, &etnaviv_master_ops, match); } -static int etnaviv_pdev_remove(struct platform_device *pdev) +static void etnaviv_pdev_remove(struct platform_device *pdev) { component_master_del(&pdev->dev, &etnaviv_master_ops); - - return 0; } static struct platform_driver etnaviv_platform_driver = { .probe = etnaviv_pdev_probe, - .remove = etnaviv_pdev_remove, + .remove_new = etnaviv_pdev_remove, .driver = { .name = "etnaviv", }, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 2416c526f9..3d0f8d1825 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -535,7 +535,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, ret = drm_sched_job_init(&submit->sched_job, &ctx->sched_entity[args->pipe], - submit->ctx); + 1, submit->ctx); if (ret) goto err_submit_put; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 9276756e13..9b8445d2a1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1904,11 +1904,10 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) return 0; } -static int etnaviv_gpu_platform_remove(struct platform_device *pdev) +static void etnaviv_gpu_platform_remove(struct platform_device *pdev) { component_del(&pdev->dev, &gpu_ops); pm_runtime_disable(&pdev->dev); - return 0; } static int etnaviv_gpu_rpm_suspend(struct device *dev) @@ -1917,7 +1916,7 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev) u32 idle, mask; /* If there are any jobs in the HW queue, we're not idle */ - if (atomic_read(&gpu->sched.hw_rq_count)) + if (atomic_read(&gpu->sched.credit_count)) return -EBUSY; /* Check whether the hardware (except FE and MC) is idle */ @@ -1970,6 +1969,6 @@ struct platform_driver etnaviv_gpu_driver = { .of_match_table = etnaviv_gpu_match, }, .probe = etnaviv_gpu_platform_probe, - .remove = etnaviv_gpu_platform_remove, + .remove_new = etnaviv_gpu_platform_remove, .id_table = gpu_ids, }; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 9b79f218e2..c4b04b0dee 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -134,7 +134,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu) { int ret; - ret = drm_sched_init(&gpu->sched, 
&etnaviv_sched_ops, + ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, etnaviv_hw_jobs_limit, etnaviv_job_hang_limit, msecs_to_jiffies(500), NULL, NULL, diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index bce0275524..0ef7bc8848 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -862,18 +862,16 @@ err_disable_pm_runtime: return ret; } -static int exynos5433_decon_remove(struct platform_device *pdev) +static void exynos5433_decon_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); component_del(&pdev->dev, &decon_component_ops); - - return 0; } struct platform_driver exynos5433_decon_driver = { .probe = exynos5433_decon_probe, - .remove = exynos5433_decon_remove, + .remove_new = exynos5433_decon_remove, .driver = { .name = "exynos5433-decon", .pm = pm_ptr(&exynos5433_decon_pm_ops), diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 0156a5e944..0d185c0564 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -765,7 +765,7 @@ err_iounmap: return ret; } -static int decon_remove(struct platform_device *pdev) +static void decon_remove(struct platform_device *pdev) { struct decon_context *ctx = dev_get_drvdata(&pdev->dev); @@ -774,8 +774,6 @@ static int decon_remove(struct platform_device *pdev) iounmap(ctx->regs); component_del(&pdev->dev, &decon_component_ops); - - return 0; } static int exynos7_decon_suspend(struct device *dev) @@ -840,7 +838,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(exynos7_decon_pm_ops, exynos7_decon_suspend, struct platform_driver decon_driver = { .probe = decon_probe, - .remove = decon_remove, + .remove_new = decon_remove, .driver = { .name = "exynos-decon", .pm = pm_ptr(&exynos7_decon_pm_ops), diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 3404ec1367..ca31bad6c5 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -250,14 +250,12 @@ out: return component_add(&pdev->dev, &exynos_dp_ops); } -static int exynos_dp_remove(struct platform_device *pdev) +static void exynos_dp_remove(struct platform_device *pdev) { struct exynos_dp_device *dp = platform_get_drvdata(pdev); component_del(&pdev->dev, &exynos_dp_ops); analogix_dp_remove(dp->adp); - - return 0; } static int exynos_dp_suspend(struct device *dev) @@ -285,7 +283,7 @@ MODULE_DEVICE_TABLE(of, exynos_dp_match); struct platform_driver dp_driver = { .probe = exynos_dp_probe, - .remove = exynos_dp_remove, + .remove_new = exynos_dp_remove, .driver = { .name = "exynos-dp", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index 378e538197..0dc36df6ad 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -101,7 +101,7 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder) ret = drm_connector_init(encoder->dev, connector, &exynos_dpi_connector_funcs, - DRM_MODE_CONNECTOR_VGA); + DRM_MODE_CONNECTOR_DPI); if (ret) { DRM_DEV_ERROR(ctx->dev, "failed to initialize connector with drm\n"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 5380fb6c55..7c59e1164a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -346,10 +346,9 @@ static int 
exynos_drm_platform_probe(struct platform_device *pdev) match); } -static int exynos_drm_platform_remove(struct platform_device *pdev) +static void exynos_drm_platform_remove(struct platform_device *pdev) { component_master_del(&pdev->dev, &exynos_drm_ops); - return 0; } static void exynos_drm_platform_shutdown(struct platform_device *pdev) @@ -362,7 +361,7 @@ static void exynos_drm_platform_shutdown(struct platform_device *pdev) static struct platform_driver exynos_drm_platform_driver = { .probe = exynos_drm_platform_probe, - .remove = exynos_drm_platform_remove, + .remove_new = exynos_drm_platform_remove, .shutdown = exynos_drm_platform_shutdown, .driver = { .name = "exynos-drm", diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 8de2714599..e81a576de3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1367,7 +1367,7 @@ err_pm_dis: return ret; } -static int fimc_remove(struct platform_device *pdev) +static void fimc_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct fimc_context *ctx = get_fimc_context(dev); @@ -1377,8 +1377,6 @@ static int fimc_remove(struct platform_device *pdev) pm_runtime_disable(dev); fimc_put_clocks(ctx); - - return 0; } static int fimc_runtime_suspend(struct device *dev) @@ -1410,7 +1408,7 @@ MODULE_DEVICE_TABLE(of, fimc_of_match); struct platform_driver fimc_driver = { .probe = fimc_probe, - .remove = fimc_remove, + .remove_new = fimc_remove, .driver = { .of_match_table = fimc_of_match, .name = "exynos-drm-fimc", diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 5bdc246f5f..f2145227a1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -480,7 +480,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc) struct fimd_context *ctx = crtc->ctx; struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; const struct fimd_driver_data *driver_data = ctx->driver_data; - void *timing_base = ctx->regs + driver_data->timing_base; + void __iomem *timing_base = ctx->regs + driver_data->timing_base; u32 val; if (ctx->suspended) @@ -1277,13 +1277,11 @@ err_disable_pm_runtime: return ret; } -static int fimd_remove(struct platform_device *pdev) +static void fimd_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); component_del(&pdev->dev, &fimd_component_ops); - - return 0; } static int exynos_fimd_suspend(struct device *dev) @@ -1325,7 +1323,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(exynos_fimd_pm_ops, exynos_fimd_suspend, struct platform_driver fimd_driver = { .probe = fimd_probe, - .remove = fimd_remove, + .remove_new = fimd_remove, .driver = { .name = "exynos4-fb", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 414e585ec7..f313842361 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1530,7 +1530,7 @@ err_destroy_slab: return ret; } -static int g2d_remove(struct platform_device *pdev) +static void g2d_remove(struct platform_device *pdev) { struct g2d_data *g2d = platform_get_drvdata(pdev); @@ -1545,8 +1545,6 @@ static int g2d_remove(struct platform_device *pdev) g2d_fini_cmdlist(g2d); destroy_workqueue(g2d->g2d_workq); kmem_cache_destroy(g2d->runqueue_slab); - - return 0; } static int g2d_suspend(struct device *dev) @@ -1609,7 +1607,7 @@ MODULE_DEVICE_TABLE(of, exynos_g2d_match); struct 
platform_driver g2d_driver = { .probe = g2d_probe, - .remove = g2d_remove, + .remove_new = g2d_remove, .driver = { .name = "exynos-drm-g2d", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 5302bebbe3..180507a477 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -11,9 +11,10 @@ #include #include #include -#include <linux/of_device.h> +#include <linux/of.h> #include #include +#include <linux/property.h> #include #include @@ -103,7 +104,7 @@ struct gsc_context { unsigned int num_formats; void __iomem *regs; - const char **clk_names; + const char *const *clk_names; struct clk *clocks[GSC_MAX_CLOCKS]; int num_clocks; struct gsc_scaler sc; @@ -1217,7 +1218,7 @@ static const unsigned int gsc_tiled_formats[] = { static int gsc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct gsc_driverdata *driver_data; + const struct gsc_driverdata *driver_data; struct exynos_drm_ipp_formats *formats; struct gsc_context *ctx; int num_formats, ret, i, j; @@ -1226,7 +1227,7 @@ static int gsc_probe(struct platform_device *pdev) if (!ctx) return -ENOMEM; - driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev); + driver_data = device_get_match_data(dev); ctx->dev = dev; ctx->num_clocks = driver_data->num_clocks; ctx->clk_names = driver_data->clk_names; @@ -1308,15 +1309,13 @@ err_pm_dis: return ret; } -static int gsc_remove(struct platform_device *pdev) +static void gsc_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; component_del(dev, &gsc_component_ops); pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); - - return 0; } static int __maybe_unused gsc_runtime_suspend(struct device *dev) @@ -1421,7 +1420,7 @@ MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match); struct platform_driver gsc_driver = { .probe = gsc_probe, - .remove = gsc_remove, + .remove_new = gsc_remove, .driver = { .name = "exynos-drm-gsc", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index 17bab5b166..e292096018 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -442,7 +442,7 @@ err: return ret; } -static int exynos_mic_remove(struct platform_device *pdev) +static void exynos_mic_remove(struct platform_device *pdev) { struct exynos_mic *mic = platform_get_drvdata(pdev); @@ -450,8 +450,6 @@ static int exynos_mic_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); drm_bridge_remove(&mic->bridge); - - return 0; } static const struct of_device_id exynos_mic_of_match[] = { @@ -462,7 +460,7 @@ MODULE_DEVICE_TABLE(of, exynos_mic_of_match); struct platform_driver mic_driver = { .probe = exynos_mic_probe, - .remove = exynos_mic_remove, + .remove_new = exynos_mic_remove, .driver = { .name = "exynos-mic", .pm = pm_ptr(&exynos_mic_pm_ops), diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index ffb327c513..5f7516655b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -329,15 +329,13 @@ err_component: return ret; } -static int rotator_remove(struct platform_device *pdev) +static void rotator_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; component_del(dev, &rotator_component_ops); pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); - - return 0; } static int rotator_runtime_suspend(struct device *dev) @@ -453,7 +451,7 @@ static
DEFINE_RUNTIME_DEV_PM_OPS(rotator_pm_ops, rotator_runtime_suspend, struct platform_driver rotator_driver = { .probe = rotator_probe, - .remove = rotator_remove, + .remove_new = rotator_remove, .driver = { .name = "exynos-rotator", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index f2b8b09a6b..392f721f13 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -539,15 +539,13 @@ err_ippdrv_register: return ret; } -static int scaler_remove(struct platform_device *pdev) +static void scaler_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; component_del(dev, &scaler_component_ops); pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); - - return 0; } static int clk_disable_unprepare_wrapper(struct clk *clk) @@ -721,7 +719,7 @@ MODULE_DEVICE_TABLE(of, exynos_scaler_match); struct platform_driver scaler_driver = { .probe = scaler_probe, - .remove = scaler_remove, + .remove_new = scaler_remove, .driver = { .name = "exynos-scaler", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index fb941a8c99..f5bbba9ad2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -462,7 +462,7 @@ static int vidi_probe(struct platform_device *pdev) return component_add(dev, &vidi_component_ops); } -static int vidi_remove(struct platform_device *pdev) +static void vidi_remove(struct platform_device *pdev) { struct vidi_context *ctx = platform_get_drvdata(pdev); @@ -472,13 +472,11 @@ static int vidi_remove(struct platform_device *pdev) } component_del(&pdev->dev, &vidi_component_ops); - - return 0; } struct platform_driver vidi_driver = { .probe = vidi_probe, - .remove = vidi_remove, + .remove_new = vidi_remove, .driver = { .name = "exynos-drm-vidi", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index eff51bfc46..b1d02dec37 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -2069,7 +2069,7 @@ err_ddc: return ret; } -static int hdmi_remove(struct platform_device *pdev) +static void hdmi_remove(struct platform_device *pdev) { struct hdmi_context *hdata = platform_get_drvdata(pdev); @@ -2092,8 +2092,6 @@ static int hdmi_remove(struct platform_device *pdev) put_device(&hdata->ddc_adpt->dev); mutex_destroy(&hdata->mutex); - - return 0; } static int __maybe_unused exynos_hdmi_suspend(struct device *dev) @@ -2125,7 +2123,7 @@ static const struct dev_pm_ops exynos_hdmi_pm_ops = { struct platform_driver hdmi_driver = { .probe = hdmi_probe, - .remove = hdmi_remove, + .remove_new = hdmi_remove, .driver = { .name = "exynos-hdmi", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index b302392ff0..6822333fd0 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1258,13 +1258,11 @@ static int mixer_probe(struct platform_device *pdev) return ret; } -static int mixer_remove(struct platform_device *pdev) +static void mixer_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); component_del(&pdev->dev, &mixer_component_ops); - - return 0; } static int __maybe_unused exynos_mixer_suspend(struct device *dev) @@ -1338,5 +1336,5 @@ struct platform_driver mixer_driver = { .of_match_table = mixer_match_types, }, .probe = mixer_probe, - 
.remove = mixer_remove, + .remove_new = mixer_remove, }; diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile index 4f302cd5e1..58fed80c73 100644 --- a/drivers/gpu/drm/gma500/Makefile +++ b/drivers/gpu/drm/gma500/Makefile @@ -34,7 +34,6 @@ gma500_gfx-y += \ psb_intel_lvds.o \ psb_intel_modes.o \ psb_intel_sdvo.o \ - psb_lid.o \ psb_irq.o gma500_gfx-$(CONFIG_ACPI) += opregion.o diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 8992a95076..dd1eb7e987 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -855,7 +855,6 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector, memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter)); intel_dp->adapter.owner = THIS_MODULE; - intel_dp->adapter.class = I2C_CLASS_DDC; strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; intel_dp->adapter.algo_data = &intel_dp->algo; diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c index 09cedabf47..aa45509859 100644 --- a/drivers/gpu/drm/gma500/intel_gmbus.c +++ b/drivers/gpu/drm/gma500/intel_gmbus.c @@ -411,7 +411,6 @@ int gma_intel_setup_gmbus(struct drm_device *dev) struct intel_gmbus *bus = &dev_priv->gmbus[i]; bus->adapter.owner = THIS_MODULE; - bus->adapter.class = I2C_CLASS_DDC; snprintf(bus->adapter.name, sizeof(bus->adapter.name), "gma500 gmbus %s", diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c index fc9a34ed58..6daa6669ed 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c @@ -168,7 +168,6 @@ static struct i2c_adapter oaktrail_hdmi_i2c_adapter = { .name = "oaktrail_hdmi_i2c", .nr = 3, .owner = THIS_MODULE, - .class = I2C_CLASS_DDC, .algo = &oaktrail_hdmi_i2c_algorithm, }; diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c index dcfcd7b89d..6dece8f0e3 100644 --- a/drivers/gpu/drm/gma500/psb_device.c +++ b/drivers/gpu/drm/gma500/psb_device.c @@ -73,8 +73,7 @@ static int psb_backlight_setup(struct drm_device *dev) } psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS); - /* This must occur after the backlight is properly initialised */ - psb_lid_timer_init(dev_priv); + return 0; } @@ -259,8 +258,6 @@ static int psb_chip_setup(struct drm_device *dev) static void psb_chip_teardown(struct drm_device *dev) { - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - psb_lid_timer_takedown(dev_priv); gma_intel_teardown_gmbus(dev); } diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index c5edfa4aa4..83c17689c4 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -162,7 +162,6 @@ #define PSB_NUM_VBLANKS 2 #define PSB_WATCHDOG_DELAY (HZ * 2) -#define PSB_LID_DELAY (HZ / 10) #define PSB_MAX_BRIGHTNESS 100 @@ -491,11 +490,7 @@ struct drm_psb_private { /* Hotplug handling */ struct work_struct hotplug_work; - /* LID-Switch */ - spinlock_t lid_lock; - struct timer_list lid_timer; struct psb_intel_opregion opregion; - u32 lid_last_state; /* Watchdog */ uint32_t apm_reg; @@ -591,10 +586,6 @@ struct psb_ops { int i2c_bus; /* I2C bus identifier for Moorestown */ }; -/* psb_lid.c */ -extern void psb_lid_timer_init(struct drm_psb_private *dev_priv); -extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv); - /* modesetting */ extern void 
psb_modeset_init(struct drm_device *dev); extern void psb_modeset_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index d6fd5d7262..e4f914dece 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -2426,7 +2426,6 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo, struct drm_device *dev) { sdvo->ddc.owner = THIS_MODULE; - sdvo->ddc.class = I2C_CLASS_DDC; snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy"); sdvo->ddc.dev.parent = dev->dev; sdvo->ddc.algo_data = sdvo; diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c deleted file mode 100644 index 58a7fe3926..0000000000 --- a/drivers/gpu/drm/gma500/psb_lid.c +++ /dev/null @@ -1,80 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/************************************************************************** - * Copyright (c) 2007, Intel Corporation. - * - * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> - **************************************************************************/ - -#include <linux/spinlock.h> - -#include "psb_drv.h" -#include "psb_intel_reg.h" -#include "psb_reg.h" - -static void psb_lid_timer_func(struct timer_list *t) -{ - struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer); - struct drm_device *dev = (struct drm_device *)&dev_priv->dev; - struct timer_list *lid_timer = &dev_priv->lid_timer; - unsigned long irq_flags; - u32 __iomem *lid_state = dev_priv->opregion.lid_state; - u32 pp_status; - - if (readl(lid_state) == dev_priv->lid_last_state) - goto lid_timer_schedule; - - if ((readl(lid_state)) & 0x01) { - /*lid state is open*/ - REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON); - do { - pp_status = REG_READ(PP_STATUS); - } while ((pp_status & PP_ON) == 0 && - (pp_status & PP_SEQUENCE_MASK) != 0); - - if (REG_READ(PP_STATUS) & PP_ON) { - /*FIXME: should be backlight level before*/ - psb_intel_lvds_set_brightness(dev, 100); - } else { - DRM_DEBUG("LVDS panel never powered up"); - return; - } - } else { - psb_intel_lvds_set_brightness(dev, 0); - - REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON); - do { - pp_status = REG_READ(PP_STATUS); - } while ((pp_status & PP_ON) == 0); - } - dev_priv->lid_last_state = readl(lid_state); - -lid_timer_schedule: - spin_lock_irqsave(&dev_priv->lid_lock, irq_flags); - if (!timer_pending(lid_timer)) { - lid_timer->expires = jiffies + PSB_LID_DELAY; - add_timer(lid_timer); - } - spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags); -} - -void psb_lid_timer_init(struct drm_psb_private *dev_priv) -{ - struct timer_list *lid_timer = &dev_priv->lid_timer; - unsigned long irq_flags; - - spin_lock_init(&dev_priv->lid_lock); - spin_lock_irqsave(&dev_priv->lid_lock, irq_flags); - - timer_setup(lid_timer, psb_lid_timer_func, 0); - - lid_timer->expires = jiffies + PSB_LID_DELAY; - - add_timer(lid_timer); - spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags); -} - -void psb_lid_timer_takedown(struct drm_psb_private *dev_priv) -{ - del_timer_sync(&dev_priv->lid_timer); -} - diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c index a02f75be81..e163649816 100644 --- a/drivers/gpu/drm/gud/gud_pipe.c +++ b/drivers/gpu/drm/gud/gud_pipe.c @@ -51,7 +51,8 @@ static bool gud_is_big_endian(void) static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format, void *src, struct drm_framebuffer *fb, - struct drm_rect *rect) + struct drm_rect *rect, + struct drm_format_conv_state *fmtcnv_state)
{ unsigned int block_width = drm_format_info_block_width(format, 0); unsigned int bits_per_pixel = 8 / block_width; @@ -75,7 +76,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format iosys_map_set_vaddr(&dst_map, buf); iosys_map_set_vaddr(&vmap, src); - drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect); + drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect, fmtcnv_state); pix8 = buf; for (y = 0; y < height; y++) { @@ -152,7 +153,8 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb, const struct iosys_map *src, bool cached_reads, const struct drm_format_info *format, struct drm_rect *rect, - struct gud_set_buffer_req *req) + struct gud_set_buffer_req *req, + struct drm_format_conv_state *fmtcnv_state) { u8 compression = gdrm->compression; struct iosys_map dst; @@ -178,23 +180,23 @@ retry: */ if (format != fb->format) { if (format->format == GUD_DRM_FORMAT_R1) { - len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect); + len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect, fmtcnv_state); if (!len) return -ENOMEM; } else if (format->format == DRM_FORMAT_R8) { - drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect); + drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect, fmtcnv_state); } else if (format->format == DRM_FORMAT_RGB332) { - drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect); + drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect, fmtcnv_state); } else if (format->format == DRM_FORMAT_RGB565) { - drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, + drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, fmtcnv_state, gud_is_big_endian()); } else if (format->format == DRM_FORMAT_RGB888) { - drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect); + drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect, fmtcnv_state); } else { len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect); } } else if (gud_is_big_endian() && format->cpp[0] > 1) { - drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads); + drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads, fmtcnv_state); } else if (compression && cached_reads && pitch == fb->pitches[0]) { /* can compress directly from the framebuffer */ buf = vaddr + rect->y1 * pitch; @@ -266,7 +268,8 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len) static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb, const struct iosys_map *src, bool cached_reads, - const struct drm_format_info *format, struct drm_rect *rect) + const struct drm_format_info *format, struct drm_rect *rect, + struct drm_format_conv_state *fmtcnv_state) { struct gud_set_buffer_req req; size_t len, trlen; @@ -274,7 +277,7 @@ static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb, drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); - ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req); + ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req, fmtcnv_state); if (ret) return ret; @@ -318,6 +321,7 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb const struct iosys_map *src, bool cached_reads, struct drm_rect *damage) { + struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT; const struct drm_format_info *format; unsigned int i, lines; size_t pitch; @@ -340,7 +344,7 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb rect.y1 += i * 
lines; rect.y2 = min_t(u32, rect.y1 + lines, damage->y2); - ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect); + ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect, &fmtcnv_state); if (ret) { if (ret != -ENODEV && ret != -ECONNRESET && ret != -ESHUTDOWN && ret != -EPROTO) @@ -350,6 +354,8 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb break; } } + + drm_format_conv_state_release(&fmtcnv_state); } void gud_flush_work(struct work_struct *work) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c index 410bd019bb..e6e48651c1 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c @@ -81,7 +81,6 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector) { connector->adapter.owner = THIS_MODULE; - connector->adapter.class = I2C_CLASS_DDC; snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus"); connector->adapter.dev.parent = drm_dev->dev; i2c_set_adapdata(&connector->adapter, connector); diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index d511d17c5b..cff85086f2 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -7,7 +7,6 @@ #include #include #include -#include <linux/screen_info.h> #include #include @@ -73,11 +72,6 @@ static int hyperv_setup_vram(struct hyperv_drm_device *hv, struct drm_device *dev = &hv->dev; int ret; - if (IS_ENABLED(CONFIG_SYSFB)) - drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base, - screen_info.lfb_size, - &hyperv_driver); - hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024; ret = vmbus_allocate_mmio(&hv->mem, hdev, 0, -1, hv->fb_size, 0x100000, @@ -130,6 +124,8 @@ static int hyperv_vmbus_probe(struct hv_device *hdev, goto err_hv_set_drv_data; } + drm_aperture_remove_framebuffers(&hyperv_driver); + ret = hyperv_setup_vram(hv, hdev); if (ret) goto err_vmbus_close; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index ce397a8797..3089029abb 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -94,7 +94,7 @@ config DRM_I915_CAPTURE_ERROR This option enables capturing the GPU state when a hang is detected. This information is vital for triaging hangs and assists in debugging. Please report any hang for triaging according to: - https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs + https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html If in doubt, say "Y". @@ -140,7 +140,7 @@ config DRM_I915_GVT_KVMGT Note that this driver only supports newer device from Broadwell on. For further information and setup guide, you can visit: - http://01.org/igvt-g. + https://github.com/intel/gvt-linux/wiki. If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 2d21930d55..5b71620768 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -24,7 +24,9 @@ config DRM_I915_DEBUG select DEBUG_FS select PREEMPT_COUNT select I2C_CHARDEV + select REF_TRACKER select STACKDEPOT + select STACKTRACE select DRM_DP_AUX_CHARDEV select X86_MSR # used by igt/pm_rpm select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) @@ -38,6 +40,7 @@ config DRM_I915_DEBUG select DRM_I915_DEBUG_GEM_ONCE select DRM_I915_DEBUG_MMIO select DRM_I915_DEBUG_RUNTIME_PM + select DRM_I915_DEBUG_WAKEREF select DRM_I915_SW_FENCE_DEBUG_OBJECTS select DRM_I915_SELFTEST default n @@ -231,7 +234,9 @@ config DRM_I915_DEBUG_RUNTIME_PM bool "Enable extra state checking for runtime PM" depends on DRM_I915 default n + select REF_TRACKER select STACKDEPOT + select STACKTRACE help Choose this option to turn on extra state checking for the runtime PM functionality. This may introduce overhead during @@ -240,3 +245,16 @@ config DRM_I915_DEBUG_RUNTIME_PM Recommended for driver developers only. If in doubt, say "N" + +config DRM_I915_DEBUG_WAKEREF + bool "Enable extra tracking for wakerefs" + depends on DRM_I915 + select REF_TRACKER + select STACKDEPOT + select STACKTRACE + help + Choose this option to turn on extra state checking and usage + tracking for the wakerefPM functionality. This may introduce + overhead during driver runtime. + + If in doubt, say "N" diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 88b2bb0050..2d08baa91d 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -17,7 +17,6 @@ subdir-ccflags-y += $(call cc-option, -Wunused-const-variable) subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned) subdir-ccflags-y += $(call cc-option, -Wformat-overflow) subdir-ccflags-y += $(call cc-option, -Wformat-truncation) -subdir-ccflags-y += $(call cc-option, -Wstringop-overflow) subdir-ccflags-y += $(call cc-option, -Wstringop-truncation) # The following turn off the warnings enabled by -Wextra ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),) @@ -34,9 +33,9 @@ endif subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror # Fine grained warnings disable -CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init) -CFLAGS_display/intel_display_device.o = $(call cc-disable-warning, override-init) -CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init) +CFLAGS_i915_pci.o = -Wno-override-init +CFLAGS_display/intel_display_device.o = -Wno-override-init +CFLAGS_display/intel_fbdev.o = -Wno-override-init # Support compiling the display code separately for both i915 and xe # drivers. Define I915 when building i915. @@ -47,33 +46,34 @@ subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! 
# core driver code -i915-y += i915_driver.o \ - i915_drm_client.o \ - i915_config.o \ - i915_getparam.o \ - i915_ioctl.o \ - i915_irq.o \ - i915_mitigations.o \ - i915_module.o \ - i915_params.o \ - i915_pci.o \ - i915_scatterlist.o \ - i915_suspend.o \ - i915_switcheroo.o \ - i915_sysfs.o \ - i915_utils.o \ - intel_clock_gating.o \ - intel_device_info.o \ - intel_memory_region.o \ - intel_pcode.o \ - intel_region_ttm.o \ - intel_runtime_pm.o \ - intel_sbi.o \ - intel_step.o \ - intel_uncore.o \ - intel_wakeref.o \ - vlv_sideband.o \ - vlv_suspend.o +i915-y += \ + i915_config.o \ + i915_driver.o \ + i915_drm_client.o \ + i915_getparam.o \ + i915_ioctl.o \ + i915_irq.o \ + i915_mitigations.o \ + i915_module.o \ + i915_params.o \ + i915_pci.o \ + i915_scatterlist.o \ + i915_suspend.o \ + i915_switcheroo.o \ + i915_sysfs.o \ + i915_utils.o \ + intel_clock_gating.o \ + intel_device_info.o \ + intel_memory_region.o \ + intel_pcode.o \ + intel_region_ttm.o \ + intel_runtime_pm.o \ + intel_sbi.o \ + intel_step.o \ + intel_uncore.o \ + intel_wakeref.o \ + vlv_sideband.o \ + vlv_suspend.o # core peripheral code i915-y += \ @@ -90,13 +90,13 @@ i915-y += \ i915_syncmap.o \ i915_user_extensions.o -i915-$(CONFIG_COMPAT) += i915_ioc32.o +i915-$(CONFIG_COMPAT) += \ + i915_ioc32.o i915-$(CONFIG_DEBUG_FS) += \ i915_debugfs.o \ - i915_debugfs_params.o \ - display/intel_display_debugfs.o \ - display/intel_pipe_crc.o -i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o + i915_debugfs_params.o +i915-$(CONFIG_PERF_EVENTS) += \ + i915_pmu.o # "Graphics Technology" (aka we talk to the gpu) gt-y += \ @@ -118,6 +118,7 @@ gt-y += \ gt/intel_ggtt_fencing.o \ gt/intel_gt.o \ gt/intel_gt_buffer_pool.o \ + gt/intel_gt_ccs_mode.o \ gt/intel_gt_clock_utils.o \ gt/intel_gt_debugfs.o \ gt/intel_gt_engines_debugfs.o \ @@ -153,7 +154,8 @@ gt-y += \ gt/sysfs_engines.o # x86 intel-gtt module support -gt-$(CONFIG_X86) += gt/intel_ggtt_gmch.o +gt-$(CONFIG_X86) += \ + gt/intel_ggtt_gmch.o # autogenerated null render state gt-y += \ gt/gen6_renderstate.o \ @@ -172,9 +174,9 @@ gem-y += \ gem/i915_gem_domain.o \ gem/i915_gem_execbuffer.o \ gem/i915_gem_internal.o \ - gem/i915_gem_object.o \ gem/i915_gem_lmem.o \ gem/i915_gem_mman.o \ + gem/i915_gem_object.o \ gem/i915_gem_pages.o \ gem/i915_gem_phys.o \ gem/i915_gem_pm.o \ @@ -191,57 +193,61 @@ gem-y += \ gem/i915_gem_wait.o \ gem/i915_gemfs.o i915-y += \ - $(gem-y) \ - i915_active.o \ - i915_cmd_parser.o \ - i915_deps.o \ - i915_gem_evict.o \ - i915_gem_gtt.o \ - i915_gem_ww.o \ - i915_gem.o \ - i915_query.o \ - i915_request.o \ - i915_scheduler.o \ - i915_trace_points.o \ - i915_ttm_buddy_manager.o \ - i915_vma.o \ - i915_vma_resource.o + $(gem-y) \ + i915_active.o \ + i915_cmd_parser.o \ + i915_deps.o \ + i915_gem.o \ + i915_gem_evict.o \ + i915_gem_gtt.o \ + i915_gem_ww.o \ + i915_query.o \ + i915_request.o \ + i915_scheduler.o \ + i915_trace_points.o \ + i915_ttm_buddy_manager.o \ + i915_vma.o \ + i915_vma_resource.o # general-purpose microcontroller (GuC) support i915-y += \ - gt/uc/intel_gsc_fw.o \ - gt/uc/intel_gsc_proxy.o \ - gt/uc/intel_gsc_uc.o \ - gt/uc/intel_gsc_uc_debugfs.o \ - gt/uc/intel_gsc_uc_heci_cmd_submit.o \ - gt/uc/intel_guc.o \ - gt/uc/intel_guc_ads.o \ - gt/uc/intel_guc_capture.o \ - gt/uc/intel_guc_ct.o \ - gt/uc/intel_guc_debugfs.o \ - gt/uc/intel_guc_fw.o \ - gt/uc/intel_guc_hwconfig.o \ - gt/uc/intel_guc_log.o \ - gt/uc/intel_guc_log_debugfs.o \ - gt/uc/intel_guc_rc.o \ - gt/uc/intel_guc_slpc.o \ - gt/uc/intel_guc_submission.o \ - gt/uc/intel_huc.o \ - 
gt/uc/intel_huc_debugfs.o \ - gt/uc/intel_huc_fw.o \ - gt/uc/intel_uc.o \ - gt/uc/intel_uc_debugfs.o \ - gt/uc/intel_uc_fw.o + gt/uc/intel_gsc_fw.o \ + gt/uc/intel_gsc_proxy.o \ + gt/uc/intel_gsc_uc.o \ + gt/uc/intel_gsc_uc_debugfs.o \ + gt/uc/intel_gsc_uc_heci_cmd_submit.o\ + gt/uc/intel_guc.o \ + gt/uc/intel_guc_ads.o \ + gt/uc/intel_guc_capture.o \ + gt/uc/intel_guc_ct.o \ + gt/uc/intel_guc_debugfs.o \ + gt/uc/intel_guc_fw.o \ + gt/uc/intel_guc_hwconfig.o \ + gt/uc/intel_guc_log.o \ + gt/uc/intel_guc_log_debugfs.o \ + gt/uc/intel_guc_rc.o \ + gt/uc/intel_guc_slpc.o \ + gt/uc/intel_guc_submission.o \ + gt/uc/intel_huc.o \ + gt/uc/intel_huc_debugfs.o \ + gt/uc/intel_huc_fw.o \ + gt/uc/intel_uc.o \ + gt/uc/intel_uc_debugfs.o \ + gt/uc/intel_uc_fw.o # graphics system controller (GSC) support -i915-y += gt/intel_gsc.o +i915-y += \ + gt/intel_gsc.o # graphics hardware monitoring (HWMON) support -i915-$(CONFIG_HWMON) += i915_hwmon.o +i915-$(CONFIG_HWMON) += \ + i915_hwmon.o # modesetting core code i915-y += \ display/hsw_ips.o \ + display/i9xx_plane.o \ + display/i9xx_wm.o \ display/intel_atomic.o \ display/intel_atomic_plane.o \ display/intel_audio.o \ @@ -257,6 +263,7 @@ i915-y += \ display/intel_display.o \ display/intel_display_driver.o \ display/intel_display_irq.o \ + display/intel_display_params.o \ display/intel_display_power.o \ display/intel_display_power_map.o \ display/intel_display_power_well.o \ @@ -268,9 +275,12 @@ i915-y += \ display/intel_dpll.o \ display/intel_dpll_mgr.o \ display/intel_dpt.o \ + display/intel_dpt_common.o \ display/intel_drrs.o \ display/intel_dsb.o \ + display/intel_dsb_buffer.o \ display/intel_fb.o \ + display/intel_fb_bo.o \ display/intel_fb_pin.o \ display/intel_fbc.o \ display/intel_fdi.o \ @@ -287,8 +297,8 @@ i915-y += \ display/intel_load_detect.o \ display/intel_lpe_audio.o \ display/intel_modeset_lock.o \ - display/intel_modeset_verify.o \ display/intel_modeset_setup.o \ + display/intel_modeset_verify.o \ display/intel_overlay.o \ display/intel_pch_display.o \ display/intel_pch_refclk.o \ @@ -302,8 +312,6 @@ i915-y += \ display/intel_vblank.o \ display/intel_vga.o \ display/intel_wm.o \ - display/i9xx_plane.o \ - display/i9xx_wm.o \ display/skl_scaler.o \ display/skl_universal_plane.o \ display/skl_watermark.o @@ -311,7 +319,12 @@ i915-$(CONFIG_ACPI) += \ display/intel_acpi.o \ display/intel_opregion.o i915-$(CONFIG_DRM_FBDEV_EMULATION) += \ - display/intel_fbdev.o + display/intel_fbdev.o \ + display/intel_fbdev_fb.o +i915-$(CONFIG_DEBUG_FS) += \ + display/intel_display_debugfs.o \ + display/intel_display_debugfs_params.o \ + display/intel_pipe_crc.o # modesetting output/encoder code i915-y += \ @@ -357,13 +370,14 @@ i915-y += \ display/vlv_dsi.o \ display/vlv_dsi_pll.o -i915-y += i915_perf.o +i915-y += \ + i915_perf.o # Protected execution platform (PXP) support. 
Base support is required for HuC i915-y += \ pxp/intel_pxp.o \ - pxp/intel_pxp_tee.o \ - pxp/intel_pxp_huc.o + pxp/intel_pxp_huc.o \ + pxp/intel_pxp_tee.o i915-$(CONFIG_DRM_I915_PXP) += \ pxp/intel_pxp_cmd.o \ @@ -374,11 +388,11 @@ i915-$(CONFIG_DRM_I915_PXP) += \ pxp/intel_pxp_session.o # Post-mortem debug and GPU hang state capture -i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o +i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += \ + i915_gpu_error.o i915-$(CONFIG_DRM_I915_SELFTEST) += \ gem/selftests/i915_gem_client_blt.o \ gem/selftests/igt_gem_utils.o \ - selftests/intel_scheduler_helpers.o \ selftests/i915_random.o \ selftests/i915_selftest.o \ selftests/igt_atomic.o \ @@ -387,10 +401,12 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ selftests/igt_mmap.o \ selftests/igt_reset.o \ selftests/igt_spinner.o \ + selftests/intel_scheduler_helpers.o \ selftests/librapl.o # virtual gpu code -i915-y += i915_vgpu.o +i915-y += \ + i915_vgpu.o i915-$(CONFIG_DRM_I915_GVT) += \ intel_gvt.o \ diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index e8ee0a0894..06ec04e667 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -432,7 +432,7 @@ intel_dp_link_down(struct intel_encoder *encoder, intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); - intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); + intel_dp->DP &= ~DP_PORT_EN; intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); @@ -475,6 +475,40 @@ intel_dp_link_down(struct intel_encoder *encoder, } } +static void g4x_dp_audio_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (!crtc_state->has_audio) + return; + + /* Enable audio presence detect */ + intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; + intel_de_write(i915, intel_dp->output_reg, intel_dp->DP); + + intel_audio_codec_enable(encoder, crtc_state, conn_state); +} + +static void g4x_dp_audio_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (!old_crtc_state->has_audio) + return; + + intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); + + /* Disable audio presence detect */ + intel_dp->DP &= ~DP_AUDIO_OUTPUT_ENABLE; + intel_de_write(i915, intel_dp->output_reg, intel_dp->DP); +} + static void intel_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, @@ -484,8 +518,6 @@ static void intel_disable_dp(struct intel_atomic_state *state, intel_dp->link_trained = false; - intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); - /* * Make sure the panel is off before trying to change the mode. * But also ensure that we have vdd while we switch off the panel. @@ -631,8 +663,6 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp, * fail when the power sequencer is freshly used for this port. 
*/ intel_dp->DP |= DP_PORT_EN; - if (crtc_state->has_audio) - intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); @@ -686,7 +716,6 @@ static void g4x_enable_dp(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { intel_enable_dp(state, encoder, pipe_config, conn_state); - intel_audio_codec_enable(encoder, pipe_config, conn_state); intel_edp_backlight_on(pipe_config, conn_state); } @@ -695,7 +724,6 @@ static void vlv_enable_dp(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - intel_audio_codec_enable(encoder, pipe_config, conn_state); intel_edp_backlight_on(pipe_config, conn_state); } @@ -1325,6 +1353,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, intel_encoder->disable = g4x_disable_dp; intel_encoder->post_disable = g4x_post_disable_dp; } + intel_encoder->audio_enable = g4x_dp_audio_enable; + intel_encoder->audio_disable = g4x_dp_audio_disable; if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || (HAS_PCH_CPT(dev_priv) && port != PORT_A)) diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index 45e044b4a8..8096492b3f 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -228,25 +228,51 @@ static void g4x_hdmi_enable_port(struct intel_encoder *encoder, temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); temp |= SDVO_ENABLE; - if (pipe_config->has_audio) - temp |= HDMI_AUDIO_ENABLE; intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); } +static void g4x_hdmi_audio_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder); + + if (!crtc_state->has_audio) + return; + + drm_WARN_ON(&i915->drm, !crtc_state->has_hdmi_sink); + + /* Enable audio presence detect */ + intel_de_rmw(i915, hdmi->hdmi_reg, 0, HDMI_AUDIO_ENABLE); + + intel_audio_codec_enable(encoder, crtc_state, conn_state); +} + +static void g4x_hdmi_audio_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder); + + if (!old_crtc_state->has_audio) + return; + + intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); + + /* Disable audio presence detect */ + intel_de_rmw(i915, hdmi->hdmi_reg, HDMI_AUDIO_ENABLE, 0); +} + static void g4x_enable_hdmi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - g4x_hdmi_enable_port(encoder, pipe_config); - - drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && - !pipe_config->has_hdmi_sink); - intel_audio_codec_enable(encoder, pipe_config, conn_state); } static void ibx_enable_hdmi(struct intel_atomic_state *state, @@ -262,8 +288,6 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state, temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); temp |= SDVO_ENABLE; - if (pipe_config->has_audio) - temp |= HDMI_AUDIO_ENABLE; /* * HW workaround, need to write this 
twice for issue @@ -296,10 +320,6 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state, intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); } - - drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && - !pipe_config->has_hdmi_sink); - intel_audio_codec_enable(encoder, pipe_config, conn_state); } static void cpt_enable_hdmi(struct intel_atomic_state *state, @@ -317,8 +337,6 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state, temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); temp |= SDVO_ENABLE; - if (pipe_config->has_audio) - temp |= HDMI_AUDIO_ENABLE; /* * WaEnableHDMI8bpcBefore12bpc:snb,ivb @@ -351,10 +369,6 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state, intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe), TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0); } - - drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && - !pipe_config->has_hdmi_sink); - intel_audio_codec_enable(encoder, pipe_config, conn_state); } static void vlv_enable_hdmi(struct intel_atomic_state *state, @@ -362,11 +376,6 @@ static void vlv_enable_hdmi(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - - drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && - !pipe_config->has_hdmi_sink); - intel_audio_codec_enable(encoder, pipe_config, conn_state); } static void intel_disable_hdmi(struct intel_atomic_state *state, @@ -384,7 +393,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state, temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); - temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE); + temp &= ~SDVO_ENABLE; intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); @@ -433,8 +442,6 @@ static void g4x_disable_hdmi(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); - intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state); } @@ -443,7 +450,6 @@ static void pch_disable_hdmi(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); } static void pch_post_disable_hdmi(struct intel_atomic_state *state, @@ -750,6 +756,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, else intel_encoder->enable = g4x_enable_hdmi; } + intel_encoder->audio_enable = g4x_hdmi_audio_enable; + intel_encoder->audio_disable = g4x_hdmi_audio_disable; intel_encoder->shutdown = intel_hdmi_encoder_shutdown; intel_encoder->type = INTEL_OUTPUT_HDMI; diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c index 7dc38ac020..611a7d6ef8 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.c +++ b/drivers/gpu/drm/i915/display/hsw_ips.c @@ -193,7 +193,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) if (!hsw_crtc_supports_ips(crtc)) return false; - if (!i915->params.enable_ips) + if (!i915->display.params.enable_ips) return false; if (crtc_state->pipe_bpp > 24) @@ -329,7 +329,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused) wakeref = intel_runtime_pm_get(&i915->runtime_pm); seq_printf(m, "Enabled by kernel parameter: %s\n", - str_yes_no(i915->params.enable_ips)); + 
str_yes_no(i915->display.params.enable_ips)); if (DISPLAY_VER(i915) >= 8) { seq_puts(m, "Currently: unknown\n"); diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c index af0c79a4c9..11ca9572e8 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.c +++ b/drivers/gpu/drm/i915/display/i9xx_wm.c @@ -608,7 +608,7 @@ static bool intel_crtc_active(struct intel_crtc *crtc) * crtc->state->active once we have proper CRTC states wired up * for atomic. */ - return crtc && crtc->active && crtc->base.primary->state->fb && + return crtc->active && crtc->base.primary->state->fb && crtc->config->hw.adjusted_mode.crtc_clock; } @@ -2477,7 +2477,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, * FIFO size is only half of the self * refresh FIFO size on ILK/SNB. */ - if (DISPLAY_VER(dev_priv) <= 6) + if (DISPLAY_VER(dev_priv) < 7) fifo_size /= 2; } @@ -2818,7 +2818,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state, usable_level = dev_priv->display.wm.num_levels - 1; /* ILK/SNB: LP2+ watermarks only w/o sprites */ - if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled) + if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled) usable_level = 1; /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ @@ -2961,7 +2961,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv, int last_enabled_level = num_levels - 1; /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ - if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) && + if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) && config->num_pipes_active > 1) last_enabled_level = 0; @@ -2993,7 +2993,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv, /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) && - dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) { + dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) { for (level = 2; level < num_levels; level++) { struct intel_wm_level *wm = &merged->wm[level]; @@ -3060,7 +3060,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the * level is disabled. Doing otherwise could cause underruns. */ - if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) { + if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) { drm_WARN_ON(&dev_priv->drm, wm_lp != 1); results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE; } diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 67143a0f51..ac456a2275 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -330,7 +330,7 @@ static int afe_clk(struct intel_encoder *encoder, int bpp; if (crtc_state->dsc.compression_enable) - bpp = crtc_state->dsc.compressed_bpp; + bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16); else bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); @@ -860,7 +860,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, * compressed and non-compressed bpp. 
*/ if (crtc_state->dsc.compression_enable) { - mul = crtc_state->dsc.compressed_bpp; + mul = to_bpp_int(crtc_state->dsc.compressed_bpp_x16); div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); } @@ -884,7 +884,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, int bpp, line_time_us, byte_clk_period_ns; if (crtc_state->dsc.compression_enable) - bpp = crtc_state->dsc.compressed_bpp; + bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16); else bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); @@ -1458,8 +1458,8 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder, struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - if (pipe_config->dsc.compressed_bpp) { - int div = pipe_config->dsc.compressed_bpp; + if (pipe_config->dsc.compressed_bpp_x16) { + int div = to_bpp_int(pipe_config->dsc.compressed_bpp_x16); int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); adjusted_mode->crtc_htotal = diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 5d18145da2..ec0d5168b5 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -331,9 +331,6 @@ void intel_atomic_state_free(struct drm_atomic_state *_state) drm_atomic_state_default_release(&state->base); kfree(state->global_objs); - - i915_sw_fence_fini(&state->commit_ready); - kfree(state); } diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index b107435061..06c2455bdd 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -31,7 +31,10 @@ * prepare/check/commit/cleanup steps. */ +#include <linux/dma-fence-chain.h> + #include +#include <drm/drm_gem_atomic_helper.h> #include #include @@ -1012,6 +1015,41 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) return 0; } +static int add_dma_resv_fences(struct dma_resv *resv, + struct drm_plane_state *new_plane_state) +{ + struct dma_fence *fence = dma_fence_get(new_plane_state->fence); + struct dma_fence *new; + int ret; + + ret = dma_resv_get_singleton(resv, dma_resv_usage_rw(false), &new); + if (ret) + goto error; + + if (new && fence) { + struct dma_fence_chain *chain = dma_fence_chain_alloc(); + + if (!chain) { + ret = -ENOMEM; + goto error; + } + + dma_fence_chain_init(chain, fence, new, 1); + fence = &chain->base; + + } else if (new) { + fence = new; + } + + dma_fence_put(new_plane_state->fence); + new_plane_state->fence = fence; + return 0; + +error: + dma_fence_put(fence); + return ret; +} + /** * intel_prepare_plane_fb - Prepare fb for usage on plane * @_plane: drm plane to prepare for @@ -1035,7 +1073,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane, struct intel_atomic_state *state = to_intel_atomic_state(new_plane_state->uapi.state); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct intel_plane_state *old_plane_state = + struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb); struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb); @@ -1058,55 +1096,28 @@ intel_prepare_plane_fb(struct drm_plane *_plane, * can safely continue.
*/ if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) { - ret = i915_sw_fence_await_reservation(&state->commit_ready, - old_obj->base.resv, - false, 0, - GFP_KERNEL); + ret = add_dma_resv_fences(intel_bo_to_drm_bo(old_obj)->resv, + &new_plane_state->uapi); if (ret < 0) return ret; } } - if (new_plane_state->uapi.fence) { /* explicit fencing */ - i915_gem_fence_wait_priority(new_plane_state->uapi.fence, - &attr); - ret = i915_sw_fence_await_dma_fence(&state->commit_ready, - new_plane_state->uapi.fence, - i915_fence_timeout(dev_priv), - GFP_KERNEL); - if (ret < 0) - return ret; - } - if (!obj) return 0; - ret = intel_plane_pin_fb(new_plane_state); if (ret) return ret; - i915_gem_object_wait_priority(obj, 0, &attr); + ret = drm_gem_plane_helper_prepare_fb(&plane->base, &new_plane_state->uapi); + if (ret < 0) + goto unpin_fb; - if (!new_plane_state->uapi.fence) { /* implicit fencing */ - struct dma_resv_iter cursor; - struct dma_fence *fence; - - ret = i915_sw_fence_await_reservation(&state->commit_ready, - obj->base.resv, false, - i915_fence_timeout(dev_priv), - GFP_KERNEL); - if (ret < 0) - goto unpin_fb; + if (new_plane_state->uapi.fence) { + i915_gem_fence_wait_priority(new_plane_state->uapi.fence, + &attr); - dma_resv_iter_begin(&cursor, obj->base.resv, - DMA_RESV_USAGE_WRITE); - dma_resv_for_each_fence_unlocked(&cursor, fence) { - intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, - fence); - } - dma_resv_iter_end(&cursor); - } else { intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, new_plane_state->uapi.fence); } diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 19605264a3..07e0c73204 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -25,6 +25,7 @@ #include #include +#include #include #include "i915_drv.h" @@ -521,25 +522,25 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder, unsigned int link_clks_available, link_clks_required; unsigned int tu_data, tu_line, link_clks_active; unsigned int h_active, h_total, hblank_delta, pixel_clk; - unsigned int fec_coeff, cdclk, vdsc_bpp; + unsigned int fec_coeff, cdclk, vdsc_bppx16; unsigned int link_clk, lanes; unsigned int hblank_rise; h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay; h_total = crtc_state->hw.adjusted_mode.crtc_htotal; pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock; - vdsc_bpp = crtc_state->dsc.compressed_bpp; + vdsc_bppx16 = crtc_state->dsc.compressed_bpp_x16; cdclk = i915->display.cdclk.hw.cdclk; /* fec= 0.972261, using rounding multiplier of 1000000 */ fec_coeff = 972261; link_clk = crtc_state->port_clock; lanes = crtc_state->lane_count; - drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u :" - "lanes = %u vdsc_bpp = %u cdclk = %u\n", - h_active, link_clk, lanes, vdsc_bpp, cdclk); + drm_dbg_kms(&i915->drm, + "h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " BPP_X16_FMT " cdclk = %u\n", + h_active, link_clk, lanes, BPP_X16_ARGS(vdsc_bppx16), cdclk); - if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bpp || !cdclk)) + if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bppx16 || !cdclk)) return 0; link_clks_available = (h_total - h_active) * link_clk / pixel_clk - 28; @@ -551,8 +552,8 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder, hblank_delta = DIV64_U64_ROUND_UP(mul_u32_u32(5 * (link_clk + cdclk), pixel_clk), mul_u32_u32(link_clk, cdclk)); - tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bpp * 
8, 1000000), - mul_u32_u32(link_clk * lanes, fec_coeff)); + tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bppx16 * 8, 1000000), + mul_u32_u32(link_clk * lanes * 16, fec_coeff)); tu_line = div64_u64(h_active * mul_u32_u32(link_clk, fec_coeff), mul_u32_u32(64 * pixel_clk, 1000000)); link_clks_active = (tu_line - 1) * 64 + tu_data; diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 2e8f17c045..3f3cd944a1 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -88,10 +88,10 @@ u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val) drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0); - if (i915->params.invert_brightness < 0) + if (i915->display.params.invert_brightness < 0) return val; - if (i915->params.invert_brightness > 0 || + if (i915->display.params.invert_brightness > 0 || intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) { return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min; } @@ -132,8 +132,9 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val) drm_WARN_ON_ONCE(&i915->drm, panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0); - if (i915->params.invert_brightness > 0 || - (i915->params.invert_brightness == 0 && intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS))) + if (i915->display.params.invert_brightness > 0 || + (i915->display.params.invert_brightness == 0 && + intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS))) val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min); return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max, @@ -274,7 +275,7 @@ static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); - pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); + pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state); } static void @@ -427,7 +428,7 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn intel_backlight_set_pwm_level(old_conn_state, level); panel->backlight.pwm_state.enabled = false; - pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); + pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state); } void intel_backlight_disable(const struct drm_connector_state *old_conn_state) @@ -749,7 +750,7 @@ static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state, pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); panel->backlight.pwm_state.enabled = true; - pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); + pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state); } static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index e7ea287845..eb9835d48d 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1116,7 +1116,7 @@ parse_sdvo_panel_data(struct drm_i915_private *i915, struct drm_display_mode *panel_fixed_mode; int index; - index = i915->params.vbt_sdvo_panel_type; + index = i915->display.params.vbt_sdvo_panel_type; if (index == -2) { drm_dbg_kms(&i915->drm, "Ignore SDVO panel mode from BIOS VBT tables.\n"); @@ 
-1514,9 +1514,9 @@ parse_edp(struct drm_i915_private *i915, u8 vswing; /* Don't read from VBT if module parameter has valid value*/ - if (i915->params.edp_vswing) { + if (i915->display.params.edp_vswing) { panel->vbt.edp.low_vswing = - i915->params.edp_vswing == 1; + i915->display.params.edp_vswing == 1; } else { vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; panel->vbt.edp.low_vswing = vswing == 0; @@ -2232,6 +2232,9 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) const u8 *ddc_pin_map; int i, n_entries; + if (IS_DGFX(i915)) + return vbt_pin; + if (INTEL_PCH_TYPE(i915) >= PCH_LNL || HAS_PCH_MTP(i915) || IS_ALDERLAKE_P(i915)) { ddc_pin_map = adlp_ddc_pin_map; @@ -2239,8 +2242,6 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) } else if (IS_ALDERLAKE_S(i915)) { ddc_pin_map = adls_ddc_pin_map; n_entries = ARRAY_SIZE(adls_ddc_pin_map); - } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) { - return vbt_pin; } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) { ddc_pin_map = rkl_pch_tgp_ddc_pin_map; n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map); @@ -2504,6 +2505,27 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata, devdata->child.device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT; } +static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata, + enum port port) +{ + struct drm_i915_private *i915 = devdata->i915; + + if (!intel_bios_encoder_supports_dvi(devdata)) + return; + + /* + * Some BDW machines (eg. HP Pavilion 15-ab) shipped + * with a HSW VBT where the level shifter value goes + * up to 11, whereas the BDW max is 9. + */ + if (IS_BROADWELL(i915) && devdata->child.hdmi_level_shifter_value > 9) { + drm_dbg_kms(&i915->drm, "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n", + port_name(port), devdata->child.hdmi_level_shifter_value, 9); + + devdata->child.hdmi_level_shifter_value = 9; + } +} + static bool intel_bios_encoder_supports_crt(const struct intel_bios_encoder_data *devdata) { @@ -2683,6 +2705,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata) } sanitize_device_type(devdata, port); + sanitize_hdmi_level_shift(devdata, port); } static bool has_ddi_port_info(struct drm_i915_private *i915) @@ -3426,8 +3449,8 @@ static void fill_dsc(struct intel_crtc_state *crtc_state, crtc_state->pipe_bpp = bpc * 3; - crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp, - VBT_DSC_MAX_BPP(dsc->max_bpp)); + crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(min(crtc_state->pipe_bpp, + VBT_DSC_MAX_BPP(dsc->max_bpp))); /* * FIXME: This is ugly, and slice count should take DSC engine @@ -3486,8 +3509,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder, if (!devdata->dsc) return false; - if (crtc_state) - fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc); + fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc); return true; } diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index bef96db62c..7f2a50b4f4 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -87,7 +87,8 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, return ret; dclk = val & 0xffff; - sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 500 : 0), 1000); + sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 
500 : 0), + 1000); sp->t_rp = (val & 0xff0000) >> 16; sp->t_rcd = (val & 0xff000000) >> 24; @@ -480,7 +481,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12) qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1); - if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels) + if (DISPLAY_VER(dev_priv) >= 12 && num_channels > qi.max_numchannels) drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels."); if (qi.max_numchannels != 0) num_channels = min_t(u8, num_channels, qi.max_numchannels); @@ -897,7 +898,7 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, unsigned int idx; unsigned int max_data_rate; - if (DISPLAY_VER(i915) > 11) + if (DISPLAY_VER(i915) >= 12) idx = tgl_max_bw_index(i915, num_active_planes, i); else idx = icl_max_bw_index(i915, num_active_planes, i); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index c4839c67cb..7ba30d26f6 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1180,7 +1180,7 @@ sanitize: /* force cdclk programming */ dev_priv->display.cdclk.hw.cdclk = 0; /* force full PLL disable + enable */ - dev_priv->display.cdclk.hw.vco = -1; + dev_priv->display.cdclk.hw.vco = ~0; } static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) @@ -1446,50 +1446,77 @@ static u8 bxt_calc_voltage_level(int cdclk) return DIV_ROUND_UP(cdclk, 25000); } +static u8 calc_voltage_level(int cdclk, int num_voltage_levels, + const int voltage_level_max_cdclk[]) +{ + int voltage_level; + + for (voltage_level = 0; voltage_level < num_voltage_levels; voltage_level++) { + if (cdclk <= voltage_level_max_cdclk[voltage_level]) + return voltage_level; + } + + MISSING_CASE(cdclk); + return num_voltage_levels - 1; +} + static u8 icl_calc_voltage_level(int cdclk) { - if (cdclk > 556800) - return 2; - else if (cdclk > 312000) - return 1; - else - return 0; + static const int icl_voltage_level_max_cdclk[] = { + [0] = 312000, + [1] = 556800, + [2] = 652800, + }; + + return calc_voltage_level(cdclk, + ARRAY_SIZE(icl_voltage_level_max_cdclk), + icl_voltage_level_max_cdclk); } static u8 ehl_calc_voltage_level(int cdclk) { - if (cdclk > 326400) - return 3; - else if (cdclk > 312000) - return 2; - else if (cdclk > 180000) - return 1; - else - return 0; + static const int ehl_voltage_level_max_cdclk[] = { + [0] = 180000, + [1] = 312000, + [2] = 326400, + /* + * Bspec lists the limit as 556.8 MHz, but some JSL + * development boards (at least) boot with 652.8 MHz + */ + [3] = 652800, + }; + + return calc_voltage_level(cdclk, + ARRAY_SIZE(ehl_voltage_level_max_cdclk), + ehl_voltage_level_max_cdclk); } static u8 tgl_calc_voltage_level(int cdclk) { - if (cdclk > 556800) - return 3; - else if (cdclk > 326400) - return 2; - else if (cdclk > 312000) - return 1; - else - return 0; + static const int tgl_voltage_level_max_cdclk[] = { + [0] = 312000, + [1] = 326400, + [2] = 556800, + [3] = 652800, + }; + + return calc_voltage_level(cdclk, + ARRAY_SIZE(tgl_voltage_level_max_cdclk), + tgl_voltage_level_max_cdclk); } static u8 rplu_calc_voltage_level(int cdclk) { - if (cdclk > 556800) - return 3; - else if (cdclk > 480000) - return 2; - else if (cdclk > 312000) - return 1; - else - return 0; + static const int rplu_voltage_level_max_cdclk[] = { + [0] = 312000, + [1] = 480000, + [2] = 556800, + [3] = 652800, + }; + + return calc_voltage_level(cdclk, + 
ARRAY_SIZE(rplu_voltage_level_max_cdclk), + rplu_voltage_level_max_cdclk); } static void icl_readout_refclk(struct drm_i915_private *dev_priv, @@ -1800,6 +1827,8 @@ static bool cdclk_pll_is_unknown(unsigned int vco) return vco == ~0; } +static const int cdclk_squash_len = 16; + static int cdclk_squash_divider(u16 waveform) { return hweight16(waveform ?: 0xffff); @@ -1811,7 +1840,6 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91 struct intel_cdclk_config *mid_cdclk_config) { u16 old_waveform, new_waveform, mid_waveform; - int size = 16; int div = 2; /* Return if PLL is in an unknown state, force a complete disable and re-enable. */ @@ -1850,7 +1878,8 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91 } mid_cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_squash_divider(mid_waveform) * - mid_cdclk_config->vco, size * div); + mid_cdclk_config->vco, + cdclk_squash_len * div); /* make sure the mid clock came out sane */ @@ -1878,9 +1907,9 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv, { int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; - u32 val; + int unsquashed_cdclk; u16 waveform; - int clock; + u32 val; if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 && !cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) { @@ -1897,15 +1926,13 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv, waveform = cdclk_squash_waveform(dev_priv, cdclk); - if (waveform) - clock = vco / 2; - else - clock = cdclk; + unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len, + cdclk_squash_divider(waveform)); if (HAS_CDCLK_SQUASH(dev_priv)) dg2_cdclk_squash_program(dev_priv, waveform); - val = bxt_cdclk_cd2x_div_sel(dev_priv, clock, vco) | + val = bxt_cdclk_cd2x_div_sel(dev_priv, unsquashed_cdclk, vco) | bxt_cdclk_cd2x_pipe(dev_priv, pipe); /* @@ -2075,7 +2102,7 @@ sanitize: dev_priv->display.cdclk.hw.cdclk = 0; /* force full PLL disable + enable */ - dev_priv->display.cdclk.hw.vco = -1; + dev_priv->display.cdclk.hw.vco = ~0; } static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) @@ -2485,7 +2512,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = intel_atomic_get_new_cdclk_state(state); - enum pipe pipe = new_cdclk_state->pipe; + struct intel_cdclk_config cdclk_config; + enum pipe pipe; if (!intel_cdclk_changed(&old_cdclk_state->actual, &new_cdclk_state->actual)) @@ -2494,12 +2522,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) if (IS_DG2(i915)) intel_cdclk_pcode_pre_notify(state); - if (pipe == INVALID_PIPE || - old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) { - drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); + if (new_cdclk_state->disable_pipes) { + cdclk_config = new_cdclk_state->actual; + pipe = INVALID_PIPE; + } else { + if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) { + cdclk_config = new_cdclk_state->actual; + pipe = new_cdclk_state->pipe; + } else { + cdclk_config = old_cdclk_state->actual; + pipe = INVALID_PIPE; + } - intel_set_cdclk(i915, &new_cdclk_state->actual, pipe); + cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level, + old_cdclk_state->actual.voltage_level); } + + drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); + + intel_set_cdclk(i915, &cdclk_config, pipe); } /** @@ -2517,7 +2558,7 @@ intel_set_cdclk_post_plane_update(struct 
intel_atomic_state *state) intel_atomic_get_old_cdclk_state(state); const struct intel_cdclk_state *new_cdclk_state = intel_atomic_get_new_cdclk_state(state); - enum pipe pipe = new_cdclk_state->pipe; + enum pipe pipe; if (!intel_cdclk_changed(&old_cdclk_state->actual, &new_cdclk_state->actual)) @@ -2526,12 +2567,15 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) if (IS_DG2(i915)) intel_cdclk_pcode_post_notify(state); - if (pipe != INVALID_PIPE && - old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) { - drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); + if (!new_cdclk_state->disable_pipes && + new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk) + pipe = new_cdclk_state->pipe; + else + pipe = INVALID_PIPE; - intel_set_cdclk(i915, &new_cdclk_state->actual, pipe); - } + drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed); + + intel_set_cdclk(i915, &new_cdclk_state->actual, pipe); } static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) @@ -2597,9 +2641,10 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state) * Since PPC = 2 with bigjoiner * => CDCLK >= compressed_bpp * Pixel clock / 2 * Bigjoiner Interface bits */ - int bigjoiner_interface_bits = DISPLAY_VER(i915) > 13 ? 36 : 24; - int min_cdclk_bj = (crtc_state->dsc.compressed_bpp * pixel_clock) / - (2 * bigjoiner_interface_bits); + int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24; + int min_cdclk_bj = + (to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) * + pixel_clock) / (2 * bigjoiner_interface_bits); min_cdclk = max(min_cdclk, min_cdclk_bj); } @@ -3008,6 +3053,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa return NULL; cdclk_state->pipe = INVALID_PIPE; + cdclk_state->disable_pipes = false; return &cdclk_state->base; } @@ -3186,6 +3232,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) if (ret) return ret; + new_cdclk_state->disable_pipes = true; + drm_dbg_kms(&dev_priv->drm, "Modeset required for cdclk change\n"); } @@ -3488,7 +3536,7 @@ static const struct intel_cdclk_funcs mtl_cdclk_funcs = { .get_cdclk = bxt_get_cdclk, .set_cdclk = bxt_set_cdclk, .modeset_calc_cdclk = bxt_modeset_calc_cdclk, - .calc_voltage_level = tgl_calc_voltage_level, + .calc_voltage_level = rplu_calc_voltage_level, }; static const struct intel_cdclk_funcs rplu_cdclk_funcs = { diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index 48fd7d39e0..71bc032bfe 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -51,6 +51,9 @@ struct intel_cdclk_state { /* bitmask of active pipes */ u8 active_pipes; + + /* update cdclk with pipes disabled */ + bool disable_pipes; }; int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 1d26be54dd..c5092b7e87 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -785,14 +785,12 @@ static void chv_assign_csc(struct intel_crtc_state *crtc_state) /* convert hw value with given bit_precision to lut property val */ static u32 intel_color_lut_pack(u32 val, int bit_precision) { - u32 max = 0xffff >> (16 - bit_precision); - - val = clamp_val(val, 0, max); - - if (bit_precision < 16) - val <<= 16 - bit_precision; - - return val; + if (bit_precision > 16) + return 
DIV_ROUND_CLOSEST_ULL(mul_u32_u32(val, (1 << 16) - 1), + (1 << bit_precision) - 1); + else + return DIV_ROUND_CLOSEST(val * ((1 << 16) - 1), + (1 << bit_precision) - 1); } static u32 i9xx_lut_8(const struct drm_color_lut *color) @@ -911,7 +909,7 @@ static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) static u16 i965_lut_11p6_max_pack(u32 val) { /* PIPEGCMAX is 11.6, clamp to 10.6 */ - return clamp_val(val, 0, 0xffff); + return min(val, 0xffffu); } static u32 ilk_lut_10(const struct drm_color_lut *color) @@ -1528,14 +1526,27 @@ static int glk_degamma_lut_size(struct drm_i915_private *i915) return 35; } -/* - * change_lut_val_precision: helper function to upscale or downscale lut values. - * Parameters 'to' and 'from' needs to be less than 32. This should be sufficient - * as currently there are no lut values exceeding 32 bit. - */ -static u32 change_lut_val_precision(u32 lut_val, int to, int from) +static u32 glk_degamma_lut(const struct drm_color_lut *color) +{ + return color->green; +} + +static void glk_degamma_lut_pack(struct drm_color_lut *entry, u32 val) +{ + /* PRE_CSC_GAMC_DATA is 3.16, clamp to 0.16 */ + entry->red = entry->green = entry->blue = min(val, 0xffffu); +} + +static u32 mtl_degamma_lut(const struct drm_color_lut *color) +{ + return drm_color_lut_extract(color->green, 24); +} + +static void mtl_degamma_lut_pack(struct drm_color_lut *entry, u32 val) { - return mul_u32_u32(lut_val, (1 << to)) / (1 << from); + /* PRE_CSC_GAMC_DATA is 3.24, clamp to 0.16 */ + entry->red = entry->green = entry->blue = + intel_color_lut_pack(min(val, 0xffffffu), 24); } static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state, @@ -1572,20 +1583,16 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state, * ToDo: Extend to max 7.0. Enable 32 bit input value * as compared to just 16 to achieve this. */ - u32 lut_val; - - if (DISPLAY_VER(i915) >= 14) - lut_val = change_lut_val_precision(lut[i].green, 24, 16); - else - lut_val = lut[i].green; - ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), - lut_val); + DISPLAY_VER(i915) >= 14 ? + mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i])); } /* Clamp values > 1.0. */ while (i++ < glk_degamma_lut_size(i915)) - ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1 << 16); + ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), + DISPLAY_VER(i915) >= 14 ? 
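/*
 * i.e. 1.0 in the MTL 3.24 degamma format vs 1.0 in the pre-MTL
 * 3.16 format. Going the other way, intel_color_lut_pack(val, 24)
 * rescales by round(val * 0xffff / 0xffffff), so a full-range
 * 0xffffff sample reads back as the 16-bit uapi value 0xffff.
 */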
+ 1 << 24 : 1 << 16); ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0); } @@ -3572,17 +3579,10 @@ static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc) for (i = 0; i < lut_size; i++) { u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe)); - /* - * For MTL and beyond, convert back the 24 bit lut values - * read from HW to 16 bit values to maintain parity with - * userspace values - */ if (DISPLAY_VER(dev_priv) >= 14) - val = change_lut_val_precision(val, 16, 24); - - lut[i].red = val; - lut[i].green = val; - lut[i].blue = val; + mtl_degamma_lut_pack(&lut[i], val); + else + glk_degamma_lut_pack(&lut[i], val); } intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 6f6b348b8a..abaacea5c2 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -846,7 +846,7 @@ intel_crt_detect(struct drm_connector *connector, if (!intel_display_device_enabled(dev_priv)) return connector_status_disconnected; - if (dev_priv->params.load_detect_test) { + if (dev_priv->display.params.load_detect_test) { wakeref = intel_display_power_get(dev_priv, intel_encoder->power_domain); goto load_detect; @@ -906,7 +906,7 @@ load_detect: else if (DISPLAY_VER(dev_priv) < 4) status = intel_crt_load_detect(crt, to_intel_crtc(connector->state->crtc)->pipe); - else if (dev_priv->params.load_detect_test) + else if (dev_priv->display.params.load_detect_test) status = connector_status_disconnected; else status = connector_status_unknown; diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 1fd068e6e2..8a84a31c7b 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -553,8 +553,15 @@ void intel_pipe_update_start(struct intel_atomic_state *state, intel_psr_lock(new_crtc_state); - if (new_crtc_state->do_async_flip) + if (new_crtc_state->do_async_flip) { + spin_lock_irq(&crtc->base.dev->event_lock); + /* arm the event for the flip done irq handler */ + crtc->flip_done_event = new_crtc_state->uapi.event; + spin_unlock_irq(&crtc->base.dev->event_lock); + + new_crtc_state->uapi.event = NULL; return; + } if (intel_crtc_needs_vblank_work(new_crtc_state)) intel_crtc_vblank_work_init(new_crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index 66fe880af8..49fd100ec9 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -4,6 +4,7 @@ */ #include +#include #include "i915_drv.h" #include "intel_crtc_state_dump.h" @@ -261,6 +262,15 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, drm_dbg_kms(&i915->drm, "fec: %s, enhanced framing: %s\n", str_enabled_disabled(pipe_config->fec_enable), str_enabled_disabled(pipe_config->enhanced_framing)); + + drm_dbg_kms(&i915->drm, "sdp split: %s\n", + str_enabled_disabled(pipe_config->sdp_split_enable)); + + drm_dbg_kms(&i915->drm, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n", + str_enabled_disabled(pipe_config->has_psr), + str_enabled_disabled(pipe_config->has_psr2), + str_enabled_disabled(pipe_config->has_panel_replay), + str_enabled_disabled(pipe_config->enable_psr2_sel_fetch)); } drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n", diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c 
b/drivers/gpu/drm/i915/display/intel_cursor.c index b342fad180..2dde7ac882 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -21,8 +21,11 @@ #include "intel_fb_pin.h" #include "intel_frontbuffer.h" #include "intel_psr.h" +#include "intel_psr_regs.h" #include "skl_watermark.h" +#include "gem/i915_gem_object.h" + /* Cursor formats */ static const u32 intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, @@ -32,12 +35,10 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - const struct drm_i915_gem_object *obj = intel_fb_obj(fb); u32 base; if (DISPLAY_INFO(dev_priv)->cursor_needs_physical) - base = sg_dma_address(obj->mm.pages->sgl); + base = plane_state->phys_dma_addr; else base = intel_plane_ggtt_offset(plane_state); @@ -484,6 +485,35 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, return 0; } +static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + + if (!crtc_state->enable_psr2_sel_fetch) + return; + + intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0); +} + +static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + + if (!crtc_state->enable_psr2_sel_fetch) + return; + + if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0) + intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), + plane_state->ctl); + else + i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state); +} + /* TODO: split into noarm+arm pair */ static void i9xx_cursor_update_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, @@ -531,10 +561,10 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane, skl_write_cursor_wm(plane, crtc_state); if (plane_state) - intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, - plane_state); + i9xx_cursor_update_sel_fetch_arm(plane, crtc_state, + plane_state); else - intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state); + i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state); if (plane->cursor.base != base || plane->cursor.size != fbc_ctl || diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index ccf225afeb..6b25e19523 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -31,7 +31,7 @@ bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy) { - if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0) && phy < PHY_C) + if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C) return true; return false; @@ -206,6 +206,13 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port, intel_clear_response_ready_flag(i915, port, lane); + /* + * FIXME: Workaround to let the HW settle + * down and let the message bus end up + * in a known state + */ + intel_cx0_bus_reset(i915, port, lane); + return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val); } @@ -285,6 +292,13 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port, intel_clear_response_ready_flag(i915, port, lane); + /* + * 
FIXME: Workaround to let the HW settle + * down and let the message bus end up + * in a known state + */ + intel_cx0_bus_reset(i915, port, lane); + return 0; } @@ -401,9 +415,15 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_ddi_buf_trans *trans; enum phy phy = intel_port_to_phy(i915, encoder->port); - u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder); + u8 owned_lane_mask; intel_wakeref_t wakeref; int n_entries, ln; + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + if (intel_tc_port_in_tbt_alt_mode(dig_port)) + return; + + owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder); wakeref = intel_cx0_phy_transaction_begin(encoder); @@ -725,7 +745,6 @@ static const struct intel_c10pll_state * const mtl_c10_edp_tables[] = { /* C20 basic DP 1.4 tables */ static const struct intel_c20pll_state mtl_c20_dp_rbr = { - .link_bit_rate = 162000, .clock = 162000, .tx = { 0xbe88, /* tx cfg0 */ 0x5800, /* tx cfg1 */ @@ -751,7 +770,6 @@ static const struct intel_c20pll_state mtl_c20_dp_rbr = { }; static const struct intel_c20pll_state mtl_c20_dp_hbr1 = { - .link_bit_rate = 270000, .clock = 270000, .tx = { 0xbe88, /* tx cfg0 */ 0x4800, /* tx cfg1 */ @@ -777,7 +795,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr1 = { }; static const struct intel_c20pll_state mtl_c20_dp_hbr2 = { - .link_bit_rate = 540000, .clock = 540000, .tx = { 0xbe88, /* tx cfg0 */ 0x4800, /* tx cfg1 */ @@ -803,7 +820,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr2 = { }; static const struct intel_c20pll_state mtl_c20_dp_hbr3 = { - .link_bit_rate = 810000, .clock = 810000, .tx = { 0xbe88, /* tx cfg0 */ 0x4800, /* tx cfg1 */ @@ -830,8 +846,7 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr3 = { /* C20 basic DP 2.0 tables */ static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = { - .link_bit_rate = 1000000, /* 10 Gbps */ - .clock = 312500, + .clock = 1000000, /* 10 Gbps */ .tx = { 0xbe21, /* tx cfg0 */ 0x4800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ @@ -855,8 +870,7 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = { }; static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = { - .link_bit_rate = 1350000, /* 13.5 Gbps */ - .clock = 421875, + .clock = 1350000, /* 13.5 Gbps */ .tx = { 0xbea0, /* tx cfg0 */ 0x4800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ @@ -881,8 +895,7 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = { }; static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = { - .link_bit_rate = 2000000, /* 20 Gbps */ - .clock = 625000, + .clock = 2000000, /* 20 Gbps */ .tx = { 0xbe20, /* tx cfg0 */ 0x4800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ @@ -1501,7 +1514,6 @@ static const struct intel_c10pll_state * const mtl_c10_hdmi_tables[] = { }; static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = { - .link_bit_rate = 25175, .clock = 25175, .tx = { 0xbe88, /* tx cfg0 */ 0x9800, /* tx cfg1 */ @@ -1527,7 +1539,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = { }; static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = { - .link_bit_rate = 27000, .clock = 27000, .tx = { 0xbe88, /* tx cfg0 */ 0x9800, /* tx cfg1 */ @@ -1553,7 +1564,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = { }; static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = { - .link_bit_rate = 74250, .clock = 74250, .tx = { 0xbe88, /* tx cfg0 */ 0x9800, /* tx cfg1 */ @@ -1579,7 +1589,6 @@ static const struct intel_c20pll_state 
mtl_c20_hdmi_74_25 = { }; static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = { - .link_bit_rate = 148500, .clock = 148500, .tx = { 0xbe88, /* tx cfg0 */ 0x9800, /* tx cfg1 */ @@ -1605,7 +1614,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = { }; static const struct intel_c20pll_state mtl_c20_hdmi_594 = { - .link_bit_rate = 594000, .clock = 594000, .tx = { 0xbe88, /* tx cfg0 */ 0x9800, /* tx cfg1 */ @@ -1631,8 +1639,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_594 = { }; static const struct intel_c20pll_state mtl_c20_hdmi_300 = { - .link_bit_rate = 3000000, - .clock = 166670, + .clock = 3000000, .tx = { 0xbe98, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ @@ -1657,8 +1664,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = { }; static const struct intel_c20pll_state mtl_c20_hdmi_600 = { - .link_bit_rate = 6000000, - .clock = 333330, + .clock = 6000000, .tx = { 0xbe98, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ @@ -1683,8 +1689,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = { }; static const struct intel_c20pll_state mtl_c20_hdmi_800 = { - .link_bit_rate = 8000000, - .clock = 444440, + .clock = 8000000, .tx = { 0xbe98, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ @@ -1709,8 +1714,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = { }; static const struct intel_c20pll_state mtl_c20_hdmi_1000 = { - .link_bit_rate = 10000000, - .clock = 555560, + .clock = 10000000, .tx = { 0xbe98, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ @@ -1735,8 +1739,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = { }; static const struct intel_c20pll_state mtl_c20_hdmi_1200 = { - .link_bit_rate = 12000000, - .clock = 666670, + .clock = 12000000, .tx = { 0xbe98, /* tx cfg0 */ 0x9800, /* tx cfg1 */ 0x0000, /* tx cfg2 */ @@ -1850,8 +1853,8 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state, return -EINVAL; } -void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, - struct intel_c10pll_state *pll_state) +static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, + struct intel_c10pll_state *pll_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); u8 lane = INTEL_CX0_LANE0; @@ -1985,7 +1988,6 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_ else mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0; - pll_state->link_bit_rate = pixel_clock; pll_state->clock = pixel_clock; pll_state->tx[0] = 0xbe88; pll_state->tx[1] = 0x9800; @@ -2022,7 +2024,7 @@ static int intel_c20_phy_check_hdmi_link_rate(int clock) int i; for (i = 0; tables[i]; i++) { - if (clock == tables[i]->link_bit_rate) + if (clock == tables[i]->clock) return MODE_OK; } @@ -2074,7 +2076,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state, return -EINVAL; for (i = 0; tables[i]; i++) { - if (crtc_state->port_clock == tables[i]->link_bit_rate) { + if (crtc_state->port_clock == tables[i]->clock) { crtc_state->cx0pll_state.c20 = *tables[i]; return 0; } @@ -2097,14 +2099,14 @@ int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, static bool intel_c20_use_mplla(u32 clock) { /* 10G and 20G rates use MPLLA */ - if (clock == 312500 || clock == 625000) + if (clock == 1000000 || clock == 2000000) return true; return false; } -void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, - struct intel_c20pll_state *pll_state) +static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, + struct 
intel_c20pll_state *pll_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); bool cntx; @@ -2200,11 +2202,11 @@ static u8 intel_c20_get_dp_rate(u32 clock) return 6; case 432000: /* 4.32 Gbps eDP */ return 7; - case 312500: /* 10 Gbps DP2.0 */ + case 1000000: /* 10 Gbps DP2.0 */ return 8; - case 421875: /* 13.5 Gbps DP2.0 */ + case 1350000: /* 13.5 Gbps DP2.0 */ return 9; - case 625000: /* 20 Gbps DP2.0*/ + case 2000000: /* 20 Gbps DP2.0 */ return 10; case 648000: /* 6.48 Gbps eDP*/ return 11; @@ -2222,13 +2224,13 @@ static u8 intel_c20_get_hdmi_rate(u32 clock) return 0; switch (clock) { - case 166670: /* 3 Gbps */ - case 333330: /* 6 Gbps */ - case 666670: /* 12 Gbps */ + case 300000: /* 3 Gbps */ + case 600000: /* 6 Gbps */ + case 1200000: /* 12 Gbps */ return 1; - case 444440: /* 8 Gbps */ + case 800000: /* 8 Gbps */ return 2; - case 555560: /* 10 Gbps */ + case 1000000: /* 10 Gbps */ return 3; default: MISSING_CASE(clock); @@ -2239,7 +2241,7 @@ static u8 intel_c20_get_hdmi_rate(u32 clock) static bool is_dp2(u32 clock) { /* DP2.0 clock rates */ - if (clock == 312500 || clock == 421875 || clock == 625000) + if (clock == 1000000 || clock == 1350000 || clock == 2000000) return true; return false; @@ -2248,11 +2250,11 @@ static bool is_dp2(u32 clock) static bool is_hdmi_frl(u32 clock) { switch (clock) { - case 166670: /* 3 Gbps */ - case 333330: /* 6 Gbps */ - case 444440: /* 8 Gbps */ - case 555560: /* 10 Gbps */ - case 666670: /* 12 Gbps */ + case 300000: /* 3 Gbps */ + case 600000: /* 6 Gbps */ + case 800000: /* 8 Gbps */ + case 1000000: /* 10 Gbps */ + case 1200000: /* 12 Gbps */ return true; default: return false; @@ -2285,6 +2287,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, const struct intel_c20pll_state *pll_state = &crtc_state->cx0pll_state.c20; bool dp = false; int lane = crtc_state->lane_count > 2 ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0; + u32 clock = crtc_state->port_clock; bool cntx; int i; @@ -2323,7 +2326,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, } /* 3.3 mpllb or mplla configuration */ - if (intel_c20_use_mplla(pll_state->clock)) { + if (intel_c20_use_mplla(clock)) { for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) { if (cntx) intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, @@ -2350,23 +2353,23 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, /* 4. Program custom width to match the link protocol */ intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH, PHY_C20_CUSTOM_WIDTH_MASK, - PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(pll_state->clock, dp)), + PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)), MB_WRITE_COMMITTED); /* 5. For DP or 6. For HDMI */ if (dp) { intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, BIT(6) | PHY_C20_CUSTOM_SERDES_MASK, - BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(pll_state->clock)), + BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)), MB_WRITE_COMMITTED); } else { intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE, BIT(7) | PHY_C20_CUSTOM_SERDES_MASK, - is_hdmi_frl(pll_state->clock) ? BIT(7) : 0, + is_hdmi_frl(clock) ? BIT(7) : 0, MB_WRITE_COMMITTED); intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE, - intel_c20_get_hdmi_rate(pll_state->clock), + intel_c20_get_hdmi_rate(clock), MB_WRITE_COMMITTED); } @@ -2378,8 +2381,8 @@ static void intel_c20_pll_program(struct drm_i915_private *i915, BIT(0), cntx ? 
0 : 1, MB_WRITE_COMMITTED); } -int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, - const struct intel_c10pll_state *pll_state) +static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, + const struct intel_c10pll_state *pll_state) { unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1; unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400; @@ -2405,8 +2408,8 @@ int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, return tmpclk; } -int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, - const struct intel_c20pll_state *pll_state) +static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, + const struct intel_c20pll_state *pll_state) { unsigned int frac, frac_en, frac_quot, frac_rem, frac_den; unsigned int multiplier, refclk = 38400; @@ -3004,17 +3007,115 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder, return ICL_PORT_DPLL_DEFAULT; } -void intel_c10pll_state_verify(struct intel_atomic_state *state, +static void intel_c10pll_state_verify(const struct intel_crtc_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder, + struct intel_c10pll_state *mpllb_hw_state) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10; + int i; + + for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) { + u8 expected = mpllb_sw_state->pll[i]; + + I915_STATE_WARN(i915, mpllb_hw_state->pll[i] != expected, + "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)", + crtc->base.base.id, crtc->base.name, i, + expected, mpllb_hw_state->pll[i]); + } + + I915_STATE_WARN(i915, mpllb_hw_state->tx != mpllb_sw_state->tx, + "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)", + crtc->base.base.id, crtc->base.name, + mpllb_sw_state->tx, mpllb_hw_state->tx); + + I915_STATE_WARN(i915, mpllb_hw_state->cmn != mpllb_sw_state->cmn, + "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)", + crtc->base.base.id, crtc->base.name, + mpllb_sw_state->cmn, mpllb_hw_state->cmn); +} + +void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder, + struct intel_cx0pll_state *pll_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + if (intel_is_c10phy(i915, phy)) + intel_c10pll_readout_hw_state(encoder, &pll_state->c10); + else + intel_c20pll_readout_hw_state(encoder, &pll_state->c20); +} + +int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder, + const struct intel_cx0pll_state *pll_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + if (intel_is_c10phy(i915, phy)) + return intel_c10pll_calc_port_clock(encoder, &pll_state->c10); + + return intel_c20pll_calc_port_clock(encoder, &pll_state->c20); +} + +static void intel_c20pll_state_verify(const struct intel_crtc_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder, + struct intel_c20pll_state *mpll_hw_state) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20; + bool sw_use_mpllb = mpll_sw_state->tx[0] & C20_PHY_USE_MPLLB; + bool hw_use_mpllb = mpll_hw_state->tx[0] & C20_PHY_USE_MPLLB; + int i; + + I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb, + "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)", + crtc->base.base.id, 
crtc->base.name, + sw_use_mpllb, hw_use_mpllb); + + if (hw_use_mpllb) { + for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) { + I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i], + "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]); + } + } else { + for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) { + I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i], + "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]); + } + } + + for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) { + I915_STATE_WARN(i915, mpll_hw_state->tx[i] != mpll_sw_state->tx[i], + "[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + mpll_sw_state->tx[i], mpll_hw_state->tx[i]); + } + + for (i = 0; i < ARRAY_SIZE(mpll_sw_state->cmn); i++) { + I915_STATE_WARN(i915, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i], + "[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)", + crtc->base.base.id, crtc->base.name, i, + mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]); + } +} + +void intel_cx0pll_state_verify(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - struct intel_c10pll_state mpllb_hw_state = {}; - const struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10; struct intel_encoder *encoder; + struct intel_cx0pll_state mpll_hw_state = {}; enum phy phy; - int i; if (DISPLAY_VER(i915) < 14) return; @@ -3030,27 +3131,13 @@ void intel_c10pll_state_verify(struct intel_atomic_state *state, encoder = intel_get_crtc_new_encoder(state, new_crtc_state); phy = intel_port_to_phy(i915, encoder->port); - if (!intel_is_c10phy(i915, phy)) + if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) return; - intel_c10pll_readout_hw_state(encoder, &mpllb_hw_state); + intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state); - for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) { - u8 expected = mpllb_sw_state->pll[i]; - - I915_STATE_WARN(i915, mpllb_hw_state.pll[i] != expected, - "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)", - crtc->base.base.id, crtc->base.name, i, - expected, mpllb_hw_state.pll[i]); - } - - I915_STATE_WARN(i915, mpllb_hw_state.tx != mpllb_sw_state->tx, - "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)", - crtc->base.base.id, crtc->base.name, - mpllb_sw_state->tx, mpllb_hw_state.tx); - - I915_STATE_WARN(i915, mpllb_hw_state.cmn != mpllb_sw_state->cmn, - "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)", - crtc->base.base.id, crtc->base.name, - mpllb_sw_state->cmn, mpllb_hw_state.cmn); + if (intel_is_c10phy(i915, phy)) + intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10); + else + intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20); } diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h index 0e0a38dac8..c668267725 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h @@ -16,6 +16,7 @@ struct 
drm_i915_private; struct intel_atomic_state; struct intel_c10pll_state; struct intel_c20pll_state; +struct intel_cx0pll_state; struct intel_crtc; struct intel_crtc_state; struct intel_encoder; @@ -28,20 +29,19 @@ void intel_mtl_pll_disable(struct intel_encoder *encoder); enum icl_port_dpll_id intel_mtl_port_pll_type(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); -void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c10pll_state *pll_state); + int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder); +void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder, + struct intel_cx0pll_state *pll_state); +int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder, + const struct intel_cx0pll_state *pll_state); + void intel_c10pll_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_c10pll_state *hw_state); -int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, - const struct intel_c10pll_state *pll_state); -void intel_c10pll_state_verify(struct intel_atomic_state *state, +void intel_cx0pll_state_verify(struct intel_atomic_state *state, struct intel_crtc *crtc); -void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, - struct intel_c20pll_state *pll_state); void intel_c20pll_dump_hw_state(struct drm_i915_private *i915, const struct intel_c20pll_state *hw_state); -int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, - const struct intel_c20pll_state *pll_state); void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 9151d5add9..31aa5d54fd 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -25,6 +25,7 @@ * */ +#include #include #include @@ -2210,16 +2211,87 @@ static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel } static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) + const struct intel_crtc_state *crtc_state, + bool enable) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (!crtc_state->fec_enable) return; - if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0) - drm_dbg_kms(&i915->drm, - "Failed to set FEC_READY in the sink\n"); + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, + enable ? DP_FEC_READY : 0) <= 0) + drm_dbg_kms(&i915->drm, "Failed to set FEC_READY to %s in the sink\n", + enable ? "enabled" : "disabled"); + + if (enable && + drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS, + DP_FEC_DECODE_EN_DETECTED | DP_FEC_DECODE_DIS_DETECTED) <= 0) + drm_dbg_kms(&i915->drm, "Failed to clear FEC detected flags\n"); +} + +static int read_fec_detected_status(struct drm_dp_aux *aux) +{ + int ret; + u8 status; + + ret = drm_dp_dpcd_readb(aux, DP_FEC_STATUS, &status); + if (ret < 0) + return ret; + + return status; +} + +static void wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled) +{ + struct drm_i915_private *i915 = to_i915(aux->drm_dev); + int mask = enabled ? 
DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED; + int status; + int err; + + err = readx_poll_timeout(read_fec_detected_status, aux, status, + status & mask || status < 0, + 10000, 200000); + + if (!err && status >= 0) + return; + + if (err == -ETIMEDOUT) + drm_dbg_kms(&i915->drm, "Timeout waiting for FEC %s to get detected\n", + str_enabled_disabled(enabled)); + else + drm_dbg_kms(&i915->drm, "FEC detected status read error: %d\n", status); +} + +void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + bool enabled) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + int ret; + + if (!crtc_state->fec_enable) + return; + + if (enabled) + ret = intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state), + DP_TP_STATUS_FEC_ENABLE_LIVE, 1); + else + ret = intel_de_wait_for_clear(i915, dp_tp_status_reg(encoder, crtc_state), + DP_TP_STATUS_FEC_ENABLE_LIVE, 1); + + if (ret) + drm_err(&i915->drm, + "Timeout waiting for FEC live state to get %s\n", + str_enabled_disabled(enabled)); + + /* + * At least the Synaptics MST hub doesn't set the detected flag when + * FEC decoding is disabled, so skip waiting for that. + */ + if (enabled) + wait_for_fec_detected(&intel_dp->aux, enabled); } static void intel_ddi_enable_fec(struct intel_encoder *encoder, @@ -2234,8 +2306,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, 0, DP_TP_CTL_FEC_ENABLE); } -static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static void intel_ddi_disable_fec(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -2466,13 +2538,17 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_dp_set_power(intel_dp, DP_SET_POWER_D0); intel_dp_configure_protocol_converter(intel_dp, crtc_state); - intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); + if (!is_mst) + intel_dp_sink_enable_decompression(state, + to_intel_connector(conn_state->connector), + crtc_state); + /* * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit * in the FEC_CONFIGURATION register to 1 before initiating link * training */ - intel_dp_sink_set_fec_ready(intel_dp, crtc_state); + intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true); intel_dp_check_frl_training(intel_dp); intel_dp_pcon_dsc_configure(intel_dp, crtc_state); @@ -2505,7 +2581,8 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state, /* 6.o Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); - intel_dsc_dp_pps_write(encoder, crtc_state); + if (!is_mst) + intel_dsc_dp_pps_write(encoder, crtc_state); } static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2616,13 +2693,16 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_dp_set_power(intel_dp, DP_SET_POWER_D0); intel_dp_configure_protocol_converter(intel_dp, crtc_state); - intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); + if (!is_mst) + intel_dp_sink_enable_decompression(state, + to_intel_connector(conn_state->connector), + crtc_state); /* * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit * in the FEC_CONFIGURATION register to 1 before initiating link * training */ - intel_dp_sink_set_fec_ready(intel_dp, crtc_state); + intel_dp_sink_set_fec_ready(intel_dp, crtc_state, 
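/*
 * Aside on wait_for_fec_detected() above: readx_poll_timeout()
 * re-reads read_fec_detected_status(aux) into 'status' every 10ms
 * (10000us) until 'status & mask || status < 0' holds or 200ms
 * (200000us) elapse, so a DPCD read error terminates the poll
 * just like the detected flag does.
 */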
true); intel_dp_check_frl_training(intel_dp); intel_dp_pcon_dsc_configure(intel_dp, crtc_state); @@ -2643,7 +2723,8 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, /* 7.l Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); - intel_dsc_dp_pps_write(encoder, crtc_state); + if (!is_mst) + intel_dsc_dp_pps_write(encoder, crtc_state); } static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2695,9 +2776,11 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, if (!is_mst) intel_dp_set_power(intel_dp, DP_SET_POWER_D0); intel_dp_configure_protocol_converter(intel_dp, crtc_state); - intel_dp_sink_set_decompression_state(intel_dp, crtc_state, - true); - intel_dp_sink_set_fec_ready(intel_dp, crtc_state); + if (!is_mst) + intel_dp_sink_enable_decompression(state, + to_intel_connector(conn_state->connector), + crtc_state); + intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true); intel_dp_start_link_train(intel_dp, crtc_state); if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) && !is_trans_port_sync_mode(crtc_state)) @@ -2705,10 +2788,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_fec(encoder, crtc_state); - if (!is_mst) + if (!is_mst) { intel_ddi_enable_transcoder_clock(encoder, crtc_state); - - intel_dsc_dp_pps_write(encoder, crtc_state); + intel_dsc_dp_pps_write(encoder, crtc_state); + } } static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2717,10 +2800,15 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (HAS_DP20(dev_priv)) + if (HAS_DP20(dev_priv)) { intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder), crtc_state); + if (crtc_state->has_panel_replay) + drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, + DP_PANEL_REPLAY_ENABLE); + } if (DISPLAY_VER(dev_priv) >= 14) mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); @@ -2866,8 +2954,7 @@ static void disable_ddi_buf(struct intel_encoder *encoder, intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), DP_TP_CTL_ENABLE, 0); - /* Disable FEC in DP Sink */ - intel_ddi_disable_fec_state(encoder, crtc_state); + intel_ddi_disable_fec(encoder, crtc_state); if (wait) intel_wait_ddi_buf_idle(dev_priv, port); @@ -2882,10 +2969,12 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, mtl_disable_ddi_buf(encoder, crtc_state); /* 3.f Disable DP_TP_CTL FEC Enable if it is needed */ - intel_ddi_disable_fec_state(encoder, crtc_state); + intel_ddi_disable_fec(encoder, crtc_state); } else { disable_ddi_buf(encoder, crtc_state); } + + intel_ddi_wait_for_fec_status(encoder, crtc_state, false); } static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, @@ -2925,6 +3014,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, intel_disable_ddi_buf(encoder, old_crtc_state); + intel_dp_sink_set_fec_ready(intel_dp, old_crtc_state, false); + /* * From TGL spec: "If single stream or multi-stream master transcoder: * Configure Transcoder Clock select to direct no clock to the @@ -3110,11 +3201,18 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state, if (!dig_port->lspcon.active || intel_dp_has_hdmi_sink(&dig_port->dp)) intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); - intel_audio_codec_enable(encoder, crtc_state, conn_state); - 
trans_port_sync_stop_link_train(state, encoder, crtc_state); } +/* FIXME bad home for this function */ +i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915, + enum transcoder cpu_transcoder) +{ + return DISPLAY_VER(i915) >= 14 ? + MTL_CHICKEN_TRANS(cpu_transcoder) : + CHICKEN_TRANS(cpu_transcoder); +} + static i915_reg_t gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv, enum port port) @@ -3233,8 +3331,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl); intel_wait_ddi_buf_active(dev_priv, port); - - intel_audio_codec_enable(encoder, crtc_state, conn_state); } static void intel_enable_ddi(struct intel_atomic_state *state, @@ -3252,6 +3348,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state, intel_enable_transcoder(crtc_state); + intel_ddi_wait_for_fec_status(encoder, crtc_state, true); + intel_crtc_vblank_on(crtc_state); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) @@ -3259,10 +3357,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state, else intel_enable_ddi_dp(state, encoder, crtc_state, conn_state); - /* Enable hdcp if it's desired */ - if (conn_state->content_protection == - DRM_MODE_CONTENT_PROTECTION_DESIRED) - intel_hdcp_enable(state, encoder, crtc_state, conn_state); + intel_hdcp_enable(state, encoder, crtc_state, conn_state); + } static void intel_disable_ddi_dp(struct intel_atomic_state *state, @@ -3271,16 +3367,16 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_connector *connector = + to_intel_connector(old_conn_state->connector); intel_dp->link_trained = false; - intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); - intel_psr_disable(intel_dp, old_crtc_state); intel_edp_backlight_off(old_conn_state); /* Disable the decompression in DP Sink */ - intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state, - false); + intel_dp_sink_disable_decompression(state, + connector, old_crtc_state); /* Disable Ignore_MSA bit in DP Sink */ intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state, false); @@ -3294,8 +3390,6 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state, struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct drm_connector *connector = old_conn_state->connector; - intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); - if (!intel_hdmi_handle_sink_scrambling(encoder, connector, false, false)) drm_dbg_kms(&i915->drm, @@ -3578,16 +3672,42 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, AUDIO_OUTPUT_ENABLE(cpu_transcoder); } -void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, - struct intel_crtc_state *crtc_state) +static int tgl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state) { - if (DISPLAY_VER(dev_priv) >= 12 && crtc_state->port_clock > 594000) - crtc_state->min_voltage_level = 2; - else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) && - crtc_state->port_clock > 594000) - crtc_state->min_voltage_level = 3; - else if (DISPLAY_VER(dev_priv) >= 11 && crtc_state->port_clock > 594000) - crtc_state->min_voltage_level = 1; + if (crtc_state->port_clock > 594000) + return 2; + else + return 0; +} + +static int jsl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state) +{ + if (crtc_state->port_clock > 594000) + return 3; + else + 
return 0; +} + +static int icl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state) +{ + if (crtc_state->port_clock > 594000) + return 1; + else + return 0; +} + +void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + + if (DISPLAY_VER(dev_priv) >= 14) + crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state); + else if (DISPLAY_VER(dev_priv) >= 12) + crtc_state->min_voltage_level = tgl_ddi_min_voltage_level(crtc_state); + else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) + crtc_state->min_voltage_level = jsl_ddi_min_voltage_level(crtc_state); + else if (DISPLAY_VER(dev_priv) >= 11) + crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state); } static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv, @@ -3801,7 +3921,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->lane_lat_optim_mask = bxt_ddi_phy_get_lane_lat_optim_mask(encoder); - intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); + intel_ddi_compute_min_voltage_level(pipe_config); intel_hdmi_read_gcp_infoframe(encoder, pipe_config); @@ -3854,18 +3974,13 @@ void intel_ddi_get_clock(struct intel_encoder *encoder, static void mtl_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(i915, encoder->port); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); if (intel_tc_port_in_tbt_alt_mode(dig_port)) { crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder); - } else if (intel_is_c10phy(i915, phy)) { - intel_c10pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c10); - crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10); } else { - intel_c20pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c20); - crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20); + intel_cx0pll_readout_hw_state(encoder, &crtc_state->cx0pll_state); + crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state); } intel_ddi_get_config(encoder, crtc_state); @@ -4086,7 +4201,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, pipe_config->lane_lat_optim_mask = bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); - intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); + intel_ddi_compute_min_voltage_level(pipe_config); return 0; } @@ -4114,7 +4229,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1, static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1, const struct intel_crtc_state *crtc_state2) { + /* + * FIXME the modeset sequence is currently wrong and + * can't deal with bigjoiner + port sync at the same time. 
+ */ return crtc_state1->hw.active && crtc_state2->hw.active && + !crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes && crtc_state1->output_types == crtc_state2->output_types && crtc_state1->output_format == crtc_state2->output_format && crtc_state1->lane_count == crtc_state2->lane_count && @@ -4844,6 +4964,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, encoder->post_pll_disable = intel_ddi_post_pll_disable; encoder->post_disable = intel_ddi_post_disable; encoder->update_pipe = intel_ddi_update_pipe; + encoder->audio_enable = intel_audio_codec_enable; + encoder->audio_disable = intel_audio_codec_disable; encoder->get_hw_state = intel_ddi_get_hw_state; encoder->sync_state = intel_ddi_sync_state; encoder->initial_fastset_check = intel_ddi_initial_fastset_check; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 4999c0ee22..434de71968 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -27,6 +27,8 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); +i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915, + enum transcoder cpu_transcoder); void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, struct intel_encoder *intel_encoder, const struct intel_crtc_state *old_crtc_state, @@ -60,13 +62,15 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state); +void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + bool enabled); void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, bool state); -void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, - struct intel_crtc_state *crtc_state); +void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state); int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder, enum transcoder cpu_transcoder, bool enable, u32 hdcp_mask); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index df582ff81b..b10aad15a6 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -48,6 +48,7 @@ #include "g4x_dp.h" #include "g4x_hdmi.h" #include "hsw_ips.h" +#include "i915_config.h" #include "i915_drv.h" #include "i915_reg.h" #include "i915_utils.h" @@ -72,10 +73,10 @@ #include "intel_dp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" -#include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_dpll_mgr.h" #include "intel_dpt.h" +#include "intel_dpt_common.h" #include "intel_drrs.h" #include "intel_dsb.h" #include "intel_dsi.h" @@ -193,12 +194,9 @@ static bool is_hdr_mode(const struct intel_crtc_state *crtc_state) static void skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { - if (enable) - intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), - 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS); - else - 
intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), - DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0); + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), + DUPS1_GATING_DIS | DUPS2_GATING_DIS, + enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0); } /* Wa_2006604312:icl,ehl */ @@ -206,10 +204,9 @@ static void icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { - if (enable) - intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS); - else - intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0); + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), + DPFR_GATING_DIS, + enable ? DPFR_GATING_DIS : 0); } /* Wa_1604331009:icl,jsl,ehl */ @@ -217,7 +214,8 @@ static void icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { - intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS, + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), + CURSOR_GATING_DIS, enable ? CURSOR_GATING_DIS : 0); } @@ -397,7 +395,6 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; - i915_reg_t reg; u32 val; drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe)); @@ -430,16 +427,16 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state) intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe), 0, PIPE_ARB_USE_PROG_SLOTS); - reg = TRANSCONF(cpu_transcoder); - val = intel_de_read(dev_priv, reg); + val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); if (val & TRANSCONF_ENABLE) { /* we keep both pipes enabled on 830 */ drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv)); return; } - intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE); - intel_de_posting_read(dev_priv, reg); + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), + val | TRANSCONF_ENABLE); + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); /* * Until the pipe starts PIPEDSL reads will return a stale value, @@ -458,7 +455,6 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; - i915_reg_t reg; u32 val; drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe)); @@ -469,8 +465,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) */ assert_planes_disabled(crtc); - reg = TRANSCONF(cpu_transcoder); - val = intel_de_read(dev_priv, reg); + val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); if ((val & TRANSCONF_ENABLE) == 0) return; @@ -485,14 +480,12 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) if (!IS_I830(dev_priv)) val &= ~TRANSCONF_ENABLE; - if (DISPLAY_VER(dev_priv) >= 14) - intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), - FECSTALL_DIS_DPTSTREAM_DPTTG, 0); - else if (DISPLAY_VER(dev_priv) >= 12) - intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); + + if (DISPLAY_VER(dev_priv) >= 12) + intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder), FECSTALL_DIS_DPTSTREAM_DPTTG, 0); - intel_de_write(dev_priv, reg, val); if ((val & TRANSCONF_ENABLE) == 0) intel_wait_for_pipe_off(old_crtc_state); } @@ -896,6 +889,48 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state) (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || 
IS_HASWELL(i915)); } +static void intel_encoders_audio_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct drm_connector_state *conn_state; + struct drm_connector *conn; + int i; + + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { + struct intel_encoder *encoder = + to_intel_encoder(conn_state->best_encoder); + + if (conn_state->crtc != &crtc->base) + continue; + + if (encoder->audio_enable) + encoder->audio_enable(encoder, crtc_state, conn_state); + } +} + +static void intel_encoders_audio_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct drm_connector_state *old_conn_state; + struct drm_connector *conn; + int i; + + for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { + struct intel_encoder *encoder = + to_intel_encoder(old_conn_state->best_encoder); + + if (old_conn_state->crtc != &crtc->base) + continue; + + if (encoder->audio_disable) + encoder->audio_disable(encoder, old_crtc_state, old_conn_state); + } +} + #define is_enabling(feature, old_crtc_state, new_crtc_state) \ ((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \ (new_crtc_state)->feature) @@ -955,6 +990,28 @@ static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state, vrr_params_changed(old_crtc_state, new_crtc_state))); } +static bool audio_enabling(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) +{ + if (!new_crtc_state->hw.active) + return false; + + return is_enabling(has_audio, old_crtc_state, new_crtc_state) || + (new_crtc_state->has_audio && + memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0); +} + +static bool audio_disabling(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) +{ + if (!old_crtc_state->hw.active) + return false; + + return is_disabling(has_audio, old_crtc_state, new_crtc_state) || + (old_crtc_state->has_audio && + memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0); +} + #undef is_disabling #undef is_enabling @@ -995,6 +1052,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state, if (intel_crtc_needs_color_update(new_crtc_state)) intel_color_post_update(new_crtc_state); + + if (audio_enabling(old_crtc_state, new_crtc_state)) + intel_encoders_audio_enable(state, crtc); } static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, @@ -1078,6 +1138,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, intel_crtc_update_active_timings(old_crtc_state, false); } + if (audio_disabling(old_crtc_state, new_crtc_state)) + intel_encoders_audio_disable(state, crtc); + intel_drrs_deactivate(old_crtc_state); intel_psr_pre_plane_update(state, crtc); @@ -1513,12 +1576,9 @@ static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder transcoder = crtc_state->cpu_transcoder; - i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? 
MTL_CHICKEN_TRANS(transcoder) : - CHICKEN_TRANS(transcoder); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); - intel_de_rmw(dev_priv, reg, + intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder), HSW_FRAME_START_DELAY_MASK, HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1)); } @@ -1796,31 +1856,31 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) { + /* + * DG2's "TC1", although a TC-capable output, doesn't share the same flow + * as other platforms on the display engine side and rather relies on the + * SNPS PHY, which is programmed separately + */ if (IS_DG2(dev_priv)) - /* DG2's "TC1" output uses a SNPS PHY */ return false; - else if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 0)) + + if (DISPLAY_VER(dev_priv) >= 13) return phy >= PHY_F && phy <= PHY_I; else if (IS_TIGERLAKE(dev_priv)) return phy >= PHY_D && phy <= PHY_I; else if (IS_ICELAKE(dev_priv)) return phy >= PHY_C && phy <= PHY_F; - else - return false; + + return false; } bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy) { - if (phy == PHY_NONE) - return false; - else if (IS_DG2(dev_priv)) - /* - * All four "combo" ports and the TC1 port (PHY E) use - * Synopsis PHYs. - */ - return phy <= PHY_E; - - return false; + /* + * For DG2, and for DG2 only, all four "combo" ports and the TC1 port + * (PHY E) use Synopsys PHYs. See intel_phy_is_tc(). + */ + return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E; } enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) @@ -2409,15 +2469,15 @@ static void compute_m_n(u32 *ret_m, u32 *ret_n, } void -intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, +intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes, int pixel_clock, int link_clock, - struct intel_link_m_n *m_n, - bool fec_enable) + int bw_overhead, + struct intel_link_m_n *m_n) { - u32 data_clock = bits_per_pixel * pixel_clock; - - if (fec_enable) - data_clock = intel_dp_mode_to_fec_clock(data_clock); + u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock); + u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16, + bw_overhead); + u32 data_n = intel_dp_max_data_rate(link_clock, nlanes); /* * Windows/BIOS uses fixed M/N values always. Follow suit.
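/*
 * Illustrative sketch, not part of the patch: the rewritten
 * intel_link_compute_m_n() above builds the data M/N ratio from the
 * overhead-adjusted pixel data rate vs. the total link data rate, and
 * the link M/N ratio from pixel clock vs. link *symbol* clock, so that
 * 128b/132b links (32-bit symbols) take the same path as 8b/10b links
 * (10-bit symbols). A stand-in for the intel_dp_link_symbol_clock()
 * helper it calls, assuming the driver's usual 10 kbit/s rate units
 * (540000 == 5.4 Gbit/s) and that UHBR rates start at 1000000:
 */
static int example_link_symbol_clock(int link_rate)
{
	/* one link symbol carries 32 bits on UHBR (128b/132b), else 10 bits */
	int symbol_size = link_rate >= 1000000 ? 32 : 10;

	/* e.g. HBR2: 540000 * 10 / 10 = 540000, i.e. a 540 MHz symbol clock */
	return DIV_ROUND_CLOSEST(link_rate * 10, symbol_size);
}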
@@ -2428,11 +2488,11 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, */ m_n->tu = 64; compute_m_n(&m_n->data_m, &m_n->data_n, - data_clock, link_clock * nlanes * 8, + data_m, data_n, 0x8000000); compute_m_n(&m_n->link_m, &m_n->link_n, - pixel_clock, link_clock, + pixel_clock, link_symbol_clock, 0x80000); } @@ -2567,7 +2627,7 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta crtc_vblank_start = 1; } - if (DISPLAY_VER(dev_priv) > 3) + if (DISPLAY_VER(dev_priv) >= 4) intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder), vsyncshift); @@ -2850,67 +2910,6 @@ static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) intel_de_read(dev_priv, PFIT_PGM_RATIOS); } -static void vlv_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe = crtc->pipe; - struct dpll clock; - u32 mdiv; - int refclk = 100000; - - /* In case of DSI, DPLL will not be used */ - if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) - return; - - vlv_dpio_get(dev_priv); - mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); - vlv_dpio_put(dev_priv); - - clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; - clock.m2 = mdiv & DPIO_M2DIV_MASK; - clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; - clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; - clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; - - pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); -} - -static void chv_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe = crtc->pipe; - enum dpio_channel port = vlv_pipe_to_channel(pipe); - struct dpll clock; - u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; - int refclk = 100000; - - /* In case of DSI, DPLL will not be used */ - if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) - return; - - vlv_dpio_get(dev_priv); - cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); - pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); - pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); - pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); - pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); - vlv_dpio_put(dev_priv); - - clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; - clock.m2 = (pll_dw0 & 0xff) << 22; - if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) - clock.m2 |= pll_dw2 & 0x3fffff; - clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; - clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; - clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; - - pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); -} - static enum intel_output_format bdw_get_pipe_misc_output_format(struct intel_crtc *crtc) { @@ -3168,7 +3167,7 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) break; case 36: /* Port output 12BPC defined for ADLP+ */ - if (DISPLAY_VER(dev_priv) > 12) + if (DISPLAY_VER(dev_priv) >= 13) val |= PIPE_MISC_BPC_12_ADLP; break; default: @@ -3225,7 +3224,7 @@ int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) * MIPI DSI HW readout. 
*/ case PIPE_MISC_BPC_12_ADLP: - if (DISPLAY_VER(dev_priv) > 12) + if (DISPLAY_VER(dev_priv) >= 13) return 36; fallthrough; default: @@ -3802,9 +3801,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, } if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { - tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ? - MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) : - CHICKEN_TRANS(pipe_config->cpu_transcoder)); + tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder)); pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; } else { @@ -3833,133 +3830,27 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) return true; } -static int i9xx_pll_refclk(struct drm_device *dev, - const struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - u32 dpll = pipe_config->dpll_hw_state.dpll; - - if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) - return dev_priv->display.vbt.lvds_ssc_freq; - else if (HAS_PCH_SPLIT(dev_priv)) - return 120000; - else if (DISPLAY_VER(dev_priv) != 2) - return 96000; - else - return 48000; -} - -/* Returns the clock of the currently programmed mode of the given pipe. */ -void i9xx_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - u32 dpll = pipe_config->dpll_hw_state.dpll; - u32 fp; - struct dpll clock; - int port_clock; - int refclk = i9xx_pll_refclk(dev, pipe_config); - - if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) - fp = pipe_config->dpll_hw_state.fp0; - else - fp = pipe_config->dpll_hw_state.fp1; - - clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; - if (IS_PINEVIEW(dev_priv)) { - clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; - clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; - } else { - clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; - clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; - } - - if (DISPLAY_VER(dev_priv) != 2) { - if (IS_PINEVIEW(dev_priv)) - clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> - DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); - else - clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> - DPLL_FPA01_P1_POST_DIV_SHIFT); - - switch (dpll & DPLL_MODE_MASK) { - case DPLLB_MODE_DAC_SERIAL: - clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? - 5 : 10; - break; - case DPLLB_MODE_LVDS: - clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 
- 7 : 14; - break; - default: - drm_dbg_kms(&dev_priv->drm, - "Unknown DPLL mode %08x in programmed " - "mode\n", (int)(dpll & DPLL_MODE_MASK)); - return; - } - - if (IS_PINEVIEW(dev_priv)) - port_clock = pnv_calc_dpll_params(refclk, &clock); - else - port_clock = i9xx_calc_dpll_params(refclk, &clock); - } else { - enum pipe lvds_pipe; - - if (IS_I85X(dev_priv) && - intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) && - lvds_pipe == crtc->pipe) { - u32 lvds = intel_de_read(dev_priv, LVDS); - - clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> - DPLL_FPA01_P1_POST_DIV_SHIFT); - - if (lvds & LVDS_CLKB_POWER_UP) - clock.p2 = 7; - else - clock.p2 = 14; - } else { - if (dpll & PLL_P1_DIVIDE_BY_TWO) - clock.p1 = 2; - else { - clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> - DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; - } - if (dpll & PLL_P2_DIVIDE_BY_4) - clock.p2 = 4; - else - clock.p2 = 2; - } - - port_clock = i9xx_calc_dpll_params(refclk, &clock); - } - - /* - * This value includes pixel_multiplier. We will use - * port_clock to compute adjusted_mode.crtc_clock in the - * encoder's get_config() function. - */ - pipe_config->port_clock = port_clock; -} - int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n) { /* - * The calculation for the data clock is: + * The calculation for the data clock -> pixel clock is: * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp * But we want to avoid losing precision if possible, so: * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) * - * and the link clock is simpler: - * link_clock = (m * link_clock) / n + * and for link freq (10 kbit/s units) -> pixel clock it is: + * link_symbol_clock = link_freq * 10 / link_symbol_size + * pixel_clock = (m * link_symbol_clock) / n + * or for more precision: + * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size) */ if (!m_n->link_n) return 0; - return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq), - m_n->link_n); + return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10), + m_n->link_n * intel_dp_link_symbol_size(link_freq)); } int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) @@ -4691,6 +4582,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state, if (ret) return ret; + crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe); crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe]; if (crtc_state->pipe_bpp > to_bpp_int(crtc_state->max_link_bpp_x16)) { @@ -5031,6 +4923,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_X(name) do { \ if (current_config->name != pipe_config->name) { \ + BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ + __stringify(name) " is bool"); \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected 0x%08x, found 0x%08x)", \ current_config->name, \ @@ -5041,6 +4935,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \ if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \ + BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ + __stringify(name) " is bool"); \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected 0x%08x, found 0x%08x)", \ current_config->name & (mask), \ @@ -5051,6 +4947,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_I(name) do { \ if (current_config->name != pipe_config->name) { \ +
BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \ + __stringify(name) " is bool"); \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected %i, found %i)", \ current_config->name, \ @@ -5061,6 +4959,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_BOOL(name) do { \ if (current_config->name != pipe_config->name) { \ + BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \ + __stringify(name) " is not bool"); \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected %s, found %s)", \ str_yes_no(current_config->name), \ @@ -5069,23 +4969,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } \ } while (0) -/* - * Checks state where we only read out the enabling, but not the entire - * state itself (like full infoframes or ELD for audio). These states - * require a full modeset on bootup to fix up. - */ -#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \ - if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \ - PIPE_CONF_CHECK_BOOL(name); \ - } else { \ - pipe_config_mismatch(fastset, crtc, __stringify(name), \ - "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \ - str_yes_no(current_config->name), \ - str_yes_no(pipe_config->name)); \ - ret = false; \ - } \ -} while (0) - #define PIPE_CONF_CHECK_P(name) do { \ if (current_config->name != pipe_config->name) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ @@ -5216,8 +5099,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_QUIRK(quirk) \ ((current_config->quirks | pipe_config->quirks) & (quirk)) - PIPE_CONF_CHECK_I(hw.enable); - PIPE_CONF_CHECK_I(hw.active); + PIPE_CONF_CHECK_BOOL(hw.enable); + PIPE_CONF_CHECK_BOOL(hw.active); PIPE_CONF_CHECK_I(cpu_transcoder); PIPE_CONF_CHECK_I(mst_master_transcoder); @@ -5273,8 +5156,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(enhanced_framing); PIPE_CONF_CHECK_BOOL(fec_enable); - PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); - PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES); + if (!fastset) { + PIPE_CONF_CHECK_BOOL(has_audio); + PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES); + } PIPE_CONF_CHECK_X(gmch_pfit.control); /* pfit ratios are autocomputed by the hw on gen4+ */ @@ -5424,9 +5309,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset); PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset); - PIPE_CONF_CHECK_I(dsc.compression_enable); - PIPE_CONF_CHECK_I(dsc.dsc_split); - PIPE_CONF_CHECK_I(dsc.compressed_bpp); + PIPE_CONF_CHECK_BOOL(dsc.compression_enable); + PIPE_CONF_CHECK_BOOL(dsc.dsc_split); + PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16); PIPE_CONF_CHECK_BOOL(splitter.enable); PIPE_CONF_CHECK_I(splitter.link_count); @@ -5444,7 +5329,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_BOOL -#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE #undef PIPE_CONF_CHECK_P #undef PIPE_CONF_CHECK_FLAGS #undef PIPE_CONF_CHECK_COLOR_LUT @@ -5535,6 +5419,16 @@ int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state, return 0; } +static void +intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state) +{ + crtc_state->uapi.mode_changed = true; + + crtc_state->update_pipe = false; + crtc_state->update_m_n = false; + crtc_state->update_lrr = false; +} + /** * 
intel_modeset_all_pipes_late - force a full modeset on all pipes * @state: intel atomic state @@ -5568,9 +5462,8 @@ int intel_modeset_all_pipes_late(struct intel_atomic_state *state, if (ret) return ret; - crtc_state->update_pipe = false; - crtc_state->update_m_n = false; - crtc_state->update_lrr = false; + intel_crtc_flag_modeset(crtc_state); + crtc_state->update_planes |= crtc_state->active_planes; crtc_state->async_flip_planes = 0; crtc_state->do_async_flip = false; @@ -5683,17 +5576,17 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta else new_crtc_state->uapi.mode_changed = false; - if (intel_crtc_needs_modeset(new_crtc_state) || - intel_compare_link_m_n(&old_crtc_state->dp_m_n, + if (intel_compare_link_m_n(&old_crtc_state->dp_m_n, &new_crtc_state->dp_m_n)) new_crtc_state->update_m_n = false; - if (intel_crtc_needs_modeset(new_crtc_state) || - (old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal && + if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal && old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end)) new_crtc_state->update_lrr = false; - if (!intel_crtc_needs_modeset(new_crtc_state)) + if (intel_crtc_needs_modeset(new_crtc_state)) + intel_crtc_flag_modeset(new_crtc_state); + else new_crtc_state->update_pipe = true; } @@ -6476,15 +6369,14 @@ int intel_atomic_check(struct drm_device *dev, if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state)) continue; + if (intel_dp_mst_crtc_needs_modeset(state, crtc)) + intel_crtc_flag_modeset(new_crtc_state); + if (intel_dp_mst_is_slave_trans(new_crtc_state)) { enum transcoder master = new_crtc_state->mst_master_transcoder; - if (intel_cpu_transcoders_need_modeset(state, BIT(master))) { - new_crtc_state->uapi.mode_changed = true; - new_crtc_state->update_pipe = false; - new_crtc_state->update_m_n = false; - new_crtc_state->update_lrr = false; - } + if (intel_cpu_transcoders_need_modeset(state, BIT(master))) + intel_crtc_flag_modeset(new_crtc_state); } if (is_trans_port_sync_mode(new_crtc_state)) { @@ -6493,21 +6385,13 @@ int intel_atomic_check(struct drm_device *dev, if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) trans |= BIT(new_crtc_state->master_transcoder); - if (intel_cpu_transcoders_need_modeset(state, trans)) { - new_crtc_state->uapi.mode_changed = true; - new_crtc_state->update_pipe = false; - new_crtc_state->update_m_n = false; - new_crtc_state->update_lrr = false; - } + if (intel_cpu_transcoders_need_modeset(state, trans)) + intel_crtc_flag_modeset(new_crtc_state); } if (new_crtc_state->bigjoiner_pipes) { - if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) { - new_crtc_state->uapi.mode_changed = true; - new_crtc_state->update_pipe = false; - new_crtc_state->update_m_n = false; - new_crtc_state->update_lrr = false; - } + if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) + intel_crtc_flag_modeset(new_crtc_state); } } @@ -6528,10 +6412,6 @@ int intel_atomic_check(struct drm_device *dev, goto fail; } - ret = drm_dp_mst_atomic_check(&state->base); - if (ret) - goto fail; - ret = intel_atomic_check_planes(state); if (ret) goto fail; @@ -6767,8 +6647,8 @@ static void intel_enable_crtc(struct intel_atomic_state *state, intel_crtc_enable_pipe_crc(crtc); } -static void intel_update_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void intel_pre_update_crtc(struct 
intel_atomic_state *state, + struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = @@ -6810,6 +6690,15 @@ static void intel_update_crtc(struct intel_atomic_state *state, intel_color_commit_noarm(new_crtc_state); intel_crtc_planes_update_noarm(state, crtc); +} + +static void intel_update_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); /* Perform vblank evasion around commit operation */ intel_pipe_update_start(state, crtc); @@ -6838,7 +6727,7 @@ static void intel_update_crtc(struct intel_atomic_state *state, * valid pipe configuration from the BIOS we need to take care * of enabling them on the CRTC's first fastset. */ - if (intel_crtc_needs_fastset(new_crtc_state) && !modeset && + if (intel_crtc_needs_fastset(new_crtc_state) && old_crtc_state->inherited) intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); } @@ -6934,6 +6823,13 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state) continue; intel_enable_crtc(state, crtc); + intel_pre_update_crtc(state, crtc); + } + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + if (!new_crtc_state->hw.active) + continue; + intel_update_crtc(state, crtc); } } @@ -6971,6 +6867,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) * So first let's enable all pipes that do not need a full modeset as * those don't have any external dependency. */ + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + enum pipe pipe = crtc->pipe; + + if ((update_pipes & BIT(pipe)) == 0) + continue; + + intel_pre_update_crtc(state, crtc); + } + while (update_pipes) { for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { @@ -7041,6 +6946,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) /* * Finally we do the plane updates/etc. for all pipes that got enabled.
*/ + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + enum pipe pipe = crtc->pipe; + + if ((update_pipes & BIT(pipe)) == 0) + continue; + + intel_pre_update_crtc(state, crtc); + } + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { enum pipe pipe = crtc->pipe; @@ -7060,49 +6974,24 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) drm_WARN_ON(&dev_priv->drm, update_pipes); } -static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) -{ - struct intel_atomic_state *state, *next; - struct llist_node *freed; - - freed = llist_del_all(&dev_priv->display.atomic_helper.free_list); - llist_for_each_entry_safe(state, next, freed, freed) - drm_atomic_state_put(&state->base); -} - -void intel_atomic_helper_free_state_worker(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), display.atomic_helper.free_work); - - intel_atomic_helper_free_state(dev_priv); -} - static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) { - struct wait_queue_entry wait_fence, wait_reset; - struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); - - init_wait_entry(&wait_fence, 0); - init_wait_entry(&wait_reset, 0); - for (;;) { - prepare_to_wait(&intel_state->commit_ready.wait, - &wait_fence, TASK_UNINTERRUPTIBLE); - prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags, - I915_RESET_MODESET), - &wait_reset, TASK_UNINTERRUPTIBLE); - + struct drm_i915_private *i915 = to_i915(intel_state->base.dev); + struct drm_plane *plane; + struct drm_plane_state *new_plane_state; + int ret, i; - if (i915_sw_fence_done(&intel_state->commit_ready) || - test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags)) - break; + for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) { + if (new_plane_state->fence) { + ret = dma_fence_wait_timeout(new_plane_state->fence, false, + i915_fence_timeout(i915)); + if (ret <= 0) + break; - schedule(); + dma_fence_put(new_plane_state->fence); + new_plane_state->fence = NULL; + } } - finish_wait(&intel_state->commit_ready.wait, &wait_fence); - finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags, - I915_RESET_MODESET), - &wait_reset); } static void intel_atomic_cleanup_work(struct work_struct *work) @@ -7120,8 +7009,6 @@ static void intel_atomic_cleanup_work(struct work_struct *work) drm_atomic_helper_cleanup_planes(&i915->drm, &state->base); drm_atomic_helper_commit_cleanup_done(&state->base); drm_atomic_state_put(&state->base); - - intel_atomic_helper_free_state(i915); } static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state) @@ -7394,32 +7281,6 @@ static void intel_atomic_commit_work(struct work_struct *work) intel_atomic_commit_tail(state); } -static int -intel_atomic_commit_ready(struct i915_sw_fence *fence, - enum i915_sw_fence_notify notify) -{ - struct intel_atomic_state *state = - container_of(fence, struct intel_atomic_state, commit_ready); - - switch (notify) { - case FENCE_COMPLETE: - /* we do blocking waits in the worker, nothing to do here */ - break; - case FENCE_FREE: - { - struct drm_i915_private *i915 = to_i915(state->base.dev); - struct intel_atomic_helper *helper = - &i915->display.atomic_helper; - - if (llist_add(&state->freed, &helper->free_list)) - queue_work(i915->unordered_wq, &helper->free_work); - break; - } - } - - return NOTIFY_DONE; -} - static void intel_atomic_track_fbs(struct intel_atomic_state *state) { struct intel_plane_state 
*old_plane_state, *new_plane_state; @@ -7442,10 +7303,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - drm_atomic_state_get(&state->base); - i915_sw_fence_init(&state->commit_ready, - intel_atomic_commit_ready); - /* * The intel_legacy_cursor_update() fast path takes care * of avoiding the vblank waits for simple cursor @@ -7478,7 +7335,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, if (ret) { drm_dbg_atomic(&dev_priv->drm, "Preparing state failed with %i\n", ret); - i915_sw_fence_commit(&state->commit_ready); intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); return ret; } @@ -7494,8 +7350,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, struct intel_crtc *crtc; int i; - i915_sw_fence_commit(&state->commit_ready); - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) intel_color_cleanup_commit(new_crtc_state); @@ -7509,7 +7363,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, drm_atomic_state_get(&state->base); INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); - i915_sw_fence_commit(&state->commit_ready); if (nonblock && state->modeset) { queue_work(dev_priv->display.wq.modeset, &state->base.commit_work); } else if (nonblock) { @@ -7909,7 +7762,7 @@ enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *de * Cantiga+ cannot handle modes with a hsync front porch of 0. * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. */ - if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) && + if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) && mode->hsync_start == mode->hdisplay) return MODE_H_ILLEGAL; diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index a05c7e2b78..f4a0773f0f 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -105,7 +105,6 @@ enum i9xx_plane_id { }; #define plane_name(p) ((p) + 'A') -#define sprite_name(p, s) ((p) * DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A') #define for_each_plane_id_on_crtc(__crtc, __p) \ for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ @@ -395,8 +394,8 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state, u8 active_pipes); void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, - struct intel_link_m_n *m_n, - bool fec_enable); + int bw_overhead, + struct intel_link_m_n *m_n); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); enum drm_mode_status @@ -485,8 +484,6 @@ void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, enum transcoder cpu_transcoder, struct intel_link_m_n *m_n); -void i9xx_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config); enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port); @@ -555,7 +552,7 @@ bool assert_port_valid(struct drm_i915_private *i915, enum port port); struct drm_device *drm = &(__i915)->drm; \ int __ret_warn_on = !!(condition); \ if (unlikely(__ret_warn_on)) \ - if (!drm_WARN(drm, i915_modparams.verbose_state_checks, format)) \ + if 
(!drm_WARN(drm, __i915->display.params.verbose_state_checks, format)) \ drm_err(drm, format); \ unlikely(__ret_warn_on); \ }) diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index ccfe27630f..47297ed858 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -19,6 +19,7 @@ #include "intel_cdclk.h" #include "intel_display_device.h" #include "intel_display_limits.h" +#include "intel_display_params.h" #include "intel_display_power.h" #include "intel_dpll_mgr.h" #include "intel_fbc.h" @@ -297,12 +298,6 @@ struct intel_display { const struct intel_audio_funcs *audio; } funcs; - /* Grouping using anonymous structs. Keep sorted. */ - struct intel_atomic_helper { - struct llist_head free_list; - struct work_struct free_work; - } atomic_helper; - struct { /* backlight registers and fields in struct intel_panel */ struct mutex lock; @@ -347,15 +342,6 @@ struct intel_display { struct intel_global_obj obj; } dbuf; - struct { - wait_queue_head_t waitqueue; - - /* mutex to protect pmdemand programming sequence */ - struct mutex lock; - - struct intel_global_obj obj; - } pmdemand; - struct { /* * dkl.phy_lock protects against concurrent access of the @@ -443,6 +429,15 @@ struct intel_display { bool false_color; } ips; + struct { + wait_queue_head_t waitqueue; + + /* mutex to protect pmdemand programming sequence */ + struct mutex lock; + + struct intel_global_obj obj; + } pmdemand; + struct { struct i915_power_domains domains; @@ -520,6 +515,7 @@ struct intel_display { struct intel_hotplug hotplug; struct intel_opregion opregion; struct intel_overlay *overlay; + struct intel_display_params params; struct intel_vbt_data vbt; struct intel_wm wm; }; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 2836826f8c..d951edb366 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -17,6 +17,7 @@ #include "intel_de.h" #include "intel_crtc_state_dump.h" #include "intel_display_debugfs.h" +#include "intel_display_debugfs_params.h" #include "intel_display_power.h" #include "intel_display_power_well.h" #include "intel_display_types.h" @@ -641,6 +642,17 @@ static int i915_display_info(struct seq_file *m, void *unused) return 0; } +static int i915_display_capabilities(struct seq_file *m, void *unused) +{ + struct drm_i915_private *i915 = node_to_i915(m->private); + struct drm_printer p = drm_seq_file_printer(m); + + intel_display_device_info_print(DISPLAY_INFO(i915), + DISPLAY_RUNTIME_INFO(i915), &p); + + return 0; +} + static int i915_shared_dplls_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -1059,6 +1071,7 @@ static const struct drm_info_list intel_display_debugfs_list[] = { {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, {"i915_power_domain_info", i915_power_domain_info, 0}, {"i915_display_info", i915_display_info, 0}, + {"i915_display_capabilities", i915_display_capabilities, 0}, {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, {"i915_dp_mst_info", i915_dp_mst_info, 0}, {"i915_ddb_info", i915_ddb_info, 0}, @@ -1082,7 +1095,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) { debugfs_create_file(intel_display_debugfs_files[i].name, - S_IRUGO | S_IWUSR, + 0644, minor->debugfs_root, 
to_i915(minor->dev), intel_display_debugfs_files[i].fops); @@ -1098,15 +1111,15 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) intel_hpd_debugfs_register(i915); intel_psr_debugfs_register(i915); intel_wm_debugfs_register(i915); + intel_display_debugfs_params(i915); } static int i915_panel_show(struct seq_file *m, void *data) { - struct drm_connector *connector = m->private; - struct intel_dp *intel_dp = - intel_attached_dp(to_intel_connector(connector)); + struct intel_connector *connector = m->private; + struct intel_dp *intel_dp = intel_attached_dp(connector); - if (connector->status != connector_status_connected) + if (connector->base.status != connector_status_connected) return -ENODEV; seq_printf(m, "Panel power up delay: %d\n", @@ -1124,23 +1137,23 @@ DEFINE_SHOW_ATTRIBUTE(i915_panel); static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) { - struct drm_connector *connector = m->private; - struct drm_i915_private *i915 = to_i915(connector->dev); - struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_connector *connector = m->private; + struct drm_i915_private *i915 = to_i915(connector->base.dev); int ret; ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); if (ret) return ret; - if (!connector->encoder || connector->status != connector_status_connected) { + if (!connector->base.encoder || + connector->base.status != connector_status_connected) { ret = -ENODEV; goto out; } - seq_printf(m, "%s:%d HDCP version: ", connector->name, - connector->base.id); - intel_hdcp_info(m, intel_connector); + seq_printf(m, "%s:%d HDCP version: ", connector->base.name, + connector->base.base.id); + intel_hdcp_info(m, connector); out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); @@ -1151,16 +1164,16 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); static int i915_lpsp_capability_show(struct seq_file *m, void *data) { - struct drm_connector *connector = m->private; - struct drm_i915_private *i915 = to_i915(connector->dev); - struct intel_encoder *encoder; + struct intel_connector *connector = m->private; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_encoder *encoder = intel_attached_encoder(connector); + int connector_type = connector->base.connector_type; bool lpsp_capable = false; - encoder = intel_attached_encoder(to_intel_connector(connector)); if (!encoder) return -ENODEV; - if (connector->status != connector_status_connected) + if (connector->base.status != connector_status_connected) return -ENODEV; if (DISPLAY_VER(i915) >= 13) @@ -1173,15 +1186,15 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data) */ lpsp_capable = encoder->port <= PORT_B; else if (DISPLAY_VER(i915) == 11) - lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI || - connector->connector_type == DRM_MODE_CONNECTOR_eDP); + lpsp_capable = (connector_type == DRM_MODE_CONNECTOR_DSI || + connector_type == DRM_MODE_CONNECTOR_eDP); else if (IS_DISPLAY_VER(i915, 9, 10)) lpsp_capable = (encoder->port == PORT_A && - (connector->connector_type == DRM_MODE_CONNECTOR_DSI || - connector->connector_type == DRM_MODE_CONNECTOR_eDP || - connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)); + (connector_type == DRM_MODE_CONNECTOR_DSI || + connector_type == DRM_MODE_CONNECTOR_eDP || + connector_type == DRM_MODE_CONNECTOR_DisplayPort)); else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) - lpsp_capable = connector->connector_type == 
DRM_MODE_CONNECTOR_eDP; + lpsp_capable = connector_type == DRM_MODE_CONNECTOR_eDP; seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable"); @@ -1191,7 +1204,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability); static int i915_dsc_fec_support_show(struct seq_file *m, void *data) { - struct intel_connector *connector = to_intel_connector(m->private); + struct intel_connector *connector = m->private; struct drm_i915_private *i915 = to_i915(connector->base.dev); struct drm_crtc *crtc; struct intel_dp *intel_dp; @@ -1242,6 +1255,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data) DP_DSC_YCbCr420_Native)), str_yes_no(drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, DP_DSC_YCbCr444))); + seq_printf(m, "DSC_Sink_BPP_Precision: %d\n", + drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd)); seq_printf(m, "Force_DSC_Enable: %s\n", str_yes_no(intel_dp->force_dsc_en)); if (!intel_dp_is_edp(intel_dp)) @@ -1259,13 +1274,13 @@ static ssize_t i915_dsc_fec_support_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { + struct seq_file *m = file->private_data; + struct intel_connector *connector = m->private; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_encoder *encoder = intel_attached_encoder(connector); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); bool dsc_enable = false; int ret; - struct drm_connector *connector = - ((struct seq_file *)file->private_data)->private; - struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (len == 0) return 0; @@ -1303,22 +1318,22 @@ static const struct file_operations i915_dsc_fec_support_fops = { static int i915_dsc_bpc_show(struct seq_file *m, void *data) { - struct drm_connector *connector = m->private; - struct drm_device *dev = connector->dev; + struct intel_connector *connector = m->private; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_encoder *encoder = intel_attached_encoder(connector); struct drm_crtc *crtc; struct intel_crtc_state *crtc_state; - struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); int ret; if (!encoder) return -ENODEV; - ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex); + ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); if (ret) return ret; - crtc = connector->state->crtc; - if (connector->status != connector_status_connected || !crtc) { + crtc = connector->base.state->crtc; + if (connector->base.status != connector_status_connected || !crtc) { ret = -ENODEV; goto out; } @@ -1326,7 +1341,7 @@ static int i915_dsc_bpc_show(struct seq_file *m, void *data) crtc_state = to_intel_crtc_state(crtc->state); seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component); -out: drm_modeset_unlock(&dev->mode_config.connection_mutex); +out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); return ret; } @@ -1335,9 +1350,9 @@ static ssize_t i915_dsc_bpc_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { - struct drm_connector *connector = - ((struct seq_file *)file->private_data)->private; - struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); + struct seq_file *m = file->private_data; + struct intel_connector *connector = m->private; + struct intel_encoder *encoder = 
intel_attached_encoder(connector); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int dsc_bpc = 0; int ret; @@ -1369,22 +1384,22 @@ static const struct file_operations i915_dsc_bpc_fops = { static int i915_dsc_output_format_show(struct seq_file *m, void *data) { - struct drm_connector *connector = m->private; - struct drm_device *dev = connector->dev; + struct intel_connector *connector = m->private; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_encoder *encoder = intel_attached_encoder(connector); struct drm_crtc *crtc; struct intel_crtc_state *crtc_state; - struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); int ret; if (!encoder) return -ENODEV; - ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex); + ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); if (ret) return ret; - crtc = connector->state->crtc; - if (connector->status != connector_status_connected || !crtc) { + crtc = connector->base.state->crtc; + if (connector->base.status != connector_status_connected || !crtc) { ret = -ENODEV; goto out; } @@ -1393,7 +1408,7 @@ static int i915_dsc_output_format_show(struct seq_file *m, void *data) seq_printf(m, "DSC_Output_Format: %s\n", intel_output_format_name(crtc_state->output_format)); -out: drm_modeset_unlock(&dev->mode_config.connection_mutex); +out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); return ret; } @@ -1402,9 +1417,9 @@ static ssize_t i915_dsc_output_format_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { - struct drm_connector *connector = - ((struct seq_file *)file->private_data)->private; - struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); + struct seq_file *m = file->private_data; + struct intel_connector *connector = m->private; + struct intel_encoder *encoder = intel_attached_encoder(connector); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); int dsc_output_format = 0; int ret; @@ -1434,6 +1449,84 @@ static const struct file_operations i915_dsc_output_format_fops = { .write = i915_dsc_output_format_write }; +static int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data) +{ + struct intel_connector *connector = m->private; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_encoder *encoder = intel_attached_encoder(connector); + struct drm_crtc *crtc; + struct intel_dp *intel_dp; + int ret; + + if (!encoder) + return -ENODEV; + + ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); + if (ret) + return ret; + + crtc = connector->base.state->crtc; + if (connector->base.status != connector_status_connected || !crtc) { + ret = -ENODEV; + goto out; + } + + intel_dp = intel_attached_dp(connector); + seq_printf(m, "Force_DSC_Fractional_BPP_Enable: %s\n", + str_yes_no(intel_dp->force_dsc_fractional_bpp_en)); + +out: + drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); + + return ret; +} + +static ssize_t i915_dsc_fractional_bpp_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct intel_connector *connector = m->private; + struct intel_encoder *encoder = intel_attached_encoder(connector); + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + bool dsc_fractional_bpp_enable = false; + int ret; + + if (len == 0) + return 0; + + 
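/*
 * Not part of the patch, an illustrative note: this write handler follows
 * the usual debugfs bool-knob pattern. kstrtobool_from_user() below accepts
 * the "0"/"1", "y"/"n" and "on"/"off" spellings, and success is reported by
 * returning len. Assuming debugfs is mounted at /sys/kernel/debug and the
 * connector is named DP-1 (both illustrative), the knob would be driven as:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fractional_bpp
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_dsc_fractional_bpp
 */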
+	drm_dbg(&i915->drm,
+		"Copied %zu bytes from user to force fractional bpp for DSC\n", len);
+
+	ret = kstrtobool_from_user(ubuf, len, &dsc_fractional_bpp_enable);
+	if (ret < 0)
+		return ret;
+
+	drm_dbg(&i915->drm, "Got %s for DSC Fractional BPP Enable\n",
+		(dsc_fractional_bpp_enable) ? "true" : "false");
+	intel_dp->force_dsc_fractional_bpp_en = dsc_fractional_bpp_enable;
+
+	*offp += len;
+
+	return len;
+}
+
+static int i915_dsc_fractional_bpp_open(struct inode *inode,
+					struct file *file)
+{
+	return single_open(file, i915_dsc_fractional_bpp_show, inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fractional_bpp_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_dsc_fractional_bpp_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_dsc_fractional_bpp_write
+};
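Usage note, following the same pattern as the existing force_dsc knobs (the path is illustrative; the connector directory name varies by system):

	cat /sys/kernel/debug/dri/0/DP-1/i915_dsc_fractional_bpp
	echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fractional_bpp

The write path goes through kstrtobool_from_user(), so "1"/"0", "y"/"n" and "on"/"off" are all accepted.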
+
 /*
  * Returns the Current CRTC's bpc.
  * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
@@ -1470,39 +1563,38 @@ DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe);
 
 /**
  * intel_connector_debugfs_add - add i915 specific connector debugfs files
- * @intel_connector: pointer to a registered drm_connector
+ * @connector: pointer to a registered intel_connector
  *
  * Cleanup will be done by drm_connector_unregister() through a call to
  * drm_debugfs_connector_remove().
  */
-void intel_connector_debugfs_add(struct intel_connector *intel_connector)
+void intel_connector_debugfs_add(struct intel_connector *connector)
 {
-	struct drm_connector *connector = &intel_connector->base;
-	struct dentry *root = connector->debugfs_entry;
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+	struct dentry *root = connector->base.debugfs_entry;
+	int connector_type = connector->base.connector_type;
 
 	/* The connector must have been registered beforehand. */
 	if (!root)
 		return;
 
-	intel_drrs_connector_debugfs_add(intel_connector);
-	intel_psr_connector_debugfs_add(intel_connector);
+	intel_drrs_connector_debugfs_add(connector);
+	intel_psr_connector_debugfs_add(connector);
 
-	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+	if (connector_type == DRM_MODE_CONNECTOR_eDP)
+		debugfs_create_file("i915_panel_timings", 0444, root,
 				    connector, &i915_panel_fops);
 
-	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
-	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
-	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
-		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	    connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+	    connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+		debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
 				    connector, &i915_hdcp_sink_capability_fops);
 	}
 
-	if (DISPLAY_VER(dev_priv) >= 11 &&
-	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
-	      !to_intel_connector(connector)->mst_port) ||
-	     connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+	if (DISPLAY_VER(i915) >= 11 &&
+	    ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst_port) ||
+	     connector_type == DRM_MODE_CONNECTOR_eDP)) {
 		debugfs_create_file("i915_dsc_fec_support", 0644, root,
 				    connector, &i915_dsc_fec_support_fops);
 
@@ -1511,13 +1603,16 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
 		debugfs_create_file("i915_dsc_output_format", 0644, root,
 				    connector, &i915_dsc_output_format_fops);
+
+		debugfs_create_file("i915_dsc_fractional_bpp", 0644, root,
+				    connector, &i915_dsc_fractional_bpp_fops);
 	}
 
-	if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
-	    connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
-	    connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
-	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
-	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+	if (connector_type == DRM_MODE_CONNECTOR_DSI ||
+	    connector_type == DRM_MODE_CONNECTOR_eDP ||
+	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	    connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+	    connector_type == DRM_MODE_CONNECTOR_HDMIB)
		debugfs_create_file("i915_lpsp_capability", 0444, root,
				    connector, &i915_lpsp_capability_fops);
 }
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
new file mode 100644
index 0000000000..b7e68eb624
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include <drm/drm_drv.h>
+
+#include "intel_display_debugfs_params.h"
+#include "i915_drv.h"
+#include "intel_display_params.h"
+
+/* int param */
+static int intel_display_param_int_show(struct seq_file *m, void *data)
+{
+	int *value = m->private;
+
+	seq_printf(m, "%d\n", *value);
+
+	return 0;
+}
+
+static int intel_display_param_int_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, intel_display_param_int_show, inode->i_private);
+}
+
+static ssize_t intel_display_param_int_write(struct file *file,
+					     const char __user *ubuf, size_t len,
+					     loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	int *value = m->private;
+	int ret;
+
+	ret = kstrtoint_from_user(ubuf, len, 0, value);
+	if (ret) {
+		/* support boolean values too */
+		bool b;
+
+		ret = kstrtobool_from_user(ubuf, len, &b);
+		if (!ret)
+			*value = b;
+	}
+
+	return ret ?: len;
+}
+
+static const struct file_operations intel_display_param_int_fops = {
+	.owner = THIS_MODULE,
+	.open = intel_display_param_int_open,
+	.read = seq_read,
+	.write = intel_display_param_int_write,
+	.llseek = default_llseek,
+	.release = single_release,
+};
+
+static const struct file_operations intel_display_param_int_fops_ro = {
+	.owner = THIS_MODULE,
+	.open = intel_display_param_int_open,
+	.read = seq_read,
+	.llseek = default_llseek,
+	.release = single_release,
+};
+
+/* unsigned int param */
+static int intel_display_param_uint_show(struct seq_file *m, void *data)
+{
+	unsigned int *value = m->private;
+
+	seq_printf(m, "%u\n", *value);
+
+	return 0;
+}
+
+static int intel_display_param_uint_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, intel_display_param_uint_show, inode->i_private);
+}
+
+static ssize_t intel_display_param_uint_write(struct file *file,
+					      const char __user *ubuf, size_t len,
+					      loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	unsigned int *value = m->private;
+	int ret;
+
+	ret = kstrtouint_from_user(ubuf, len, 0, value);
+	if (ret) {
+		/* support boolean values too */
+		bool b;
+
+		ret = kstrtobool_from_user(ubuf, len, &b);
+		if (!ret)
+			*value = b;
+	}
+
+	return ret ?: len;
+}
+
+static const struct file_operations intel_display_param_uint_fops = {
+	.owner = THIS_MODULE,
+	.open = intel_display_param_uint_open,
+	.read = seq_read,
+	.write = intel_display_param_uint_write,
+	.llseek = default_llseek,
+	.release = single_release,
+};
+
+static const struct file_operations intel_display_param_uint_fops_ro = {
+	.owner = THIS_MODULE,
+	.open = intel_display_param_uint_open,
+	.read = seq_read,
+	.llseek = default_llseek,
+	.release = single_release,
+};
+
+#define RO(mode) (((mode) & 0222) == 0)
+
+__maybe_unused static struct dentry *
+intel_display_debugfs_create_int(const char *name, umode_t mode,
+				 struct dentry *parent, int *value)
+{
+	return debugfs_create_file_unsafe(name, mode, parent, value,
+					  RO(mode) ? &intel_display_param_int_fops_ro :
+						     &intel_display_param_int_fops);
+}
+
+__maybe_unused static struct dentry *
+intel_display_debugfs_create_uint(const char *name, umode_t mode,
+				  struct dentry *parent, unsigned int *value)
+{
+	return debugfs_create_file_unsafe(name, mode, parent, value,
+					  RO(mode) ? &intel_display_param_uint_fops_ro :
+						     &intel_display_param_uint_fops);
+}
+
+#define _intel_display_param_create_file(parent, name, mode, valp)	\
+	do {								\
+		if (mode)						\
+			_Generic(valp,					\
+				 bool * : debugfs_create_bool,		\
+				 int * : intel_display_debugfs_create_int, \
+				 unsigned int * : intel_display_debugfs_create_uint, \
+				 unsigned long * : debugfs_create_ulong, \
+				 char ** : debugfs_create_str)		\
+				(name, mode, parent, valp);		\
+	} while (0)
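To make the _Generic dispatch above concrete, here is an illustrative expansion (assuming an int member named enable_fbc with debugfs mode 0600; not part of the patch):

/*
 * Illustration only:
 *
 *   _intel_display_param_create_file(dir, "enable_fbc", 0600, &params->enable_fbc);
 *
 * valp has type int *, so _Generic selects intel_display_debugfs_create_int(),
 * and RO(0600) == false picks the writable fops. A bool * member would be
 * routed straight to debugfs_create_bool() instead.
 */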
+/* add a subdirectory with files for each intel display param */
+void intel_display_debugfs_params(struct drm_i915_private *i915)
+{
+	struct drm_minor *minor = i915->drm.primary;
+	struct dentry *dir;
+	char dirname[16];
+
+	snprintf(dirname, sizeof(dirname), "%s_params", i915->drm.driver->name);
+	dir = debugfs_lookup(dirname, minor->debugfs_root);
+	if (!dir)
+		dir = debugfs_create_dir(dirname, minor->debugfs_root);
+	if (IS_ERR(dir))
+		return;
+
+	/*
+	 * Note: We could create files for params needing special handling
+	 * here. Set mode in params to 0 to skip the generic create file, or
+	 * just let the generic create file fail silently with -EEXIST.
+	 */
+
+#define REGISTER(T, x, unused, mode, ...) _intel_display_param_create_file( \
+	dir, #x, mode, &i915->display.params.x);
+	INTEL_DISPLAY_PARAMS_FOR_EACH(REGISTER);
+#undef REGISTER
+}
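For one entry of the X-macro list (defined in intel_display_params.h further below), the REGISTER expansion looks like this (illustrative):

/*
 * param(int, enable_psr, -1, 0600) expands, via REGISTER, to:
 *
 *   _intel_display_param_create_file(dir, "enable_psr", 0600,
 *                                    &i915->display.params.enable_psr);
 *
 * i.e. every parameter with a non-zero mode gets a file under the
 * <driver>_params debugfs directory.
 */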
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
new file mode 100644
index 0000000000..1e9945a404
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_DEBUGFS_PARAMS__
+#define __INTEL_DISPLAY_DEBUGFS_PARAMS__
+
+struct drm_i915_private;
+
+void intel_display_debugfs_params(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DISPLAY_DEBUGFS_PARAMS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 2b1ec23ba9..0b522c6a8d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -12,6 +12,7 @@
 #include "intel_de.h"
 #include "intel_display.h"
 #include "intel_display_device.h"
+#include "intel_display_params.h"
 #include "intel_display_power.h"
 #include "intel_display_reg_defs.h"
 #include "intel_fbc.h"
@@ -937,6 +938,13 @@ void intel_display_device_probe(struct drm_i915_private *i915)
 		DISPLAY_RUNTIME_INFO(i915)->ip.rel = rel;
 		DISPLAY_RUNTIME_INFO(i915)->ip.step = step;
 	}
+
+	intel_display_params_copy(&i915->display.params);
+}
+
+void intel_display_device_remove(struct drm_i915_private *i915)
+{
+	intel_display_params_free(&i915->display.params);
 }
 
 static void __intel_display_device_info_runtime_init(struct drm_i915_private *i915)
@@ -1105,7 +1113,7 @@ void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
 	}
 
 	/* Disable nuclear pageflip by default on pre-g4x */
-	if (!i915->params.nuclear_pageflip &&
+	if (!i915->display.params.nuclear_pageflip &&
 	    DISPLAY_VER(i915) < 5 && !IS_G4X(i915))
 		i915->drm.driver_features &= ~DRIVER_ATOMIC;
 }
@@ -1145,5 +1153,6 @@ bool intel_display_device_enabled(struct drm_i915_private *i915)
 	/* Only valid when HAS_DISPLAY() is true */
 	drm_WARN_ON(&i915->drm, !HAS_DISPLAY(i915));
 
-	return !i915->params.disable_display && !intel_opregion_headless_sku(i915);
+	return !i915->display.params.disable_display &&
+		!intel_opregion_headless_sku(i915);
 }
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 5b5c0e5330..9b1bce2624 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -36,7 +36,7 @@ struct drm_printer;
 #define HAS_ASYNC_FLIPS(i915)		(DISPLAY_VER(i915) >= 5)
 #define HAS_CDCLK_CRAWL(i915)		(DISPLAY_INFO(i915)->has_cdclk_crawl)
 #define HAS_CDCLK_SQUASH(i915)		(DISPLAY_INFO(i915)->has_cdclk_squash)
-#define HAS_CUR_FBC(i915)		(!HAS_GMCH(i915) && DISPLAY_VER(i915) >= 7)
+#define HAS_CUR_FBC(i915)		(!HAS_GMCH(i915) && IS_DISPLAY_VER(i915, 7, 13))
 #define HAS_D12_PLANE_MINIMIZATION(i915)	(IS_ROCKETLAKE(i915) || IS_ALDERLAKE_S(i915))
 #define HAS_DDI(i915)			(DISPLAY_INFO(i915)->has_ddi)
 #define HAS_DISPLAY(i915)		(DISPLAY_RUNTIME_INFO(i915)->pipe_mask != 0)
@@ -47,9 +47,10 @@ struct drm_printer;
 #define HAS_DPT(i915)			(DISPLAY_VER(i915) >= 13)
 #define HAS_DSB(i915)			(DISPLAY_INFO(i915)->has_dsb)
 #define HAS_DSC(__i915)			(DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
+#define HAS_DSC_MST(__i915)		(DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
 #define HAS_FBC(i915)			(DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
 #define HAS_FPGA_DBG_UNCLAIMED(i915)	(DISPLAY_INFO(i915)->has_fpga_dbg)
-#define HAS_FW_BLC(i915)		(DISPLAY_VER(i915) > 2)
+#define HAS_FW_BLC(i915)		(DISPLAY_VER(i915) >= 3)
 #define HAS_GMBUS_IRQ(i915)		(DISPLAY_VER(i915) >= 4)
 #define HAS_GMBUS_BURST_READ(i915)	(DISPLAY_VER(i915) >= 10 || IS_KABYLAKE(i915))
 #define HAS_GMCH(i915)			(DISPLAY_INFO(i915)->has_gmch)
@@ -161,6 +162,7 @@ struct intel_display_device_info {
 
 bool intel_display_device_enabled(struct drm_i915_private *i915);
 void intel_display_device_probe(struct drm_i915_private *i915);
+void intel_display_device_remove(struct drm_i915_private *i915);
 void intel_display_device_info_runtime_init(struct drm_i915_private *i915);
 
 void intel_display_device_info_print(const struct intel_display_device_info *info,
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 44b59ac301..9df9097a02 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -181,6 +181,13 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915)
 	if (!HAS_DISPLAY(i915))
 		return;
 
+	spin_lock_init(&i915->display.fb_tracking.lock);
+	mutex_init(&i915->display.backlight.lock);
+	mutex_init(&i915->display.audio.mutex);
+	mutex_init(&i915->display.wm.wm_mutex);
+	mutex_init(&i915->display.pps.mutex);
+	mutex_init(&i915->display.hdcp.hdcp_mutex);
+
 	intel_display_irq_init(i915);
 	intel_dkl_phy_init(i915);
 	intel_color_init_hooks(i915);
@@ -252,10 +259,6 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
 		if (ret)
 			goto cleanup_vga_client_pw_domain_dmc;
 
-	init_llist_head(&i915->display.atomic_helper.free_list);
-	INIT_WORK(&i915->display.atomic_helper.free_work,
-		  intel_atomic_helper_free_state_worker);
-
 	intel_init_quirks(i915);
 
 	intel_fbc_init(i915);
@@ -423,9 +426,6 @@ void intel_display_driver_remove(struct drm_i915_private *i915)
 	flush_workqueue(i915->display.wq.flip);
 	flush_workqueue(i915->display.wq.modeset);
 
-	flush_work(&i915->display.atomic_helper.free_work);
-	drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));
-
 	/*
 	 * MST topology needs to be suspended so we don't have any calls to
 	 * fbdev after it's finalized. MST will be destroyed later as part of
MST will be destroyed later as part of diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index bff4a76310..a7d8f3fc98 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -340,18 +340,15 @@ static void flip_done_handler(struct drm_i915_private *i915, enum pipe pipe) { struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); - struct drm_crtc_state *crtc_state = crtc->base.state; - struct drm_pending_vblank_event *e = crtc_state->event; - struct drm_device *dev = &i915->drm; - unsigned long irqflags; - - spin_lock_irqsave(&dev->event_lock, irqflags); - crtc_state->event = NULL; + spin_lock(&i915->drm.event_lock); - drm_crtc_send_vblank_event(&crtc->base, e); + if (crtc->flip_done_event) { + drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event); + crtc->flip_done_event = NULL; + } - spin_unlock_irqrestore(&dev->event_lock, irqflags); + spin_unlock(&i915->drm.event_lock); } static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, @@ -896,7 +893,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) } if (!found) - drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n"); + drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir); } static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, @@ -1653,7 +1650,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) else if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_postinstall(dev_priv); - if (DISPLAY_VER(dev_priv) <= 10) + if (DISPLAY_VER(dev_priv) < 11) de_misc_masked |= GEN8_DE_MISC_GSE; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c new file mode 100644 index 0000000000..11e03cfb77 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_params.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "intel_display_params.h" +#include "i915_drv.h" + +#define intel_display_param_named(name, T, perm, desc) \ + module_param_named(name, intel_display_modparams.name, T, perm); \ + MODULE_PARM_DESC(name, desc) +#define intel_display_param_named_unsafe(name, T, perm, desc) \ + module_param_named_unsafe(name, intel_display_modparams.name, T, perm); \ + MODULE_PARM_DESC(name, desc) + +static struct intel_display_params intel_display_modparams __read_mostly = { +#define MEMBER(T, member, value, ...) .member = (value), + INTEL_DISPLAY_PARAMS_FOR_EACH(MEMBER) +#undef MEMBER +}; +/* + * Note: As a rule, keep module parameter sysfs permissions read-only + * 0400. Runtime changes are only supported through i915 debugfs. + * + * For any exceptions requiring write access and runtime changes through module + * parameter sysfs, prevent debugfs file creation by setting the parameter's + * debugfs mode to 0. 
+
+intel_display_param_named_unsafe(vbt_firmware, charp, 0400,
+	"Load VBT from specified file under /lib/firmware");
+
+intel_display_param_named_unsafe(lvds_channel_mode, int, 0400,
+	"Specify LVDS channel mode "
+	"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
+intel_display_param_named_unsafe(panel_use_ssc, int, 0400,
+	"Use Spread Spectrum Clock with panels [LVDS/eDP] "
+	"(default: auto from VBT)");
+
+intel_display_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
+	"Override/Ignore selection of SDVO panel mode in the VBT "
+	"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
+
+intel_display_param_named_unsafe(enable_dc, int, 0400,
+	"Enable power-saving display C-states. "
+	"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
+	"3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
+
+intel_display_param_named_unsafe(enable_dpt, bool, 0400,
+	"Enable display page table (DPT) (default: true)");
+
+intel_display_param_named_unsafe(enable_sagv, bool, 0400,
+	"Enable system agent voltage/frequency scaling (SAGV) (default: true)");
+
+intel_display_param_named_unsafe(disable_power_well, int, 0400,
+	"Disable display power wells when possible "
+	"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
+
+intel_display_param_named_unsafe(enable_ips, bool, 0400, "Enable IPS (default: true)");
+
+intel_display_param_named_unsafe(invert_brightness, int, 0400,
+	"Invert backlight brightness "
+	"(-1 force normal, 0 machine defaults, 1 force inversion), please "
+	"report PCI device ID, subsystem vendor and subsystem device ID "
+	"to dri-devel@lists.freedesktop.org, if your machine needs it. "
+	"It will then be included in an upcoming module version.");
+
+/* WA to get away with the default setting in VBT for early platforms. Will be removed. */
+intel_display_param_named_unsafe(edp_vswing, int, 0400,
+	"Ignore/Override vswing pre-emph table selection from VBT "
+	"(0=use value from vbt [default], 1=low power swing(200mV), "
+	"2=default swing(400mV))");
+
+intel_display_param_named(enable_dpcd_backlight, int, 0400,
+	"Enable support for DPCD backlight control "
+	"(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)");
+
+intel_display_param_named_unsafe(load_detect_test, bool, 0400,
+	"Force-enable the VGA load detect code for testing (default: false). "
+	"For developers only.");
+
+intel_display_param_named_unsafe(force_reset_modeset_test, bool, 0400,
+	"Force a modeset during gpu reset for testing (default: false). "
+	"For developers only.");
+
+intel_display_param_named(disable_display, bool, 0400,
+	"Disable display (default: false)");
+
+intel_display_param_named(verbose_state_checks, bool, 0400,
+	"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
+
+intel_display_param_named_unsafe(nuclear_pageflip, bool, 0400,
+	"Force enable atomic functionality on platforms that don't have full support yet.");
+
+intel_display_param_named_unsafe(enable_dp_mst, bool, 0400,
+	"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
(default: true)"); + +intel_display_param_named_unsafe(enable_fbc, int, 0400, + "Enable frame buffer compression for power savings " + "(default: -1 (use per-chip default))"); + +intel_display_param_named_unsafe(enable_psr, int, 0400, + "Enable PSR " + "(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) " + "Default: -1 (use per-chip default)"); + +intel_display_param_named(psr_safest_params, bool, 0400, + "Replace PSR VBT parameters by the safest and not optimal ones. This " + "is helpful to detect if PSR issues are related to bad values set in " + " VBT. (0=use VBT parameters, 1=use safest parameters)" + "Default: 0"); + +intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400, + "Enable PSR2 selective fetch " + "(0=disabled, 1=enabled) " + "Default: 1"); + +__maybe_unused +static void _param_print_bool(struct drm_printer *p, const char *driver_name, + const char *name, bool val) +{ + drm_printf(p, "%s.%s=%s\n", driver_name, name, str_yes_no(val)); +} + +__maybe_unused +static void _param_print_int(struct drm_printer *p, const char *driver_name, + const char *name, int val) +{ + drm_printf(p, "%s.%s=%d\n", driver_name, name, val); +} + +__maybe_unused +static void _param_print_uint(struct drm_printer *p, const char *driver_name, + const char *name, unsigned int val) +{ + drm_printf(p, "%s.%s=%u\n", driver_name, name, val); +} + +__maybe_unused +static void _param_print_ulong(struct drm_printer *p, const char *driver_name, + const char *name, unsigned long val) +{ + drm_printf(p, "%s.%s=%lu\n", driver_name, name, val); +} + +__maybe_unused +static void _param_print_charp(struct drm_printer *p, const char *driver_name, + const char *name, const char *val) +{ + drm_printf(p, "%s.%s=%s\n", driver_name, name, val); +} + +#define _param_print(p, driver_name, name, val) \ + _Generic(val, \ + bool : _param_print_bool, \ + int : _param_print_int, \ + unsigned int : _param_print_uint, \ + unsigned long : _param_print_ulong, \ + char * : _param_print_charp)(p, driver_name, name, val) + +/** + * intel_display_params_dump - dump intel display modparams + * @i915: i915 device + * @p: the &drm_printer + * + * Pretty printer for i915 modparams. + */ +void intel_display_params_dump(struct drm_i915_private *i915, struct drm_printer *p) +{ +#define PRINT(T, x, ...) _param_print(p, i915->drm.driver->name, #x, i915->display.params.x); + INTEL_DISPLAY_PARAMS_FOR_EACH(PRINT); +#undef PRINT +} + +__maybe_unused static void _param_dup_charp(char **valp) +{ + *valp = kstrdup(*valp ? *valp : "", GFP_ATOMIC); +} + +__maybe_unused static void _param_nop(void *valp) +{ +} + +#define _param_dup(valp) \ + _Generic(valp, \ + char ** : _param_dup_charp, \ + default : _param_nop) \ + (valp) + +void intel_display_params_copy(struct intel_display_params *dest) +{ + *dest = intel_display_modparams; +#define DUP(T, x, ...) _param_dup(&dest->x); + INTEL_DISPLAY_PARAMS_FOR_EACH(DUP); +#undef DUP +} + +__maybe_unused static void _param_free_charp(char **valp) +{ + kfree(*valp); + *valp = NULL; +} + +#define _param_free(valp) \ + _Generic(valp, \ + char ** : _param_free_charp, \ + default : _param_nop) \ + (valp) + +/* free the allocated members, *not* the passed in params itself */ +void intel_display_params_free(struct intel_display_params *params) +{ +#define FREE(T, x, ...) 
+
+__maybe_unused static void _param_dup_charp(char **valp)
+{
+	*valp = kstrdup(*valp ? *valp : "", GFP_ATOMIC);
+}
+
+__maybe_unused static void _param_nop(void *valp)
+{
+}
+
+#define _param_dup(valp)			\
+	_Generic(valp,				\
+		 char ** : _param_dup_charp,	\
+		 default : _param_nop)		\
+		(valp)
+
+void intel_display_params_copy(struct intel_display_params *dest)
+{
+	*dest = intel_display_modparams;
+#define DUP(T, x, ...) _param_dup(&dest->x);
+	INTEL_DISPLAY_PARAMS_FOR_EACH(DUP);
+#undef DUP
+}
+
+__maybe_unused static void _param_free_charp(char **valp)
+{
+	kfree(*valp);
+	*valp = NULL;
+}
+
+#define _param_free(valp)			\
+	_Generic(valp,				\
+		 char ** : _param_free_charp,	\
+		 default : _param_nop)		\
+		(valp)
+
+/* free the allocated members, *not* the passed in params itself */
+void intel_display_params_free(struct intel_display_params *params)
+{
+#define FREE(T, x, ...) _param_free(&params->x);
+	INTEL_DISPLAY_PARAMS_FOR_EACH(FREE);
+#undef FREE
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
new file mode 100644
index 0000000000..6206cc51df
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_DISPLAY_PARAMS_H_
+#define _INTEL_DISPLAY_PARAMS_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct drm_i915_private;
+
+/*
+ * Invoke param, a function-like macro, for each intel display param, with
+ * arguments:
+ *
+ * param(type, name, value, mode)
+ *
+ * type: parameter type, one of {bool, int, unsigned int, unsigned long, char *}
+ * name: name of the parameter
+ * value: initial/default value of the parameter
+ * mode: debugfs file permissions, one of {0400, 0600, 0}, use 0 to not create
+ *       debugfs file
+ */
+#define INTEL_DISPLAY_PARAMS_FOR_EACH(param) \
+	param(char *, vbt_firmware, NULL, 0400) \
+	param(int, lvds_channel_mode, 0, 0400) \
+	param(int, panel_use_ssc, -1, 0600) \
+	param(int, vbt_sdvo_panel_type, -1, 0400) \
+	param(int, enable_dc, -1, 0400) \
+	param(bool, enable_dpt, true, 0400) \
+	param(bool, enable_sagv, true, 0600) \
+	param(int, disable_power_well, -1, 0400) \
+	param(bool, enable_ips, true, 0600) \
+	param(int, invert_brightness, 0, 0600) \
+	param(int, edp_vswing, 0, 0400) \
+	param(int, enable_dpcd_backlight, -1, 0600) \
+	param(bool, load_detect_test, false, 0600) \
+	param(bool, force_reset_modeset_test, false, 0600) \
+	param(bool, disable_display, false, 0400) \
+	param(bool, verbose_state_checks, true, 0400) \
+	param(bool, nuclear_pageflip, false, 0400) \
+	param(bool, enable_dp_mst, true, 0600) \
+	param(int, enable_fbc, -1, 0600) \
+	param(int, enable_psr, -1, 0600) \
+	param(bool, psr_safest_params, false, 0400) \
+	param(bool, enable_psr2_sel_fetch, true, 0400) \
+
+#define MEMBER(T, member, ...) T member;
+struct intel_display_params {
+	INTEL_DISPLAY_PARAMS_FOR_EACH(MEMBER);
+};
+#undef MEMBER
+
+void intel_display_params_dump(struct drm_i915_private *i915,
+			       struct drm_printer *p);
+void intel_display_params_copy(struct intel_display_params *dest);
+void intel_display_params_free(struct intel_display_params *params);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index e25785ae1c..6fd4fa5225 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -405,8 +405,8 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
 						     struct drm_i915_private,
 						     display.power.domains);
 
-	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
-		power_domains->async_put_wakeref);
+	drm_dbg(&i915->drm, "async_put_wakeref: %s\n",
+		str_yes_no(power_domains->async_put_wakeref));
 
 	print_power_domains(power_domains, "async_put_domains[0]",
 			    &power_domains->async_put_domains[0]);
@@ -967,7 +967,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
 		DISPLAY_VER(dev_priv) >= 11 ?
 			DC_STATE_EN_DC9 : 0;
 
-	if (!dev_priv->params.disable_power_well)
+	if (!dev_priv->display.params.disable_power_well)
 		max_dc = 0;
 
 	if (enable_dc >= 0 && enable_dc <= max_dc) {
@@ -1016,11 +1016,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 {
 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
 
-	dev_priv->params.disable_power_well =
+	dev_priv->display.params.disable_power_well =
 		sanitize_disable_power_well_option(dev_priv,
-						   dev_priv->params.disable_power_well);
+						   dev_priv->display.params.disable_power_well);
 	power_domains->allowed_dc_mask =
-		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
+		get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc);
 
 	power_domains->target_dc_state =
 		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
@@ -1697,14 +1697,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
 	if (resume)
 		intel_dmc_load_program(dev_priv);
 
-	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
-	if (DISPLAY_VER(dev_priv) >= 12)
+	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
+	if (IS_DISPLAY_IP_RANGE(dev_priv, IP_VER(12, 0), IP_VER(13, 0)))
 		intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
 			     DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
 			     DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
 
 	/* Wa_14011503030:xelpd */
-	if (DISPLAY_VER(dev_priv) >= 13)
+	if (DISPLAY_VER(dev_priv) == 13)
 		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
 }
 
@@ -1950,7 +1950,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
 	intel_display_power_get(i915, POWER_DOMAIN_INIT);
 
 	/* Disable power well support if the user asked so. */
-	if (!i915->params.disable_power_well) {
+	if (!i915->display.params.disable_power_well) {
 		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
 		i915->display.power.domains.disable_wakeref =
 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
@@ -1977,7 +1977,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
 		fetch_and_zero(&i915->display.power.domains.init_wakeref);
 
 	/* Remove the refcount we took to keep power well support disabled. */
-	if (!i915->params.disable_power_well)
+	if (!i915->display.params.disable_power_well)
 		intel_display_power_put(i915, POWER_DOMAIN_INIT,
 					fetch_and_zero(&i915->display.power.domains.disable_wakeref));
 
@@ -2096,7 +2096,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
 	 * Even if power well support was disabled we still want to disable
 	 * power wells if power domains must be deinitialized for suspend.
 	 */
-	if (!i915->params.disable_power_well)
+	if (!i915->display.params.disable_power_well)
 		intel_display_power_put(i915, POWER_DOMAIN_INIT,
 					fetch_and_zero(&i915->display.power.domains.disable_wakeref));
 
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 82dd5be72e..06900ff307 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -1411,20 +1411,16 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 {
 	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
 	enum dpio_phy phy;
-	enum pipe pipe;
 	u32 tmp;
 
 	drm_WARN_ON_ONCE(&dev_priv->drm, id != VLV_DISP_PW_DPIO_CMN_BC &&
 			 id != CHV_DISP_PW_DPIO_CMN_D);
 
-	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
-		pipe = PIPE_A;
+	if (id == VLV_DISP_PW_DPIO_CMN_BC)
 		phy = DPIO_PHY0;
-	} else {
-		pipe = PIPE_C;
+	else
 		phy = DPIO_PHY1;
-	}
 
 	/* since ref/cri clock was enabled */
 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
@@ -1439,24 +1435,24 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 	vlv_dpio_get(dev_priv);
 
 	/* Enable dynamic power down */
-	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+	tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28);
 	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
 		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
-	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
 
 	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
-		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+		tmp = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW6_CH1);
 		tmp |= DPIO_DYNPWRDOWNEN_CH1;
-		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+		vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW6_CH1, tmp);
 	} else {
 		/*
 		 * Force the non-existing CL2 off. BXT does this
 		 * too, so maybe it saves some power even though
 		 * CL2 doesn't exist?
 		 */
-		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+		tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30);
 		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
-		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp);
 	}
 
 	vlv_dpio_put(dev_priv);
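Background for the pipe-to-phy conversion above (a clarifying aside, not new driver code):

/*
 * Before this change the sideband accessors took a pipe that merely aliased
 * the PHY: vlv_dpio_read(dev_priv, pipe, reg) with PIPE_A standing in for
 * DPIO_PHY0 and PIPE_C for DPIO_PHY1, as the removed assignments show.
 * Passing the phy directly removes the alias without changing which
 * sideband port is addressed.
 */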
@@ -1510,7 +1506,6 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
 				     enum dpio_channel ch, bool override, unsigned int mask)
 {
-	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
 	u32 reg, val, expected, actual;
 
 	/*
@@ -1529,7 +1524,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
 		reg = _CHV_CMN_DW6_CH1;
 
 	vlv_dpio_get(dev_priv);
-	val = vlv_dpio_read(dev_priv, pipe, reg);
+	val = vlv_dpio_read(dev_priv, phy, reg);
 	vlv_dpio_put(dev_priv);
 
 	/*
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 17178d5d77..c2c347b224 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -29,7 +29,7 @@ void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
 		return;
 
 	/* reset doesn't touch the display */
-	if (!dev_priv->params.force_reset_modeset_test &&
+	if (!dev_priv->display.params.force_reset_modeset_test &&
 	    !gpu_reset_clobbers_display(dev_priv))
 		return;
 
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 65ea37fe8c..9529f77b87 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -198,6 +198,12 @@ struct intel_encoder {
 			     struct intel_encoder *,
 			     const struct intel_crtc_state *,
 			     const struct drm_connector_state *);
+	void (*audio_enable)(struct intel_encoder *encoder,
+			     const struct intel_crtc_state *crtc_state,
+			     const struct drm_connector_state *conn_state);
+	void (*audio_disable)(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *old_crtc_state,
+			      const struct drm_connector_state *old_conn_state);
 	/* Read out the current hw state of this connector, returning true if
 	 * the encoder is active. If the encoder is enabled it also set the pipe
 	 * it is connected to in the pipe parameter. */
@@ -603,6 +609,13 @@ struct intel_connector {
 	 *  and active (i.e. dpms ON state). */
 	bool (*get_hw_state)(struct intel_connector *);
 
+	/*
+	 * Optional hook called during init/resume to sync any state
+	 * stored in the connector (eg. DSC state) wrt. the HW state.
+	 */
+	void (*sync_state)(struct intel_connector *connector,
+			   const struct intel_crtc_state *crtc_state);
+
 	/* Panel info for eDP and LVDS */
 	struct intel_panel panel;
 
@@ -624,6 +637,9 @@ struct intel_connector {
 		struct drm_dp_aux *dsc_decompression_aux;
 		u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
 		u8 fec_capability;
+
+		u8 dsc_hblank_expansion_quirk:1;
+		u8 dsc_decompression_enabled:1;
 	} dp;
 
 	/* Work struct to schedule a uevent on link train failure */
@@ -675,10 +691,6 @@ struct intel_atomic_state {
 
 	bool skip_intermediate_wm;
 
 	bool rps_interactive;
-
-	struct i915_sw_fence commit_ready;
-
-	struct llist_node freed;
 };
 
 struct intel_plane_state {
@@ -707,6 +719,7 @@ struct intel_plane_state {
 #define PLANE_HAS_FENCE BIT(0)
 
 	struct intel_fb_view view;
+	u32 phys_dma_addr; /* for cursor_needs_physical */
 
 	/* Plane pxp decryption state */
 	bool decrypt;
@@ -1015,7 +1028,6 @@ struct intel_c10pll_state {
 };
 
 struct intel_c20pll_state {
-	u32 link_bit_rate;
 	u32 clock; /* in kHz */
 	u16 tx[3];
 	u16 cmn[4];
@@ -1210,6 +1222,7 @@ struct intel_crtc_state {
 	bool has_psr2;
 	bool enable_psr2_sel_fetch;
 	bool req_psr2_sdp_prior_scanline;
+	bool has_panel_replay;
 	bool wm_level_disabled;
 	u32 dc3co_exitline;
 	u16 su_y_granularity;
@@ -1361,7 +1374,8 @@ struct intel_crtc_state {
 	struct {
 		bool compression_enable;
 		bool dsc_split;
-		u16 compressed_bpp;
+		/* Compressed Bpp in U6.4 format (first 4 bits for fractional part) */
+		u16 compressed_bpp_x16;
 		u8 slice_count;
 		struct drm_dsc_config config;
 	} dsc;
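A worked example of the U6.4 (x16) representation now used for compressed_bpp_x16, assuming the usual x16 helpers seen elsewhere in this series (shift/mask by 4 bits):

/*
 * compressed_bpp_x16 = 168 (0xa8) encodes 168 / 16 = 10.5 bpp:
 *
 *   to_bpp_int(168)  == 168 >> 4  == 10
 *   to_bpp_frac(168) == 168 & 0xf == 8    (8/16 = 0.5)
 *   to_bpp_x16(10)   == 10 << 4   == 160  (integer bpp -> fixed point)
 *
 * With 4 fractional bits the finest representable step is 1/16 bpp.
 */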
@@ -1467,6 +1481,9 @@ struct intel_crtc {
 
 	struct intel_crtc_state *config;
 
+	/* armed event for async flip */
+	struct drm_pending_vblank_event *flip_done_event;
+
 	/* Access to these should be protected by dev_priv->irq_lock. */
 	bool cpu_fifo_underrun_disabled;
 	bool pch_fifo_underrun_disabled;
@@ -1707,9 +1724,13 @@ struct intel_psr {
 	bool irq_aux_error;
 	u16 su_w_granularity;
 	u16 su_y_granularity;
+	bool source_panel_replay_support;
+	bool sink_panel_replay_support;
+	bool panel_replay_enabled;
 	u32 dc3co_exitline;
 	u32 dc3co_exit_delay;
 	struct delayed_work dc3co_work;
+	u8 entry_setup_frames;
 };
 
 struct intel_dp {
@@ -1808,6 +1829,7 @@ struct intel_dp {
 	/* Display stream compression testing */
 	bool force_dsc_en;
 	int force_dsc_output_format;
+	bool force_dsc_fractional_bpp_en;
 	int force_dsc_bpc;
 
 	bool hobl_failed;
@@ -1992,17 +2014,6 @@ dp_to_lspcon(struct intel_dp *intel_dp)
 
 #define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
 
-#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
-			   (intel_dp)->psr.source_support)
-
-static inline bool intel_encoder_can_psr(struct intel_encoder *encoder)
-{
-	if (!intel_encoder_is_dp(encoder))
-		return false;
-
-	return CAN_PSR(enc_to_intel_dp(encoder));
-}
-
 static inline struct intel_digital_port *
 hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
 {
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 073b85b576..b70502586a 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -335,77 +335,6 @@ static void disable_event_handler(struct drm_i915_private *i915,
 	intel_de_write(i915, htp_reg, 0);
 }
 
-static void
-disable_flip_queue_event(struct drm_i915_private *i915,
-			 i915_reg_t ctl_reg, i915_reg_t htp_reg)
-{
-	u32 event_ctl;
-	u32 event_htp;
-
-	event_ctl = intel_de_read(i915, ctl_reg);
-	event_htp = intel_de_read(i915, htp_reg);
-	if (event_ctl != (DMC_EVT_CTL_ENABLE |
-			  DMC_EVT_CTL_RECURRING |
-			  REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
-					 DMC_EVT_CTL_TYPE_EDGE_0_1) |
-			  REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
-					 DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
-	    !event_htp) {
-		drm_dbg_kms(&i915->drm,
-			    "Unexpected DMC event configuration (control %08x htp %08x)\n",
-			    event_ctl, event_htp);
-		return;
-	}
-
-	disable_event_handler(i915, ctl_reg, htp_reg);
-}
-
-static bool
-get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id,
-			  i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
-{
-	if (dmc_id == DMC_FW_MAIN) {
-		if (DISPLAY_VER(i915) == 12) {
-			*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
-			*htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
-
-			return true;
-		}
-	} else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) {
-		if (IS_DG2(i915)) {
-			*ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
-			*htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
-
-			return true;
-		}
-	}
-
-	return false;
-}
-
-static void
-disable_all_flip_queue_events(struct drm_i915_private *i915)
-{
-	enum intel_dmc_id dmc_id;
-
-	/* TODO: check if the following applies to all D13+ platforms. */
-	if (!IS_TIGERLAKE(i915))
-		return;
-
-	for_each_dmc_id(dmc_id) {
-		i915_reg_t ctl_reg;
-		i915_reg_t htp_reg;
-
-		if (!has_dmc_id_fw(i915, dmc_id))
-			continue;
-
-		if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
-			continue;
-
-		disable_flip_queue_event(i915, ctl_reg, htp_reg);
-	}
-}
-
 static void disable_all_event_handlers(struct drm_i915_private *i915)
 {
 	enum intel_dmc_id dmc_id;
@@ -503,6 +432,16 @@ static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
 	return offset >= start && offset < end;
 }
 
+static bool is_dmc_evt_htp_reg(struct drm_i915_private *i915,
+			       enum intel_dmc_id dmc_id, i915_reg_t reg)
+{
+	u32 offset = i915_mmio_reg_offset(reg);
+	u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, 0));
+	u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
+
+	return offset >= start && offset < end;
+}
+
 static bool disable_dmc_evt(struct drm_i915_private *i915,
 			    enum intel_dmc_id dmc_id,
 			    i915_reg_t reg, u32 data)
@@ -514,6 +453,16 @@ static bool disable_dmc_evt(struct drm_i915_private *i915,
 	if (dmc_id != DMC_FW_MAIN)
 		return true;
 
+	/* also disable the flip queue event on the main DMC on TGL */
+	if (IS_TIGERLAKE(i915) &&
+	    REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC)
+		return true;
+
+	/* also disable the HRR event on the main DMC on TGL/ADLS */
+	if ((IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915)) &&
+	    REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A)
+		return true;
+
 	return false;
 }
 
@@ -579,13 +528,6 @@ void intel_dmc_load_program(struct drm_i915_private *i915)
 
 	gen9_set_dc_state_debugmask(i915);
 
-	/*
-	 * Flip queue events need to be disabled before enabling DC5/6.
-	 * i915 doesn't use the flip queue feature, so disable it already
-	 * here.
-	 */
-	disable_all_flip_queue_events(i915);
-
 	pipedmc_clock_gating_wa(i915, false);
 }
 
@@ -781,9 +723,17 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
 			return 0;
 	}
 
+	drm_dbg_kms(&i915->drm, "DMC %d:\n", dmc_id);
 	for (i = 0; i < mmio_count; i++) {
 		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
 		dmc_info->mmiodata[i] = mmiodata[i];
+
" (disabling)" : ""); } dmc_info->mmio_count = mmio_count; dmc_info->start_mmioaddr = start_mmioaddr; diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h index cf10094aca..90d0dbb41c 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h +++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h @@ -60,6 +60,7 @@ #define DMC_EVT_CTL_EVENT_ID_MASK REG_GENMASK(15, 8) #define DMC_EVT_CTL_EVENT_ID_FALSE 0x01 +#define DMC_EVT_CTL_EVENT_ID_VBLANK_A 0x32 /* main DMC */ /* An event handler scheduled to run at a 1 kHz frequency. */ #define DMC_EVT_CTL_EVENT_ID_CLK_MSEC 0xbf diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index ef09356bcb..4e8545126e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -85,8 +85,8 @@ #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 -/* DP DSC FEC Overhead factor = 1/(0.972261) */ -#define DP_DSC_FEC_OVERHEAD_FACTOR 972261 +/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */ +#define DP_DSC_FEC_OVERHEAD_FACTOR 1028530 /* Compliance test status bits */ #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 @@ -124,7 +124,31 @@ static void intel_dp_unset_edid(struct intel_dp *intel_dp); /* Is link rate UHBR and thus 128b/132b? */ bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state) { - return crtc_state->port_clock >= 1000000; + return drm_dp_is_uhbr_rate(crtc_state->port_clock); +} + +/** + * intel_dp_link_symbol_size - get the link symbol size for a given link rate + * @rate: link rate in 10kbit/s units + * + * Returns the link symbol size in bits/symbol units depending on the link + * rate -> channel coding. + */ +int intel_dp_link_symbol_size(int rate) +{ + return drm_dp_is_uhbr_rate(rate) ? 32 : 10; +} + +/** + * intel_dp_link_symbol_clock - convert link rate to link symbol clock + * @rate: link rate in 10kbit/s units + * + * Returns the link symbol clock frequency in kHz units depending on the + * link rate and channel coding. + */ +int intel_dp_link_symbol_clock(int rate) +{ + return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate)); } static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) @@ -331,6 +355,9 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp) /* * The required data bandwidth for a mode with given pixel clock and bpp. This * is the required net bandwidth independent of the data bandwidth efficiency. + * + * TODO: check if callers of this functions should use + * intel_dp_effective_data_rate() instead. */ int intel_dp_link_required(int pixel_clock, int bpp) @@ -339,6 +366,22 @@ intel_dp_link_required(int pixel_clock, int bpp) return DIV_ROUND_UP(pixel_clock * bpp, 8); } +/** + * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead + * @pixel_clock: pixel clock in kHz + * @bpp_x16: bits per pixel .4 fixed point format + * @bw_overhead: BW allocation overhead in 1ppm units + * + * Return the effective pixel data rate in kB/sec units taking into account + * the provided SSC, FEC, DSC BW allocation overhead. + */ +int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, + int bw_overhead) +{ + return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead), + 1000000 * 16 * 8); +} + /* * Given a link rate and lanes, get the data bandwidth. 
+
 /*
  * Given a link rate and lanes, get the data bandwidth.
  *
@@ -362,29 +405,27 @@ intel_dp_link_required(int pixel_clock, int bpp)
 int
 intel_dp_max_data_rate(int max_link_rate, int max_lanes)
 {
-	if (max_link_rate >= 1000000) {
-		/*
-		 * UHBR rates always use 128b/132b channel encoding, and have
-		 * 97.71% data bandwidth efficiency. Consider max_link_rate the
-		 * link bit rate in units of 10000 bps.
-		 */
-		int max_link_rate_kbps = max_link_rate * 10;
-
-		max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000);
-		max_link_rate = max_link_rate_kbps / 8;
-	}
+	int ch_coding_efficiency =
+		drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
+	int max_link_rate_kbps = max_link_rate * 10;
 
+	/*
+	 * UHBR rates always use 128b/132b channel encoding, and have
+	 * 97.71% data bandwidth efficiency. Consider max_link_rate the
+	 * link bit rate in units of 10000 bps.
+	 */
 	/*
 	 * Lower than UHBR rates always use 8b/10b channel encoding, and have
 	 * 80% data bandwidth efficiency for SST non-FEC. However, this turns
 	 * out to be a nop by coincidence, and can be skipped:
 	 *
 	 *	int max_link_rate_kbps = max_link_rate * 10;
-	 *	max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10);
+	 *	max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10);
 	 *	max_link_rate = max_link_rate_kbps / 8;
 	 */
-
-	return max_link_rate * max_lanes;
+	return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes,
+					      ch_coding_efficiency),
+				  1000000 * 8);
 }
 
 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
@@ -680,8 +721,22 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
 
 u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
 {
-	return div_u64(mul_u32_u32(mode_clock, 1000000U),
-		       DP_DSC_FEC_OVERHEAD_FACTOR);
+	return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR),
+		       1000000U);
+}
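With the constant now expressed as a multiplier in ppm, the FEC-adjusted clock becomes a straightforward scale-up; a worked example:

/*
 * intel_dp_mode_to_fec_clock(594000)
 *   = 594000 * 1028530 / 1000000 = 610946 kHz (div_u64 truncates),
 * vs. the old form 594000 * 1000000 / 972261 = 610947 kHz -- the same
 * ~2.853% FEC+DSC overhead, within 1 kHz of rounding.
 */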
+
+int intel_dp_bw_fec_overhead(bool fec_enabled)
+{
+	/*
+	 * TODO: Calculate the actual overhead for a given mode.
+	 * The hard-coded 1/0.972261=2.853% overhead factor
+	 * corresponds (for instance) to the 8b/10b DP FEC 2.4% +
+	 * 0.453% DSC overhead. This is enough for a 3840 width mode,
+	 * which has a DSC overhead of up to ~0.2%, but may not be
+	 * enough for a 1024 width mode where this is ~0.8% (on a 4
+	 * lane DP link, with 2 DSC slices and 8 bpp color depth).
+	 */
+	return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000;
+}
 
 static int
@@ -1367,15 +1422,16 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
 	if (DISPLAY_VER(dev_priv) >= 12)
 		return true;
 
-	if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
+	if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
+	    !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
 		return true;
 
 	return false;
 }
 
-static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
-				  const struct intel_connector *connector,
-				  const struct intel_crtc_state *pipe_config)
+bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+			   const struct intel_connector *connector,
+			   const struct intel_crtc_state *pipe_config)
 {
 	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
 		drm_dp_sink_supports_fec(connector->dp.fec_capability);
@@ -1388,6 +1444,7 @@ static bool intel_dp_supports_dsc(const struct intel_connector *connector,
 		return false;
 
 	return intel_dsc_source_support(crtc_state) &&
+		connector->dp.dsc_decompression_aux &&
 		drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd);
 }
 
@@ -1721,15 +1778,15 @@ static bool intel_dp_dsc_supports_format(const struct intel_connector *connector,
 	return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format);
 }
 
-static bool is_bw_sufficient_for_dsc_config(u16 compressed_bpp, u32 link_clock,
+static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock,
 					    u32 lane_count, u32 mode_clock,
 					    enum intel_output_format output_format,
 					    int timeslots)
 {
 	u32 available_bw, required_bw;
 
-	available_bw = (link_clock * lane_count * timeslots) / 8;
-	required_bw = compressed_bpp * (intel_dp_mode_to_fec_clock(mode_clock));
+	available_bw = (link_clock * lane_count * timeslots * 16) / 8;
+	required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock));
 
 	return available_bw > required_bw;
 }
@@ -1737,7 +1794,7 @@ static bool is_bw_sufficient_for_dsc_config(u16 compressed_bpp, u32 link_clock,
 static int dsc_compute_link_config(struct intel_dp *intel_dp,
 				   struct intel_crtc_state *pipe_config,
 				   struct link_config_limits *limits,
-				   u16 compressed_bpp,
+				   u16 compressed_bppx16,
 				   int timeslots)
 {
 	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -1752,8 +1809,8 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
 	for (lane_count = limits->min_lane_count;
 	     lane_count <= limits->max_lane_count;
 	     lane_count <<= 1) {
-		if (!is_bw_sufficient_for_dsc_config(compressed_bpp, link_rate, lane_count,
-						     adjusted_mode->clock,
+		if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate,
+						     lane_count, adjusted_mode->clock,
 						     pipe_config->output_format,
 						     timeslots))
 			continue;
@@ -1795,7 +1852,7 @@ u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connector,
 	return 0;
 }
 
-static int dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
+int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
 {
 	/* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */
 	switch (pipe_config->output_format) {
@@ -1812,9 +1869,9 @@ static int dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
 	return 0;
 }
 
-static int dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
-				       struct intel_crtc_state *pipe_config,
-				       int bpc)
+int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
+					 struct intel_crtc_state *pipe_config,
+					 int bpc)
 {
 	return intel_dp_dsc_max_sink_compressed_bppx16(connector,
						       pipe_config, bpc) >> 4;
@@ -1834,7 +1891,7 @@ static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
 	 * Max Compressed bpp for Gen 13+ is 27bpp.
 	 * For earlier platforms it is 23bpp. (Bspec:49259).
 	 */
-	if (DISPLAY_VER(i915) <= 12)
+	if (DISPLAY_VER(i915) < 13)
 		return 23;
 	else
 		return 27;
@@ -1859,17 +1916,19 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
 	dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
 
 	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
-		if (valid_dsc_bpp[i] < dsc_min_bpp ||
-		    valid_dsc_bpp[i] > dsc_max_bpp)
+		if (valid_dsc_bpp[i] < dsc_min_bpp)
+			continue;
+		if (valid_dsc_bpp[i] > dsc_max_bpp)
 			break;
 
 		ret = dsc_compute_link_config(intel_dp,
 					      pipe_config,
 					      limits,
-					      valid_dsc_bpp[i],
+					      valid_dsc_bpp[i] << 4,
 					      timeslots);
 		if (ret == 0) {
-			pipe_config->dsc.compressed_bpp = valid_dsc_bpp[i];
+			pipe_config->dsc.compressed_bpp_x16 =
+				to_bpp_x16(valid_dsc_bpp[i]);
 			return 0;
 		}
 	}
@@ -1885,6 +1944,7 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
  */
 static int
 xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
+			      const struct intel_connector *connector,
 			      struct intel_crtc_state *pipe_config,
 			      struct link_config_limits *limits,
 			      int dsc_max_bpp,
@@ -1892,22 +1952,38 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
 			      int pipe_bpp,
 			      int timeslots)
 {
-	u16 compressed_bpp;
+	u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	u16 compressed_bppx16;
+	u8 bppx16_step;
 	int ret;
 
-	/* Compressed BPP should be less than the Input DSC bpp */
-	dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
+	if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1)
+		bppx16_step = 16;
+	else
+		bppx16_step = 16 / bppx16_incr;
 
-	for (compressed_bpp = dsc_max_bpp;
-	     compressed_bpp >= dsc_min_bpp;
-	     compressed_bpp--) {
+	/* Compressed BPP should be less than the Input DSC bpp */
+	dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step);
+	dsc_min_bpp = dsc_min_bpp << 4;
+
+	for (compressed_bppx16 = dsc_max_bpp;
+	     compressed_bppx16 >= dsc_min_bpp;
+	     compressed_bppx16 -= bppx16_step) {
+		if (intel_dp->force_dsc_fractional_bpp_en &&
+		    !to_bpp_frac(compressed_bppx16))
+			continue;
 		ret = dsc_compute_link_config(intel_dp,
 					      pipe_config,
 					      limits,
-					      compressed_bpp,
+					      compressed_bppx16,
 					      timeslots);
 		if (ret == 0) {
-			pipe_config->dsc.compressed_bpp = compressed_bpp;
+			pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
+			if (intel_dp->force_dsc_fractional_bpp_en &&
+			    to_bpp_frac(compressed_bppx16))
+				drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");
+
 			return 0;
 		}
 	}
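A worked example of the fractional search above (values assume a DISPLAY_VER >= 14 platform and a sink reporting 1/8 bpp precision):

/*
 * bppx16_incr = drm_dp_dsc_sink_bpp_incr(...) = 8  ->  bppx16_step = 16/8 = 2
 *
 * The loop then walks compressed bpp downward in 0.125 bpp steps, e.g.
 * 170 (10.625) -> 168 (10.5) -> 166 (10.375) -> ...
 * and with force_dsc_fractional_bpp_en set, purely integer values
 * (to_bpp_frac() == 0) are skipped so a fractional bpp is actually tested.
 */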
@@ -1928,12 +2004,14 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
 	int dsc_joiner_max_bpp;
 
 	dsc_src_min_bpp = dsc_src_min_compressed_bpp();
-	dsc_sink_min_bpp = dsc_sink_min_compressed_bpp(pipe_config);
+	dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
 	dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
 	dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
 
 	dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
-	dsc_sink_max_bpp = dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3);
+	dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+								pipe_config,
+								pipe_bpp / 3);
 	dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
 
 	dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock,
@@ -1943,7 +2021,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
 	dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
 
 	if (DISPLAY_VER(i915) >= 13)
-		return xelpd_dsc_compute_link_config(intel_dp, pipe_config, limits,
+		return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
 						     dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
 	return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
 					   dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
@@ -2088,19 +2166,22 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
 	pipe_config->lane_count = limits->max_lane_count;
 
 	dsc_src_min_bpp = dsc_src_min_compressed_bpp();
-	dsc_sink_min_bpp = dsc_sink_min_compressed_bpp(pipe_config);
+	dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
 	dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
 	dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
 
 	dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
-	dsc_sink_max_bpp = dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3);
+	dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+								pipe_config,
+								pipe_bpp / 3);
 	dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
 	dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
 
 	/* Compressed BPP should be less than the Input DSC bpp */
 	dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
 
-	pipe_config->dsc.compressed_bpp = max(dsc_min_bpp, dsc_max_bpp);
+	pipe_config->dsc.compressed_bpp_x16 =
+		to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp));
 
 	pipe_config->pipe_bpp = pipe_bpp;
 
@@ -2122,8 +2203,9 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 		&pipe_config->hw.adjusted_mode;
 	int ret;
 
-	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
-		intel_dp_supports_fec(intel_dp, connector, pipe_config);
+	pipe_config->fec_enable = pipe_config->fec_enable ||
+		(!intel_dp_is_edp(intel_dp) &&
+		 intel_dp_supports_fec(intel_dp, connector, pipe_config));
 
 	if (!intel_dp_supports_dsc(connector, pipe_config))
 		return -EINVAL;
@@ -2188,18 +2270,18 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 	ret = intel_dp_dsc_compute_params(connector, pipe_config);
 	if (ret < 0) {
 		drm_dbg_kms(&dev_priv->drm,
-			    "Cannot compute valid DSC parameters for Input Bpp = %d "
-			    "Compressed BPP = %d\n",
+			    "Cannot compute valid DSC parameters for Input Bpp = %d "
+			    "Compressed BPP = " BPP_X16_FMT "\n",
 			    pipe_config->pipe_bpp,
-			    pipe_config->dsc.compressed_bpp);
+			    BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
 		return ret;
 	}
 
 	pipe_config->dsc.compression_enable = true;
 	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
-		    "Compressed Bpp = %d Slice Count = %d\n",
+		    "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n",
 		    pipe_config->pipe_bpp,
-		    pipe_config->dsc.compressed_bpp,
+		    BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
 		    pipe_config->dsc.slice_count);
 
 	return 0;
+2406,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, bool dsc_needed; int ret = 0; + if (pipe_config->fec_enable && + !intel_dp_supports_fec(intel_dp, connector, pipe_config)) + return -EINVAL; + if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_clock)) pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe); @@ -2369,15 +2457,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, if (pipe_config->dsc.compression_enable) { drm_dbg_kms(&i915->drm, - "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", + "DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n", pipe_config->lane_count, pipe_config->port_clock, pipe_config->pipe_bpp, - pipe_config->dsc.compressed_bpp); + BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16)); drm_dbg_kms(&i915->drm, "DP link rate required %i available %i\n", intel_dp_link_required(adjusted_mode->crtc_clock, - pipe_config->dsc.compressed_bpp), + to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)), intel_dp_max_data_rate(pipe_config->port_clock, pipe_config->lane_count)); } else { @@ -2446,12 +2534,22 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - /* - * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 - * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ - * Colorimetry Format indication. - */ - vsc->revision = 0x5; + if (crtc_state->has_panel_replay) { + /* + * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223 + * VSC SDP supporting 3D stereo, Panel Replay, and Pixel + * Encoding/Colorimetry Format indication. + */ + vsc->revision = 0x7; + } else { + /* + * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 + * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ + * Colorimetry Format indication. + */ + vsc->revision = 0x5; + } + vsc->length = 0x13; /* DP 1.4a spec, Table 2-120 */ @@ -2560,6 +2658,21 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, vsc->revision = 0x4; vsc->length = 0xe; } + } else if (crtc_state->has_panel_replay) { + if (intel_dp->psr.colorimetry_support && + intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { + /* [Panel Replay with colorimetry info] */ + intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, + vsc); + } else { + /* + * [Panel Replay without colorimetry info] + * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223 + * VSC SDP supporting 3D stereo + Panel Replay. + */ + vsc->revision = 0x6; + vsc->length = 0x10; + } } else { /* * [PSR1] @@ -2636,14 +2749,18 @@ static bool can_enable_drrs(struct intel_connector *connector, static void intel_dp_drrs_compute_config(struct intel_connector *connector, struct intel_crtc_state *pipe_config, - int link_bpp) + int link_bpp_x16) { struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *downclock_mode = intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode); int pixel_clock; - if (has_seamless_m_n(connector)) + /* + * FIXME all joined pipes share the same transcoder. + * Need to account for that when updating M/N live. 
+ */ + if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes) pipe_config->update_m_n = true; if (!can_enable_drrs(connector, pipe_config, downclock_mode)) { @@ -2661,9 +2778,10 @@ intel_dp_drrs_compute_config(struct intel_connector *connector, if (pipe_config->splitter.enable) pixel_clock /= pipe_config->splitter.link_count; - intel_link_compute_m_n(link_bpp, pipe_config->lane_count, pixel_clock, - pipe_config->port_clock, &pipe_config->dp_m2_n2, - pipe_config->fec_enable); + intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock, + pipe_config->port_clock, + intel_dp_bw_fec_overhead(pipe_config->fec_enable), + &pipe_config->dp_m2_n2); /* FIXME: abstract this better */ if (pipe_config->splitter.enable) @@ -2739,19 +2857,12 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct drm_connector *connector = conn_state->connector; - pipe_config->has_audio = intel_dp_has_audio(encoder, pipe_config, conn_state) && intel_audio_compute_config(encoder, pipe_config, conn_state); pipe_config->sdp_split_enable = pipe_config->has_audio && intel_dp_is_uhbr(pipe_config); - - drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n", - connector->base.id, connector->name, - str_yes_no(pipe_config->sdp_split_enable)); } int @@ -2764,7 +2875,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); const struct drm_display_mode *fixed_mode; struct intel_connector *connector = intel_dp->attached_connector; - int ret = 0, link_bpp; + int ret = 0, link_bpp_x16; if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A) pipe_config->has_pch_encoder = true; @@ -2813,10 +2924,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, drm_dp_enhanced_frame_cap(intel_dp->dpcd); if (pipe_config->dsc.compression_enable) - link_bpp = pipe_config->dsc.compressed_bpp; + link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16; else - link_bpp = intel_dp_output_bpp(pipe_config->output_format, - pipe_config->pipe_bpp); + link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format, + pipe_config->pipe_bpp)); if (intel_dp->mso_link_count) { int n = intel_dp->mso_link_count; @@ -2840,12 +2951,12 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_dp_audio_compute_config(encoder, pipe_config, conn_state); - intel_link_compute_m_n(link_bpp, + intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, adjusted_mode->crtc_clock, pipe_config->port_clock, - &pipe_config->dp_m_n, - pipe_config->fec_enable); + intel_dp_bw_fec_overhead(pipe_config->fec_enable), + &pipe_config->dp_m_n); /* FIXME: abstract this better */ if (pipe_config->splitter.enable) @@ -2856,7 +2967,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_vrr_compute_config(pipe_config, conn_state); intel_psr_compute_config(intel_dp, pipe_config, conn_state); - intel_dp_drrs_compute_config(connector, pipe_config, link_bpp); + intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16); intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); @@ -2924,24 +3035,179 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; } -void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, - const struct 
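intel_link_compute_m_n() now takes a bandwidth overhead factor instead of the old fec_enable flag, so the same M/N helper covers FEC, DSC and MST overheads. Judging by how its result is compared with drm_dp_bw_overhead() later in the series, the factor is a multiplier in units of 1/1000000; a sketch under that assumption, with the FEC constant corresponding to the traditional 1/0.972261 model (the exact value is defined elsewhere, not in this hunk):

/* Sketch: link BW overhead as a ppm multiplier (1000000 == 1.0). */
int intel_dp_bw_fec_overhead(bool fec_enabled)
{
	/* assumed: ~2.85% combined FEC + parity overhead, i.e. 1/0.972261 */
	return fec_enabled ? 1028530 : 1000000;
}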
intel_crtc_state *crtc_state, - bool enable) +static int +write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - int ret; + int err; + u8 val; - if (!crtc_state->dsc.compression_enable) - return; + err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val); + if (err < 0) + return err; - ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, - enable ? DP_DECOMPRESSION_EN : 0); - if (ret < 0) + if (set) + val |= flag; + else + val &= ~flag; + + return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val); +} + +static void +intel_dp_sink_set_dsc_decompression(struct intel_connector *connector, + bool enable) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + + if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux, + DP_DECOMPRESSION_EN, enable) < 0) drm_dbg_kms(&i915->drm, "Failed to %s sink decompression state\n", str_enable_disable(enable)); } +static void +intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector, + bool enable) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct drm_dp_aux *aux = connector->port ? + connector->port->passthrough_aux : NULL; + + if (!aux) + return; + + if (write_dsc_decompression_flag(aux, + DP_DSC_PASSTHROUGH_EN, enable) < 0) + drm_dbg_kms(&i915->drm, + "Failed to %s sink compression passthrough state\n", + str_enable_disable(enable)); +} + +static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state, + const struct intel_connector *connector, + bool for_get_ref) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct drm_connector *_connector_iter; + struct drm_connector_state *old_conn_state; + struct drm_connector_state *new_conn_state; + int ref_count = 0; + int i; + + /* + * On SST the decompression AUX device won't be shared; each connector + * uses its own AUX targeting the sink device for this purpose. + */ + if (!connector->mst_port) + return connector->dp.dsc_decompression_enabled ? 1 : 0; + + for_each_oldnew_connector_in_state(&state->base, _connector_iter, + old_conn_state, new_conn_state, i) { + const struct intel_connector * + connector_iter = to_intel_connector(_connector_iter); + + if (connector_iter->mst_port != connector->mst_port) + continue; + + if (!connector_iter->dp.dsc_decompression_enabled) + continue; + + drm_WARN_ON(&i915->drm, + (for_get_ref && !new_conn_state->crtc) || + (!for_get_ref && !old_conn_state->crtc)); + + if (connector_iter->dp.dsc_decompression_aux == + connector->dp.dsc_decompression_aux) + ref_count++; + } + + return ref_count; +} + +static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state, + struct intel_connector *connector) +{ + bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0; + + connector->dp.dsc_decompression_enabled = true; + + return ret; +} + +static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state, + struct intel_connector *connector) +{ + connector->dp.dsc_decompression_enabled = false; + + return intel_dp_dsc_aux_ref_count(state, connector, false) == 0; +} + +/** + * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device + * @state: atomic state + * @connector: connector to enable the decompression for + * @new_crtc_state: new state for the CRTC driving @connector + * + * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD + * register of the appropriate sink/branch device. 
On SST this is always the + * sink device, whereas on MST, based on each device's DSC capabilities, it's + * either the last branch device (enabling decompression in it) or both the + * last branch device (enabling passthrough in it) and the sink device + * (enabling decompression in it). + */ +void intel_dp_sink_enable_decompression(struct intel_atomic_state *state, + struct intel_connector *connector, + const struct intel_crtc_state *new_crtc_state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (!new_crtc_state->dsc.compression_enable) + return; + + if (drm_WARN_ON(&i915->drm, + !connector->dp.dsc_decompression_aux || + connector->dp.dsc_decompression_enabled)) + return; + + if (!intel_dp_dsc_aux_get_ref(state, connector)) + return; + + intel_dp_sink_set_dsc_passthrough(connector, true); + intel_dp_sink_set_dsc_decompression(connector, true); +} + +/** + * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device + * @state: atomic state + * @connector: connector to disable the decompression for + * @old_crtc_state: old state for the CRTC driving @connector + * + * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD + * register of the appropriate sink/branch device, corresponding to the + * sequence in intel_dp_sink_enable_decompression(). + */ +void intel_dp_sink_disable_decompression(struct intel_atomic_state *state, + struct intel_connector *connector, + const struct intel_crtc_state *old_crtc_state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (!old_crtc_state->dsc.compression_enable) + return; + + if (drm_WARN_ON(&i915->drm, + !connector->dp.dsc_decompression_aux || + !connector->dp.dsc_decompression_enabled)) + return; + + if (!intel_dp_dsc_aux_put_ref(state, connector)) + return; + + intel_dp_sink_set_dsc_decompression(connector, false); + intel_dp_sink_set_dsc_passthrough(connector, false); +} + static void intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) { @@ -3778,7 +4044,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); - return i915->params.enable_dp_mst && + return i915->display.params.enable_dp_mst && intel_dp_mst_source_support(intel_dp) && drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); } @@ -3796,13 +4062,13 @@ intel_dp_configure_mst(struct intel_dp *intel_dp) encoder->base.base.id, encoder->base.name, str_yes_no(intel_dp_mst_source_support(intel_dp)), str_yes_no(sink_can_mst), - str_yes_no(i915->params.enable_dp_mst)); + str_yes_no(i915->display.params.enable_dp_mst)); if (!intel_dp_mst_source_support(intel_dp)) return; intel_dp->is_mst = sink_can_mst && - i915->params.enable_dp_mst; + i915->display.params.enable_dp_mst; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); @@ -3872,11 +4138,16 @@ static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ + if (vsc->revision == 0x6) { + sdp->db[0] = 1; + sdp->db[3] = 1; + } + /* - * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as - * per DP 1.4a spec. + * Revision 0x5 and revision 0x7 support Pixel Encoding/Colorimetry + * Format as per DP 1.4a spec and DP 2.0 respectively. 
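The get/put helpers above make the DPCD writes edge-triggered: on MST, only the first enable and the last disable among the connectors sharing one decompression AUX actually touch DP_DSC_ENABLE. A standalone toy model of that gating (illustration only, not driver code):

#include <stdio.h>

static int shared_refs; /* connectors with decompression enabled */

static void enable_decompression(const char *conn)
{
	if (shared_refs++ == 0)
		printf("%s: set DP_DSC_PASSTHROUGH_EN, then DP_DECOMPRESSION_EN\n", conn);
	else
		printf("%s: AUX already claimed, no DPCD write\n", conn);
}

static void disable_decompression(const char *conn)
{
	if (--shared_refs == 0)
		printf("%s: clear DP_DECOMPRESSION_EN, then DP_DSC_PASSTHROUGH_EN\n", conn);
	else
		printf("%s: AUX still in use, no DPCD write\n", conn);
}

int main(void)
{
	enable_decompression("DP-MST-1");	/* 0 -> 1: DPCD written */
	enable_decompression("DP-MST-2");	/* shared AUX: skipped */
	disable_decompression("DP-MST-1");	/* still referenced: skipped */
	disable_decompression("DP-MST-2");	/* 1 -> 0: DPCD cleared */
	return 0;
}

Note the ordering mirrors the driver: passthrough is set before decompression on enable, and cleared after it on disable.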
*/ - if (vsc->revision != 0x5) + if (!(vsc->revision == 0x5 || vsc->revision == 0x7)) goto out; /* VSC SDP Payload for DB16 through DB18 */ @@ -4056,7 +4327,10 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder, VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; u32 val = intel_de_read(dev_priv, reg) & ~dip_enable; - /* TODO: Add DSC case (DIP_ENABLE_PPS) */ + /* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */ + if (!enable && HAS_DSC(dev_priv)) + val &= ~VDIP_ENABLE_PPS; + /* When PSR is enabled, this routine doesn't disable VSC DIP */ if (!crtc_state->has_psr) val &= ~VIDEO_DIP_ENABLE_VSC_HSW; @@ -5416,6 +5690,7 @@ intel_dp_detect(struct drm_connector *connector, if (status == connector_status_disconnected) { memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd)); + intel_dp->psr.sink_panel_replay_support = false; if (intel_dp->is_mst) { drm_dbg_kms(&dev_priv->drm, @@ -5430,6 +5705,9 @@ intel_dp_detect(struct drm_connector *connector, goto out; } + if (!intel_dp_is_edp(intel_dp)) + intel_psr_init_dpcd(intel_dp); + intel_dp_detect_dsc_caps(intel_dp, intel_connector); intel_dp_configure_mst(intel_dp); @@ -5590,6 +5868,19 @@ intel_dp_connector_unregister(struct drm_connector *connector) intel_connector_unregister(connector); } +void intel_dp_connector_sync_state(struct intel_connector *connector, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + + if (crtc_state && crtc_state->dsc.compression_enable) { + drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux); + connector->dp.dsc_decompression_enabled = true; + } else { + connector->dp.dsc_decompression_enabled = false; + } +} + void intel_dp_encoder_flush_work(struct drm_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); @@ -6238,6 +6529,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; else intel_connector->get_hw_state = intel_connector_get_hw_state; + intel_connector->sync_state = intel_dp_connector_sync_state; if (!intel_edp_init_connector(intel_dp, intel_connector)) { intel_dp_aux_fini(intel_dp); @@ -6261,16 +6553,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, "HDCP init failed, skipping.\n"); } - /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written - * 0xd. Failure to do so will result in spurious interrupts being - * generated on the port when a cable is not attached. 
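For reference, the VSC SDP header variants selected across the hunks above collapse to four revision/length pairs. A hypothetical helper summarizing them (the pairs are taken from this file; the helper itself is not in the driver):

static void vsc_sdp_header_for(bool panel_replay, bool colorimetry,
			       u8 *revision, u8 *length)
{
	if (panel_replay && colorimetry) {
		*revision = 0x7; *length = 0x13;	/* DP 2.0, Table 2-223 */
	} else if (panel_replay) {
		*revision = 0x6; *length = 0x10;	/* pack helper sets DB0/DB3 */
	} else if (colorimetry) {
		*revision = 0x5; *length = 0x13;	/* DP 1.4a, Table 2-118 */
	} else {
		*revision = 0x4; *length = 0xe;		/* PSR2 without colorimetry */
	}
}

Only revisions 0x5 and 0x7 carry the colorimetry payload in DB16-DB18, which is exactly what the reworked check in intel_dp_vsc_sdp_pack() tests for.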
- */ - if (IS_G45(dev_priv)) { - u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); - intel_de_write(dev_priv, PEG_BAND_GAP_DATA, - (temp & ~0xf) | 0xd); - } - intel_dp->frl.is_trained = false; intel_dp->frl.trained_rate_gbps = 0; diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 484aea215a..375d0677cd 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -45,6 +45,8 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, int intel_dp_min_bpp(enum intel_output_format output_format); bool intel_dp_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector); +void intel_dp_connector_sync_state(struct intel_connector *connector, + const struct intel_crtc_state *crtc_state); void intel_dp_set_link_params(struct intel_dp *intel_dp, int link_rate, int lane_count); int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, @@ -57,9 +59,12 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode); void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); -void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - bool enable); +void intel_dp_sink_enable_decompression(struct intel_atomic_state *state, + struct intel_connector *connector, + const struct intel_crtc_state *new_crtc_state); +void intel_dp_sink_disable_decompression(struct intel_atomic_state *state, + struct intel_connector *connector, + const struct intel_crtc_state *old_crtc_state); void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder); void intel_dp_encoder_flush_work(struct drm_encoder *encoder); @@ -78,6 +83,8 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder, bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp); bool intel_dp_is_edp(struct intel_dp *intel_dp); bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state); +int intel_dp_link_symbol_size(int rate); +int intel_dp_link_symbol_clock(int rate); bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port); enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd); @@ -98,6 +105,8 @@ bool intel_dp_source_supports_tps4(struct drm_i915_private *i915); bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); +int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, + int bw_overhead); int intel_dp_max_data_rate(int max_link_rate, int max_lanes); bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp); bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, @@ -125,6 +134,10 @@ u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915, enum intel_output_format output_format, u32 pipe_bpp, u32 timeslots); +int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config); +int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector, + struct intel_crtc_state *pipe_config, + int bpc); u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, int mode_clock, int mode_hdisplay, bool bigjoiner); @@ -136,7 +149,16 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count) return ~((1 << lane_count) - 1) & 0xf; } +bool 
intel_dp_supports_fec(struct intel_dp *intel_dp, + const struct intel_connector *connector, + const struct intel_crtc_state *pipe_config); u32 intel_dp_mode_to_fec_clock(u32 mode_clock); +int intel_dp_bw_fec_overhead(bool fec_enabled); + u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp); void intel_ddi_update_pipe(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 4431b6290c..2e2af71bcd 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -74,7 +74,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (index) return 0; @@ -83,12 +83,12 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) * The clock divider is based off the hrawclk, and would like to run at * 2MHz. So, take the hrawclk value and divide by 2000 and use that */ - return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000); + return DIV_ROUND_CLOSEST(RUNTIME_INFO(i915)->rawclk_freq, 2000); } static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u32 freq; @@ -101,18 +101,18 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) * divide by 2000 and use that */ if (dig_port->aux_ch == AUX_CH_A) - freq = dev_priv->display.cdclk.hw.cdclk; + freq = i915->display.cdclk.hw.cdclk; else - freq = RUNTIME_INFO(dev_priv)->rawclk_freq; + freq = RUNTIME_INFO(i915)->rawclk_freq; return DIV_ROUND_CLOSEST(freq, 2000); } static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { + if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) { /* Workaround for non-ULT HSW */ switch (index) { case 0: return 63; @@ -165,12 +165,11 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, u32 aux_clock_divider) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = - to_i915(dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); u32 timeout; /* Max timeout value on G4x-BDW: 1.6ms */ - if (IS_BROADWELL(dev_priv)) + if (IS_BROADWELL(i915)) timeout = DP_AUX_CH_CTL_TIME_OUT_600us; else timeout = DP_AUX_CH_CTL_TIME_OUT_400us; @@ -229,8 +228,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, u32 aux_send_ctl_flags) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *i915 = - to_i915(dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(i915, dig_port->base.port); bool is_tc_port = intel_phy_is_tc(i915, phy); i915_reg_t ch_ctl, ch_data[5]; @@ -531,9 +529,40 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct 
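The divider comments above aim the AUX bit clock at 2 MHz. As a worked example, assuming rawclk_freq is in kHz as elsewhere in i915: a 19.2 MHz hrawclk gives DIV_ROUND_CLOSEST(19200, 2000) = 10, and 19.2 MHz / 10 = 1.92 MHz, within rounding of the required 2 MHz (the 19.2 MHz figure is illustrative).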
drm_dp_aux_msg *msg) return ret; } +static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_B: + case AUX_CH_C: + case AUX_CH_D: + return VLV_DP_AUX_CH_CTL(aux_ch); + default: + MISSING_CASE(aux_ch); + return VLV_DP_AUX_CH_CTL(AUX_CH_B); + } +} + +static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_B: + case AUX_CH_C: + case AUX_CH_D: + return VLV_DP_AUX_CH_DATA(aux_ch, index); + default: + MISSING_CASE(aux_ch); + return VLV_DP_AUX_CH_DATA(AUX_CH_B, index); + } +} + static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; @@ -550,7 +579,6 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; @@ -567,7 +595,6 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; @@ -586,7 +613,6 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; @@ -605,7 +631,6 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; @@ -625,7 +650,6 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; @@ -645,7 +669,6 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; @@ -668,7 +691,6 @@ static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp) static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; @@ -691,7 +713,7 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index) static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port 
*dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; @@ -702,16 +724,16 @@ static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp) case AUX_CH_USBC2: case AUX_CH_USBC3: case AUX_CH_USBC4: - return XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch); + return XELPDP_DP_AUX_CH_CTL(i915, aux_ch); default: MISSING_CASE(aux_ch); - return XELPDP_DP_AUX_CH_CTL(dev_priv, AUX_CH_A); + return XELPDP_DP_AUX_CH_CTL(i915, AUX_CH_A); } } static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum aux_ch aux_ch = dig_port->aux_ch; @@ -722,10 +744,10 @@ static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index) case AUX_CH_USBC2: case AUX_CH_USBC3: case AUX_CH_USBC4: - return XELPDP_DP_AUX_CH_DATA(dev_priv, aux_ch, index); + return XELPDP_DP_AUX_CH_DATA(i915, aux_ch, index); default: MISSING_CASE(aux_ch); - return XELPDP_DP_AUX_CH_DATA(dev_priv, AUX_CH_A, index); + return XELPDP_DP_AUX_CH_DATA(i915, AUX_CH_A, index); } } @@ -739,49 +761,52 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp) void intel_dp_aux_init(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; enum aux_ch aux_ch = dig_port->aux_ch; char buf[AUX_CH_NAME_BUFSIZE]; - if (DISPLAY_VER(dev_priv) >= 14) { + if (DISPLAY_VER(i915) >= 14) { intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg; intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg; - } else if (DISPLAY_VER(dev_priv) >= 12) { + } else if (DISPLAY_VER(i915) >= 12) { intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg; intel_dp->aux_ch_data_reg = tgl_aux_data_reg; - } else if (DISPLAY_VER(dev_priv) >= 9) { + } else if (DISPLAY_VER(i915) >= 9) { intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; intel_dp->aux_ch_data_reg = skl_aux_data_reg; - } else if (HAS_PCH_SPLIT(dev_priv)) { + } else if (HAS_PCH_SPLIT(i915)) { intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg; intel_dp->aux_ch_data_reg = ilk_aux_data_reg; + } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg; + intel_dp->aux_ch_data_reg = vlv_aux_data_reg; } else { intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg; intel_dp->aux_ch_data_reg = g4x_aux_data_reg; } - if (DISPLAY_VER(dev_priv) >= 9) + if (DISPLAY_VER(i915) >= 9) intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; - else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; - else if (HAS_PCH_SPLIT(dev_priv)) + else if (HAS_PCH_SPLIT(i915)) intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; else intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider; - if (DISPLAY_VER(dev_priv) >= 9) + if (DISPLAY_VER(i915) >= 9) intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl; else intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl; - intel_dp->aux.drm_dev = &dev_priv->drm; + intel_dp->aux.drm_dev = &i915->drm; drm_dp_aux_init(&intel_dp->aux); /* Failure to allocate our preferred name is not critical */ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s", - aux_ch_name(dev_priv, buf, sizeof(buf), aux_ch), + aux_ch_name(i915, buf, sizeof(buf), aux_ch), encoder->base.name); intel_dp->aux.transfer = 
intel_dp_aux_transfer; diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 26ea7e9f1b..4f58efdc68 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -146,7 +146,7 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) * HDR static metadata we need to start maintaining table of * ranges for such panels. */ - if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL && + if (i915->display.params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL && !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type & BIT(HDMI_STATIC_METADATA_TYPE1))) { drm_info(&i915->drm, @@ -489,7 +489,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) /* Check the VBT and user's module parameters to figure out which * interfaces to probe */ - switch (i915->params.enable_dpcd_backlight) { + switch (i915->display.params.enable_dpcd_backlight) { case INTEL_DP_AUX_BACKLIGHT_OFF: return -ENODEV; case INTEL_DP_AUX_BACKLIGHT_AUTO: diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h b/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h index 34f6e0a48e..e642445364 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h @@ -21,13 +21,14 @@ #define __xe2lpd_aux_ch_idx(aux_ch) \ (aux_ch >= AUX_CH_USBC1 ? aux_ch : AUX_CH_USBC4 + 1 + (aux_ch) - AUX_CH_A) -/* TODO: Remove implicit dev_priv */ -#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010) -#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110) +#define _DPA_AUX_CH_CTL 0x64010 +#define _DPB_AUX_CH_CTL 0x64110 #define _XELPDP_USBC1_AUX_CH_CTL 0x16f210 #define _XELPDP_USBC2_AUX_CH_CTL 0x16f410 #define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, \ _DPB_AUX_CH_CTL) +#define VLV_DP_AUX_CH_CTL(aux_ch) _MMIO(VLV_DISPLAY_BASE + \ + _PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)) #define _XELPDP_DP_AUX_CH_CTL(aux_ch) \ _MMIO(_PICK_EVEN_2RANGES(aux_ch, AUX_CH_USBC1, \ _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL, \ @@ -69,13 +70,14 @@ #define DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK REG_GENMASK(4, 0) /* skl+ */ #define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) REG_FIELD_PREP(DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK, (c) - 1) -/* TODO: Remove implicit dev_priv */ -#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014) -#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114) +#define _DPA_AUX_CH_DATA1 0x64014 +#define _DPB_AUX_CH_DATA1 0x64114 #define _XELPDP_USBC1_AUX_CH_DATA1 0x16f214 #define _XELPDP_USBC2_AUX_CH_DATA1 0x16f414 #define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, \ _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ +#define VLV_DP_AUX_CH_DATA(aux_ch, i) _MMIO(VLV_DISPLAY_BASE + _PORT(aux_ch, _DPA_AUX_CH_DATA1, \ + _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ #define _XELPDP_DP_AUX_CH_DATA(aux_ch, i) \ _MMIO(_PICK_EVEN_2RANGES(aux_ch, AUX_CH_USBC1, \ _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1, \ diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index 3a595cd433..8538d1ce2f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -330,23 +330,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { 0, 0 }, }; -static struct drm_dp_aux * -intel_dp_hdcp_get_aux(struct intel_connector *connector) -{ - struct 
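With the implicit dev_priv gone from these register definitions, the VLV variants above add the display MMIO base explicitly. Assuming VLV_DISPLAY_BASE is 0x180000, its usual value in i915, VLV_DP_AUX_CH_CTL(AUX_CH_B) resolves to 0x180000 + 0x64110 = 0x1e4110, and VLV_DP_AUX_CH_DATA(AUX_CH_B, 2) to 0x180000 + 0x64114 + 2 * 4 = 0x1e411c.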
intel_digital_port *dig_port = intel_attached_dig_port(connector); - - if (intel_encoder_is_mst(connector->encoder)) - return &connector->port->aux; - else - return &dig_port->dp.aux; -} - static int intel_dp_hdcp2_read_rx_status(struct intel_connector *connector, u8 *rx_status) { struct drm_i915_private *i915 = to_i915(connector->base.dev); - struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct drm_dp_aux *aux = &dig_port->dp.aux; ssize_t ret; ret = drm_dp_dpcd_read(aux, @@ -399,7 +389,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector, const struct hdcp2_dp_msg_data *hdcp2_msg_data) { struct drm_i915_private *i915 = to_i915(connector->base.dev); - struct intel_hdcp *hdcp = &connector->hdcp; + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct intel_dp *dp = &dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; u8 msg_id = hdcp2_msg_data->msg_id; int ret, timeout; bool msg_ready = false; @@ -454,8 +446,9 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector, unsigned int offset; u8 *byte = buf; ssize_t ret, bytes_to_write, len; + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct drm_dp_aux *aux = &dig_port->dp.aux; const struct hdcp2_dp_msg_data *hdcp2_msg_data; - struct drm_dp_aux *aux; hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); if (!hdcp2_msg_data) @@ -463,8 +456,6 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector, offset = hdcp2_msg_data->offset; - aux = intel_dp_hdcp_get_aux(connector); - /* No msg_id in DP HDCP2.2 msgs */ bytes_to_write = size - 1; byte++; @@ -490,7 +481,8 @@ static ssize_t get_receiver_id_list_rx_info(struct intel_connector *connector, u32 *dev_cnt, u8 *byte) { - struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct drm_dp_aux *aux = &dig_port->dp.aux; ssize_t ret; u8 *rx_info = byte; @@ -515,8 +507,9 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_dp_aux *aux; + struct drm_dp_aux *aux = &dig_port->dp.aux; + struct intel_dp *dp = &dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; unsigned int offset; u8 *byte = buf; ssize_t ret, bytes_to_recv, len; @@ -530,8 +523,6 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, return -EINVAL; offset = hdcp2_msg_data->offset; - aux = intel_dp_hdcp_get_aux(connector); - ret = intel_dp_hdcp2_wait_for_msg(connector, hdcp2_msg_data); if (ret < 0) return ret; @@ -561,13 +552,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector, /* Entire msg read timeout since initiate of msg read */ if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0) { - if (intel_encoder_is_mst(connector->encoder)) - msg_end = ktime_add_ms(ktime_get_raw(), - hdcp2_msg_data->msg_read_timeout * - connector->port->parent->num_ports); - else - msg_end = ktime_add_ms(ktime_get_raw(), - hdcp2_msg_data->msg_read_timeout); + msg_end = ktime_add_ms(ktime_get_raw(), + hdcp2_msg_data->msg_read_timeout); } ret = drm_dp_dpcd_read(aux, offset, @@ -651,12 +637,11 @@ static int intel_dp_hdcp2_capable(struct intel_connector *connector, bool *capable) { - struct drm_dp_aux *aux; + struct 
intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct drm_dp_aux *aux = &dig_port->dp.aux; u8 rx_caps[3]; int ret; - aux = intel_dp_hdcp_get_aux(connector); - *capable = false; ret = drm_dp_dpcd_read(aux, DP_HDCP_2_2_REG_RX_CAPS_OFFSET, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 03ac281766..2151e916cc 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include "i915_drv.h" @@ -43,6 +44,9 @@ #include "intel_dpio_phy.h" #include "intel_hdcp.h" #include "intel_hotplug.h" +#include "intel_link_bw.h" +#include "intel_psr.h" +#include "intel_vdsc.h" #include "skl_scaler.h" static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp, @@ -50,7 +54,7 @@ static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp struct intel_crtc_state *crtc_state, bool dsc) { - if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) <= 13 && dsc) { + if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) < 14 && dsc) { int output_bpp = bpp; /* DisplayPort 2 128b/132b, bits per lane is always 32 */ int symbol_clock = crtc_state->port_clock / 32; @@ -66,6 +70,73 @@ static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp return 0; } +static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state, + const struct intel_connector *connector, + bool ssc, bool dsc, int bpp_x16) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + unsigned long flags = DRM_DP_BW_OVERHEAD_MST; + int dsc_slice_count = 0; + int overhead; + + flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0; + flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0; + flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0; + + if (dsc) { + flags |= DRM_DP_BW_OVERHEAD_DSC; + /* TODO: add support for bigjoiner */ + dsc_slice_count = intel_dp_dsc_get_slice_count(connector, + adjusted_mode->clock, + adjusted_mode->hdisplay, + false); + } + + overhead = drm_dp_bw_overhead(crtc_state->lane_count, + adjusted_mode->hdisplay, + dsc_slice_count, + bpp_x16, + flags); + + /* + * TODO: clarify whether a minimum required by the fixed FEC overhead + * in the bspec audio programming sequence is required here. + */ + return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable)); +} + +static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state, + const struct intel_connector *connector, + int overhead, + int bpp_x16, + struct intel_link_m_n *m_n) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + + /* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */ + intel_link_compute_m_n(bpp_x16, crtc_state->lane_count, + adjusted_mode->crtc_clock, + crtc_state->port_clock, + overhead, + m_n); + + m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n); +} + +static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead) +{ + int effective_data_rate = + intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead); + + /* + * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted + * to calculate PBN with the BW overhead passed to it. 
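intel_dp_mst_calc_pbn(), completed just below, divides the overhead-adjusted rate by the 54/64 MB/s PBN unit, which implies intel_dp_effective_data_rate() returns kB/s. A sketch of that helper under the stated assumption, matching the declaration added to intel_dp.h above:

/* Sketch: overhead-adjusted link data rate in kB/s (assumed semantics). */
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
				 int bw_overhead)
{
	/* pixel_clock in kHz, bpp_x16 in .4 fixed point, overhead in ppm */
	return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead),
				1000000 * 16 * 8);
}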
+ */ + return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000); +} + static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int max_bpp, @@ -94,19 +165,67 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, crtc_state->lane_count = limits->max_lane_count; crtc_state->port_clock = limits->max_rate; + if (dsc) { + if (!intel_dp_supports_fec(intel_dp, connector, crtc_state)) + return -EINVAL; + + crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state); + } + mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr, crtc_state->port_clock, crtc_state->lane_count); + drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n", + min_bpp, max_bpp); + for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) { + int local_bw_overhead; + int remote_bw_overhead; + int link_bpp_x16; + int remote_tu; + drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp); ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc); if (ret) continue; - crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, - bpp << 4); + link_bpp_x16 = to_bpp_x16(dsc ? bpp : + intel_dp_output_bpp(crtc_state->output_format, bpp)); + + local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector, + false, dsc, link_bpp_x16); + remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector, + true, dsc, link_bpp_x16); + + intel_dp_mst_compute_m_n(crtc_state, connector, + local_bw_overhead, + link_bpp_x16, + &crtc_state->dp_m_n); + + /* + * The TU size programmed to the HW determines which slots in + * an MTP frame are used for this stream, which needs to match + * the payload size programmed to the first downstream branch + * device's payload table. + * + * Note that atm the payload's PBN value DRM core sends via + * the ALLOCATE_PAYLOAD side-band message matches the payload + * size (which it calculates from the PBN value) it programs + * to the first branch device's payload table. The allocation + * in the payload table could be reduced though (to + * crtc_state->dp_m_n.tu), provided that the driver doesn't + * enable SSC on the corresponding link. + */ + crtc_state->pbn = intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock, + link_bpp_x16, + remote_bw_overhead); + + remote_tu = DIV_ROUND_UP(dfixed_const(crtc_state->pbn), mst_state->pbn_div.full); + + drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu); + crtc_state->dp_m_n.tu = remote_tu; slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr, connector->port, @@ -115,13 +234,9 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, return slots; if (slots >= 0) { - ret = drm_dp_mst_atomic_check(state); - /* - * If we got slots >= 0 and we can fit those based on check - * then we can exit the loop. Otherwise keep trying. 
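A worked example tying the PBN and time-slot math together (illustrative numbers, no FEC/SSC overhead): with a 533250 kHz pixel clock at 24 bpp (bpp_x16 = 384), the effective data rate is 533250 * 384 / (16 * 8) = 1599750 kB/s, so pbn = ceil(1599750 * 64 / 54000) = 1896. On an HBR2 x4 link, whose pbn_div works out to 40 PBN per time slot, that payload needs remote_tu = ceil(1896 / 40) = 48 of the 64 MTP time slots.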
- */ - if (!ret) - break; + drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu); + + break; } } @@ -136,7 +251,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, if (!dsc) crtc_state->pipe_bpp = bpp; else - crtc_state->dsc.compressed_bpp = bpp; + crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(bpp); drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc); } @@ -148,10 +263,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state, struct link_config_limits *limits) { - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; int slots = -EINVAL; - int link_bpp; /* * FIXME: allocate the BW according to link_bpp, which in the case of @@ -166,16 +278,6 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, if (slots < 0) return slots; - link_bpp = intel_dp_output_bpp(crtc_state->output_format, crtc_state->pipe_bpp); - - intel_link_compute_m_n(link_bpp, - crtc_state->lane_count, - adjusted_mode->crtc_clock, - crtc_state->port_clock, - &crtc_state->dp_m_n, - crtc_state->fec_enable); - crtc_state->dp_m_n.tu = slots; - return 0; } @@ -187,15 +289,12 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder, struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; int slots = -EINVAL; int i, num_bpc; u8 dsc_bpc[3] = {}; int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp; u8 dsc_max_bpc; - bool need_timeslot_recalc = false; - u32 last_compressed_bpp; + int min_compressed_bpp, max_compressed_bpp; /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ if (DISPLAY_VER(i915) >= 12) @@ -231,45 +330,31 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder, if (max_bpp > sink_max_bpp) max_bpp = sink_max_bpp; - min_bpp = max(min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16)); - max_bpp = min(max_bpp, to_bpp_int(limits->link.max_bpp_x16)); + max_compressed_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector, + crtc_state, + max_bpp / 3); + max_compressed_bpp = min(max_compressed_bpp, + to_bpp_int(limits->link.max_bpp_x16)); - slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_bpp, - min_bpp, limits, - conn_state, 2 * 3, true); + min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state); + min_compressed_bpp = max(min_compressed_bpp, + to_bpp_int_roundup(limits->link.min_bpp_x16)); - if (slots < 0) - return slots; + drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n", + min_compressed_bpp, max_compressed_bpp); - last_compressed_bpp = crtc_state->dsc.compressed_bpp; + /* Align compressed bpps according to our own constraints */ + max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, max_compressed_bpp, + crtc_state->pipe_bpp); + min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, min_compressed_bpp, + crtc_state->pipe_bpp); - crtc_state->dsc.compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, - last_compressed_bpp, - crtc_state->pipe_bpp); + slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_compressed_bpp, + min_compressed_bpp, limits, + conn_state, 1, true); - if (crtc_state->dsc.compressed_bpp != last_compressed_bpp) - need_timeslot_recalc = true; - - /* - * Apparently some MST hubs dislike if vcpi slots are not matching 
precisely - * the actual compressed bpp we use. - */ - if (need_timeslot_recalc) { - slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, - crtc_state->dsc.compressed_bpp, - crtc_state->dsc.compressed_bpp, - limits, conn_state, 2 * 3, true); - if (slots < 0) - return slots; - } - - intel_link_compute_m_n(crtc_state->dsc.compressed_bpp, - crtc_state->lane_count, - adjusted_mode->crtc_clock, - crtc_state->port_clock, - &crtc_state->dp_m_n, - crtc_state->fec_enable); - crtc_state->dp_m_n.tu = slots; + if (slots < 0) + return slots; return 0; } @@ -296,8 +381,103 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder, return 0; } +static bool +intel_dp_mst_dsc_source_support(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + /* + * FIXME: Enabling DSC on ICL results in blank screen and FIFO pipe / + * transcoder underruns, re-enable DSC after fixing this issue. + */ + return DISPLAY_VER(i915) >= 12 && intel_dsc_source_support(crtc_state); +} + +static int mode_hblank_period_ns(const struct drm_display_mode *mode) +{ + return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay, + NSEC_PER_SEC / 1000), + mode->crtc_clock); +} + +static bool +hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector, + const struct intel_crtc_state *crtc_state) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + + if (!connector->dp.dsc_hblank_expansion_quirk) + return false; + + if (mode_hblank_period_ns(adjusted_mode) > 300) + return false; + + return true; +} + +static bool +adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *connector, + const struct intel_crtc_state *crtc_state, + struct link_config_limits *limits, + bool dsc) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + int min_bpp_x16 = limits->link.min_bpp_x16; + + if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state)) + return true; + + if (!dsc) { + if (intel_dp_mst_dsc_source_support(crtc_state)) { + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n", + crtc->base.base.id, crtc->base.name, + connector->base.base.id, connector->base.name); + return false; + } + + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n", + crtc->base.base.id, crtc->base.name, + connector->base.base.id, connector->base.name); + + if (limits->link.max_bpp_x16 < to_bpp_x16(24)) + return false; + + limits->link.min_bpp_x16 = to_bpp_x16(24); + + return true; + } + + drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate); + + if (limits->max_rate < 540000) + min_bpp_x16 = to_bpp_x16(13); + else if (limits->max_rate < 810000) + min_bpp_x16 = to_bpp_x16(10); + + if (limits->link.min_bpp_x16 >= min_bpp_x16) + return true; + + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " BPP_X16_FMT " in DSC mode due to hblank expansion quirk\n", + crtc->base.base.id, crtc->base.name, + connector->base.base.id, connector->base.name, + BPP_X16_ARGS(min_bpp_x16)); + + if (limits->link.max_bpp_x16 < min_bpp_x16) + return false; + + limits->link.min_bpp_x16 = min_bpp_x16; + + return true; +} + static bool intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp, + const struct intel_connector *connector, struct intel_crtc_state *crtc_state, bool dsc, struct 
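The 300 ns threshold above is easiest to see with numbers: for a 3840-wide mode with htotal = 4000 at a 533250 kHz clock, mode_hblank_period_ns() yields (4000 - 3840) * 1000000 / 533250, roughly 300 ns, right at the quirk's limit; any narrower or faster blanking period forces DSC or the raised minimum link bpp. The figures are illustrative rather than taken from a real mode table.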
link_config_limits *limits) @@ -325,10 +505,16 @@ intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp, intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits); - return intel_dp_compute_config_link_bpp_limits(intel_dp, - crtc_state, - dsc, - limits); + if (!intel_dp_compute_config_link_bpp_limits(intel_dp, + crtc_state, + dsc, + limits)) + return false; + + return adjust_limits_for_dsc_hblank_expansion_quirk(connector, + crtc_state, + limits, + dsc); } static int intel_dp_mst_compute_config(struct intel_encoder *encoder, @@ -338,12 +524,18 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; + const struct intel_connector *connector = + to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct link_config_limits limits; bool dsc_needed; int ret = 0; + if (pipe_config->fec_enable && + !intel_dp_supports_fec(intel_dp, connector, pipe_config)) + return -EINVAL; + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; @@ -353,6 +545,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, dsc_needed = intel_dp->force_dsc_en || !intel_dp_mst_compute_config_limits(intel_dp, + connector, pipe_config, false, &limits); @@ -374,7 +567,11 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, str_yes_no(ret), str_yes_no(intel_dp->force_dsc_en)); + if (!intel_dp_mst_dsc_source_support(pipe_config)) + return -EINVAL; + if (!intel_dp_mst_compute_config_limits(intel_dp, + connector, pipe_config, true, &limits)) @@ -417,7 +614,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, intel_dp_audio_compute_config(encoder, pipe_config, conn_state); - intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); + intel_ddi_compute_min_voltage_level(pipe_config); + + intel_psr_compute_config(intel_dp, pipe_config, conn_state); return 0; } @@ -458,6 +657,130 @@ intel_dp_mst_transcoder_mask(struct intel_atomic_state *state, return transcoders; } +static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state, + struct drm_dp_mst_topology_mgr *mst_mgr, + struct drm_dp_mst_port *parent_port) +{ + const struct intel_digital_connector_state *conn_state; + struct intel_connector *connector; + u8 mask = 0; + int i; + + for_each_new_intel_connector_in_state(state, connector, conn_state, i) { + if (!conn_state->base.crtc) + continue; + + if (&connector->mst_port->mst_mgr != mst_mgr) + continue; + + if (connector->port != parent_port && + !drm_dp_mst_port_downstream_of_parent(mst_mgr, + connector->port, + parent_port)) + continue; + + mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe); + } + + return mask; +} + +static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state, + struct drm_dp_mst_topology_mgr *mst_mgr, + struct intel_link_bw_limits *limits) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc *crtc; + u8 mst_pipe_mask; + u8 fec_pipe_mask = 0; + int ret; + + mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL); + + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mst_pipe_mask) { + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + /* Atomic connector check should've added all the MST CRTCs. 
*/ + if (drm_WARN_ON(&i915->drm, !crtc_state)) + return -EINVAL; + + if (crtc_state->fec_enable) + fec_pipe_mask |= BIT(crtc->pipe); + } + + if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask) + return 0; + + limits->force_fec_pipes |= mst_pipe_mask; + + ret = intel_modeset_pipes_in_mask_early(state, "MST FEC", + mst_pipe_mask); + + return ret ? : -EAGAIN; +} + +static int intel_dp_mst_check_bw(struct intel_atomic_state *state, + struct drm_dp_mst_topology_mgr *mst_mgr, + struct drm_dp_mst_topology_state *mst_state, + struct intel_link_bw_limits *limits) +{ + struct drm_dp_mst_port *mst_port; + u8 mst_port_pipes; + int ret; + + ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port); + if (ret != -ENOSPC) + return ret; + + mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port); + + ret = intel_link_bw_reduce_bpp(state, limits, + mst_port_pipes, "MST link BW"); + + return ret ? : -EAGAIN; +} + +/** + * intel_dp_mst_atomic_check_link - check all modeset MST link configuration + * @state: intel atomic state + * @limits: link BW limits + * + * Check the link configuration for all modeset MST outputs. If the + * configuration is invalid, @limits will be updated if possible to + * reduce the total BW, after which the configuration for all CRTCs in + * @state must be recomputed with the updated @limits. + * + * Returns: + * - 0 if the configuration is valid + * - %-EAGAIN, if the configuration is invalid and @limits got updated + * with fallback values with which the configuration of all CRTCs in + * @state must be recomputed + * - Other negative error, if the configuration is invalid without a + * fallback possibility, or the check failed for another reason + */ +int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state, + struct intel_link_bw_limits *limits) +{ + struct drm_dp_mst_topology_mgr *mgr; + struct drm_dp_mst_topology_state *mst_state; + int ret; + int i; + + for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) { + ret = intel_dp_mst_check_fec_change(state, mgr, limits); + if (ret) + return ret; + + ret = intel_dp_mst_check_bw(state, mgr, mst_state, + limits); + if (ret) + return ret; + } + + return 0; +} + static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) @@ -478,19 +801,23 @@ static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder, * that shares the same MST stream as mode changed, * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do * a fastset when possible. + * + * On TGL+ this is required since each stream goes through a master transcoder, + * so if the master transcoder needs a modeset, all other streams in the + * topology need a modeset. All platforms need to add the atomic state + * for all streams in the topology, since a modeset on one may require + * changing the MST link BW usage of the others, which in turn needs a + * recomputation of the corresponding CRTC states. 
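intel_dp_mst_atomic_check_link() is designed to be driven by a retry loop in the global atomic check: whenever it returns -EAGAIN, the caller recomputes every CRTC state with the reduced limits. A minimal sketch of that loop; the two helpers marked as placeholders stand in for code outside this patch:

static int check_link_with_fallback(struct intel_atomic_state *state)
{
	struct intel_link_bw_limits limits;
	int ret;

	init_link_bw_limits(state, &limits);			/* placeholder */

	for (;;) {
		ret = compute_all_crtc_states(state, &limits);	/* placeholder */
		if (ret)
			return ret;

		ret = intel_dp_mst_atomic_check_link(state, &limits);
		if (ret != -EAGAIN)
			return ret;
		/* limits tightened (FEC forced / link bpp reduced): retry */
	}
}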
*/ static int -intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, - struct intel_atomic_state *state) +intel_dp_mst_atomic_topology_check(struct intel_connector *connector, + struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct drm_connector_list_iter connector_list_iter; struct intel_connector *connector_iter; int ret = 0; - if (DISPLAY_VER(dev_priv) < 12) - return 0; - if (!intel_connector_needs_modeset(state, &connector->base)) return 0; @@ -544,7 +871,7 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, if (ret) return ret; - ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state); + ret = intel_dp_mst_atomic_topology_check(intel_connector, state); if (ret) return ret; @@ -586,10 +913,6 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state, struct intel_dp *intel_dp = &dig_port->dp; struct intel_connector *connector = to_intel_connector(old_conn_state->connector); - struct drm_dp_mst_topology_state *new_mst_state = - drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr); - struct drm_dp_mst_atomic_payload *new_payload = - drm_atomic_get_mst_payload_state(new_mst_state, connector->port); struct drm_i915_private *i915 = to_i915(connector->base.dev); drm_dbg_kms(&i915->drm, "active links %d\n", @@ -597,9 +920,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state, intel_hdcp_disable(intel_mst->connector); - drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload); - - intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); + intel_dp_sink_disable_decompression(state, connector, old_crtc_state); } static void intel_mst_post_disable_dp(struct intel_atomic_state *state, @@ -633,6 +954,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, intel_disable_transcoder(old_crtc_state); + drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload); + clear_act_sent(encoder, old_crtc_state); intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), @@ -645,6 +968,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, intel_ddi_disable_transcoder_func(old_crtc_state); + intel_dsc_disable(old_crtc_state); + if (DISPLAY_VER(dev_priv) >= 9) skl_scaler_disable(old_crtc_state); else @@ -661,9 +986,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, * BSpec 4287: disable DIP after the transcoder is disabled and before * the transcoder clock select is set to none. 
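Taken together, the hunks above reorder the MST stream teardown roughly as follows (a condensed outline of the two functions after this patch, not literal driver code):

/* intel_mst_disable_dp() */
intel_hdcp_disable(intel_mst->connector);
intel_dp_sink_disable_decompression(state, connector, old_crtc_state);

/* intel_mst_post_disable_dp() */
intel_disable_transcoder(old_crtc_state);
drm_dp_remove_payload_part1(...);	/* moved here from intel_mst_disable_dp() */
clear_act_sent(encoder, old_crtc_state);
/* ... remaining transcoder/payload teardown ... */
intel_ddi_disable_transcoder_func(old_crtc_state);
intel_dsc_disable(old_crtc_state);	/* new: per-stream DSC teardown */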
*/ - if (last_mst_stream) - intel_dp_set_infoframes(&dig_port->base, false, - old_crtc_state, NULL); + intel_dp_set_infoframes(&dig_port->base, false, + old_crtc_state, NULL); /* * From TGL spec: "If multi-stream slave transcoder: Configure * Transcoder Clock Select to direct no clock to the transcoder" @@ -753,6 +1077,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true); + intel_dp_sink_enable_decompression(state, connector, pipe_config); + if (first_mst_stream) dig_port->base.pre_enable(state, &dig_port->base, pipe_config, NULL); @@ -775,6 +1101,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream) intel_ddi_enable_transcoder_clock(encoder, pipe_config); + intel_dsc_dp_pps_write(&dig_port->base, pipe_config); intel_ddi_set_dp_msa(pipe_config, conn_state); } @@ -791,11 +1118,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr); enum transcoder trans = pipe_config->cpu_transcoder; + bool first_mst_stream = intel_dp->active_mst_links == 1; drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); - clear_act_sent(encoder, pipe_config); - if (intel_dp_is_uhbr(pipe_config)) { const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; @@ -809,6 +1135,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_transcoder_func(encoder, pipe_config); + clear_act_sent(encoder, pipe_config); + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0, TRANS_DDI_DP_VC_PAYLOAD_ALLOC); @@ -817,15 +1145,16 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, wait_for_act_sent(encoder, pipe_config); + if (first_mst_stream) + intel_ddi_wait_for_fec_status(encoder, pipe_config, true); + drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base, drm_atomic_get_mst_payload_state(mst_state, connector->port)); - if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable) - intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0, - FECSTALL_DIS_DPTSTREAM_DPTTG); - else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable) - intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0, - FECSTALL_DIS_DPTSTREAM_DPTTG); + if (DISPLAY_VER(dev_priv) >= 12) + intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans), + FECSTALL_DIS_DPTSTREAM_DPTTG, + pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0); intel_audio_sdp_split_update(pipe_config); @@ -833,12 +1162,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, intel_crtc_vblank_on(pipe_config); - intel_audio_codec_enable(encoder, pipe_config, conn_state); - - /* Enable hdcp if it's desired */ - if (conn_state->content_protection == - DRM_MODE_CONTENT_PROTECTION_DESIRED) - intel_hdcp_enable(state, encoder, pipe_config, conn_state); + intel_hdcp_enable(state, encoder, pipe_config, conn_state); } static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, @@ -977,6 +1301,18 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, if (ret) return ret; + /* + * TODO: + * - Also check if compression would allow for the mode + * - Calculate the overhead using drm_dp_bw_overhead() / + * drm_dp_bw_channel_coding_efficiency(), similarly to the + * compute config code, as drm_dp_calc_pbn_mode() doesn't + * account with all the overheads. 
+ * - Check here and during compute config the BW reported by + * DFP_Link_Available_Payload_Bandwidth_Number (or the + * corresponding link capabilities of the sink) in case the + * stream is uncompressed for it by the last branch device. + */ if (mode_rate > max_rate || mode->clock > max_dotclk || drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) { *status = MODE_CLOCK_HIGH; @@ -1002,7 +1338,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, return 0; } - if (DISPLAY_VER(dev_priv) >= 10 && + if (HAS_DSC_MST(dev_priv) && drm_dp_sink_supports_dsc(intel_connector->dp.dsc_dpcd)) { /* * TBD pass the connector BPC, @@ -1150,6 +1486,36 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp, intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector); } +static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct drm_dp_desc desc; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + + if (!connector->dp.dsc_decompression_aux) + return false; + + if (drm_dp_read_desc(connector->dp.dsc_decompression_aux, + &desc, true) < 0) + return false; + + if (!drm_dp_has_quirk(&desc, + DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC)) + return false; + + if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd) < 0) + return false; + + if (!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE)) + return false; + + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n", + connector->base.base.id, connector->base.name); + + return true; +} + static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop) @@ -1168,17 +1534,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo return NULL; intel_connector->get_hw_state = intel_dp_mst_get_hw_state; + intel_connector->sync_state = intel_dp_connector_sync_state; intel_connector->mst_port = intel_dp; intel_connector->port = port; drm_dp_mst_get_port_malloc(port); - /* - * TODO: set the AUX for the actual MST port decompressing the stream. - * At the moment the driver only supports enabling this globally in the - * first downstream MST branch, via intel_dp's (root port) AUX. 
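The TODO being removed here is resolved just below: instead of always using the root port's AUX, the connector now gets the AUX of the branch device that will actually decompress its stream, via drm_dp_mst_dsc_aux_for_port(). That helper returns NULL when no device on the path can do DSC, so a defensive caller sketch looks like this (illustration only):

struct drm_dp_aux *aux = drm_dp_mst_dsc_aux_for_port(port);

if (aux) {
	/* program DSC decompression through this device's DPCD */
} else {
	/* no decompression available; the stream must fit uncompressed */
}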
- */ - intel_connector->dp.dsc_decompression_aux = &intel_dp->aux; + intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port); intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector); + intel_connector->dp.dsc_hblank_expansion_quirk = + detect_dsc_hblank_expansion_quirk(intel_connector); connector = &intel_connector->base; ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, @@ -1271,6 +1635,8 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; intel_encoder->pre_enable = intel_mst_pre_enable_dp; intel_encoder->enable = intel_mst_enable_dp; + intel_encoder->audio_enable = intel_audio_codec_enable; + intel_encoder->audio_disable = intel_audio_codec_disable; intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; intel_encoder->get_config = intel_dp_mst_enc_get_config; intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check; @@ -1418,3 +1784,91 @@ int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state, return 0; } + +static struct intel_connector * +get_connector_in_state_for_crtc(struct intel_atomic_state *state, + const struct intel_crtc *crtc) +{ + struct drm_connector_state *old_conn_state; + struct drm_connector_state *new_conn_state; + struct drm_connector *_connector; + int i; + + for_each_oldnew_connector_in_state(&state->base, _connector, + old_conn_state, new_conn_state, i) { + struct intel_connector *connector = + to_intel_connector(_connector); + + if (old_conn_state->crtc == &crtc->base || + new_conn_state->crtc == &crtc->base) + return connector; + } + + return NULL; +} + +/** + * intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC + * @state: atomic state + * @crtc: CRTC for which to check the modeset requirement + * + * Check if any change in a MST topology requires a forced modeset on @crtc in + * this topology. One such change is enabling/disabling the DSC decompression + * state in the first branch device's UFP DPCD as required by one CRTC, while + * the other @crtc in the same topology is still active, requiring a full modeset + * on @crtc. 
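A plausible consumer of the check documented above (the actual call site is outside these hunks; forcing the modeset through uapi.mode_changed is illustrative):

struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
int i;

for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
	if (intel_dp_mst_crtc_needs_modeset(state, crtc))
		crtc_state->uapi.mode_changed = true; /* modeset the whole topology */
}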
+ */ +bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_connector *crtc_connector; + const struct drm_connector_state *conn_state; + const struct drm_connector *_connector; + int i; + + if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc), + INTEL_OUTPUT_DP_MST)) + return false; + + crtc_connector = get_connector_in_state_for_crtc(state, crtc); + + if (!crtc_connector) + /* None of the connectors in the topology needs modeset */ + return false; + + for_each_new_connector_in_state(&state->base, _connector, conn_state, i) { + const struct intel_connector *connector = + to_intel_connector(_connector); + const struct intel_crtc_state *new_crtc_state; + const struct intel_crtc_state *old_crtc_state; + struct intel_crtc *crtc_iter; + + if (connector->mst_port != crtc_connector->mst_port || + !conn_state->crtc) + continue; + + crtc_iter = to_intel_crtc(conn_state->crtc); + + new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter); + old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter); + + if (!intel_crtc_needs_modeset(new_crtc_state)) + continue; + + if (old_crtc_state->dsc.compression_enable == + new_crtc_state->dsc.compression_enable) + continue; + /* + * Toggling the decompression flag because of this stream in + * the first downstream branch device's UFP DPCD may reset the + * whole branch device. To avoid the reset while other streams + * are also active modeset the whole MST topology in this + * case. + */ + if (connector->dp.dsc_decompression_aux == + &connector->mst_port->aux) + return true; + } + + return false; +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h index f1815bb722..8ca1d59909 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.h +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h @@ -13,6 +13,7 @@ struct intel_crtc; struct intel_crtc_state; struct intel_digital_port; struct intel_dp; +struct intel_link_bw_limits; int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port); @@ -22,5 +23,9 @@ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state); bool intel_dp_mst_source_support(struct intel_dp *intel_dp); int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state, struct intel_crtc *crtc); +int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state, + struct intel_link_bw_limits *limits); +bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state, + struct intel_crtc *crtc); #endif /* __INTEL_DP_MST_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 62b93d097e..4ca910874a 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -666,6 +666,20 @@ enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port) } } +enum dpio_phy vlv_pipe_to_phy(enum pipe pipe) +{ + switch (pipe) { + default: + MISSING_CASE(pipe); + fallthrough; + case PIPE_A: + case PIPE_B: + return DPIO_PHY0; + case PIPE_C: + return DPIO_PHY1; + } +} + enum dpio_channel vlv_pipe_to_channel(enum pipe pipe) { switch (pipe) { @@ -689,50 +703,50 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = 
vlv_dig_port_to_channel(dig_port); - enum pipe pipe = crtc->pipe; + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); u32 val; int i; vlv_dpio_get(dev_priv); /* Clear calc init */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch)); val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val); if (crtc_state->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch)); val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val); } - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW9(ch)); val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW9(ch), val); if (crtc_state->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW9(ch)); val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW9(ch), val); } /* Program swing deemph */ for (i = 0; i < crtc_state->lane_count; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); + val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i)); val &= ~DPIO_SWING_DEEMPH9P5_MASK; val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); + vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val); } /* Program swing margin */ for (i = 0; i < crtc_state->lane_count; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); + val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i)); val &= ~DPIO_SWING_MARGIN000_MASK; val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; @@ -745,7 +759,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); + vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val); } /* @@ -755,23 +769,23 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder, * 27 for ch0 and ch1. 
*/ for (i = 0; i < crtc_state->lane_count; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); + val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW3(ch, i)); if (uniq_trans_scale) val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; else val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); + vlv_dpio_write(dev_priv, phy, CHV_TX_DW3(ch, i), val); } /* Start swing calculation */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch)); val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val); if (crtc_state->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch)); val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val); } vlv_dpio_put(dev_priv); @@ -782,43 +796,43 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, bool reset) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum pipe pipe = crtc->pipe; + enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); u32 val; - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch)); if (reset) val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); else val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW0(ch), val); if (crtc_state->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW0(ch)); if (reset) val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); else val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW0(ch), val); } - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW1(ch)); val |= CHV_PCS_REQ_SOFTRESET_EN; if (reset) val &= ~DPIO_PCS_CLK_SOFT_RESET; else val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW1(ch), val); if (crtc_state->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW1(ch)); val |= CHV_PCS_REQ_SOFTRESET_EN; if (reset) val &= ~DPIO_PCS_CLK_SOFT_RESET; else val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW1(ch), val); } } @@ -829,6 +843,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); enum pipe pipe = crtc->pipe; unsigned int lane_mask = intel_dp_unused_lane_mask(crtc_state->lane_count); @@ -851,40 +866,40 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder, /* program 
left/right clock distribution */ if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0); val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); if (ch == DPIO_CH0) val |= CHV_BUFLEFTENA1_FORCE; if (ch == DPIO_CH1) val |= CHV_BUFRIGHTENA1_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val); } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1); val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); if (ch == DPIO_CH0) val |= CHV_BUFLEFTENA2_FORCE; if (ch == DPIO_CH1) val |= CHV_BUFRIGHTENA2_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val); } /* program clock channel usage */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch)); val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; if (pipe != PIPE_B) val &= ~CHV_PCS_USEDCLKCHANNEL; else val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val); if (crtc_state->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch)); val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; if (pipe != PIPE_B) val &= ~CHV_PCS_USEDCLKCHANNEL; else val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val); } /* @@ -892,12 +907,12 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder, * matches the pipe, but here we need to * pick the CL based on the port. */ - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); + val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch)); if (pipe != PIPE_B) val &= ~CHV_CMN_USEDCLKCHANNEL; else val |= CHV_CMN_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val); vlv_dpio_put(dev_priv); } @@ -910,21 +925,21 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); - enum pipe pipe = crtc->pipe; + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); int data, i, stagger; u32 val; vlv_dpio_get(dev_priv); /* allow hardware to manage TX FIFO reset source */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch)); val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val); if (crtc_state->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch)); val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val); } /* Program Tx lane latency optimal setting*/ @@ -934,7 +949,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, data = 0x0; else data = (i == 1) ? 
0x0 : 0x1; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), + vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), data << DPIO_UPAR_SHIFT); } @@ -950,17 +965,17 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, else stagger = 0x2; - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch)); val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val); if (crtc_state->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch)); val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val); } - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW12(ch), DPIO_LANESTAGGER_STRAP(stagger) | DPIO_LANESTAGGER_STRAP_OVRD | DPIO_TX1_STAGGER_MASK(0x1f) | @@ -968,7 +983,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, DPIO_TX2_STAGGER_MULT(0)); if (crtc_state->lane_count > 2) { - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW12(ch), DPIO_LANESTAGGER_STRAP(stagger) | DPIO_LANESTAGGER_STRAP_OVRD | DPIO_TX1_STAGGER_MASK(0x1f) | @@ -998,19 +1013,20 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe; + enum dpio_phy phy = vlv_pipe_to_phy(pipe); u32 val; vlv_dpio_get(dev_priv); /* disable left/right clock distribution */ if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0); val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val); } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1); val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val); } vlv_dpio_put(dev_priv); @@ -1036,22 +1052,22 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder, struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel port = vlv_dig_port_to_channel(dig_port); - enum pipe pipe = crtc->pipe; + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); vlv_dpio_get(dev_priv); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), + vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), 0x00000000); + vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(port), demph_reg_value); + vlv_dpio_write(dev_priv, phy, VLV_TX_DW2(port), uniqtranscale_reg_value); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040); + vlv_dpio_write(dev_priv, phy, VLV_TX_DW3(port), 0x0C782040); if (tx3_demph) - vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph); + vlv_dpio_write(dev_priv, phy, VLV_TX3_DW4(port), tx3_demph); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 
DPIO_TX_OCALINIT_EN); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11(port), 0x00030000); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9(port), preemph_reg_value); + vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); vlv_dpio_put(dev_priv); } @@ -1063,24 +1079,24 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel port = vlv_dig_port_to_channel(dig_port); - enum pipe pipe = crtc->pipe; + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); /* Program Tx lane resets to default */ vlv_dpio_get(dev_priv); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | DPIO_PCS_CLK_SOFT_RESET); /* Fix up inter-pair skew failure */ - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12(port), 0x00750f00); + vlv_dpio_write(dev_priv, phy, VLV_TX_DW11(port), 0x00001500); + vlv_dpio_write(dev_priv, phy, VLV_TX_DW14(port), 0x40400000); vlv_dpio_put(dev_priv); } @@ ... @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum dpio_channel port = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; + enum dpio_phy phy = vlv_pipe_to_phy(pipe); u32 val; vlv_dpio_get(dev_priv); /* Enable clock channels for this port */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(port)); val = 0; if (pipe) val |= (1<<21); else val &= ~(1<<21); val |= 0x001000c4; - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8(port), val); /* Program lane clock */ - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14(port), 0x00760018); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23(port), 0x00400888); vlv_dpio_put(dev_priv); } @@ -1122,10 +1139,10 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum dpio_channel port = vlv_dig_port_to_channel(dig_port); - enum pipe pipe = crtc->pipe; + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); vlv_dpio_get(dev_priv); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), 0x00000000); + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), 0x00e00060); vlv_dpio_put(dev_priv); } diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h index 4d43dbbdf8..9adc4e8c17 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h @@ -44,6 +44,7 @@ u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder); enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port); enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port); +enum dpio_phy vlv_pipe_to_phy(enum pipe pipe); enum dpio_channel vlv_pipe_to_channel(enum pipe pipe); void chv_set_phy_signal_level(struct intel_encoder *encoder, @@ -116,6 +117,10 @@ static inline enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_p { return DPIO_PHY0; } +static inline enum dpio_phy vlv_pipe_to_phy(enum pipe pipe) +{ + return DPIO_PHY0; +} static inline enum dpio_channel vlv_pipe_to_channel(enum pipe pipe) { return DPIO_CH0; diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index d41c1dc9f6..3038655377 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -16,6 +16,7 @@ #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_lvds.h" +#include "intel_lvds_regs.h" #include "intel_panel.h" #include "intel_pps.h" #include "intel_snps_phy.h" @@ -311,7 +312,7 @@ static const struct intel_limit intel_limits_bxt = { * divided-down version of it. */ /* m1 is reserved as 0 in Pineview, n is a ring counter */ -int pnv_calc_dpll_params(int refclk, struct dpll *clock) +static int pnv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m2 + 2; clock->p = clock->p1 * clock->p2; @@ -342,7 +343,7 @@ int i9xx_calc_dpll_params(int refclk, struct dpll *clock) return clock->dot; } -int vlv_calc_dpll_params(int refclk, struct dpll *clock) +static int vlv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2 * 5; @@ -368,6 +369,176 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock) return clock->dot; } +static int i9xx_pll_refclk(struct drm_device *dev, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + u32 dpll = pipe_config->dpll_hw_state.dpll; + + if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) + return dev_priv->display.vbt.lvds_ssc_freq; + else if (HAS_PCH_SPLIT(dev_priv)) + return 120000; + else if (DISPLAY_VER(dev_priv) != 2) + return 96000; + else + return 48000; +} + +/* Returns the clock of the currently programmed mode of the given pipe. */ +void i9xx_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + u32 dpll = pipe_config->dpll_hw_state.dpll; + u32 fp; + struct dpll clock; + int port_clock; + int refclk = i9xx_pll_refclk(dev, pipe_config); + + if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) + fp = pipe_config->dpll_hw_state.fp0; + else + fp = pipe_config->dpll_hw_state.fp1; + + clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; + if (IS_PINEVIEW(dev_priv)) { + clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; + clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; + } else { + clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; + clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; + } + + if (DISPLAY_VER(dev_priv) != 2) { + if (IS_PINEVIEW(dev_priv)) + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> + DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); + else + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> + DPLL_FPA01_P1_POST_DIV_SHIFT); + + switch (dpll & DPLL_MODE_MASK) { + case DPLLB_MODE_DAC_SERIAL: + clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? + 5 : 10; + break; + case DPLLB_MODE_LVDS: + clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 
+ 7 : 14; + break; + default: + drm_dbg_kms(&dev_priv->drm, + "Unknown DPLL mode %08x in programmed " + "mode\n", (int)(dpll & DPLL_MODE_MASK)); + return; + } + + if (IS_PINEVIEW(dev_priv)) + port_clock = pnv_calc_dpll_params(refclk, &clock); + else + port_clock = i9xx_calc_dpll_params(refclk, &clock); + } else { + enum pipe lvds_pipe; + + if (IS_I85X(dev_priv) && + intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) && + lvds_pipe == crtc->pipe) { + u32 lvds = intel_de_read(dev_priv, LVDS); + + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> + DPLL_FPA01_P1_POST_DIV_SHIFT); + + if (lvds & LVDS_CLKB_POWER_UP) + clock.p2 = 7; + else + clock.p2 = 14; + } else { + if (dpll & PLL_P1_DIVIDE_BY_TWO) + clock.p1 = 2; + else { + clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> + DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; + } + if (dpll & PLL_P2_DIVIDE_BY_4) + clock.p2 = 4; + else + clock.p2 = 2; + } + + port_clock = i9xx_calc_dpll_params(refclk, &clock); + } + + /* + * This value includes pixel_multiplier. We will use + * port_clock to compute adjusted_mode.crtc_clock in the + * encoder's get_config() function. + */ + pipe_config->port_clock = port_clock; +} + +void vlv_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); + struct dpll clock; + u32 mdiv; + int refclk = 100000; + + /* In case of DSI, DPLL will not be used */ + if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) + return; + + vlv_dpio_get(dev_priv); + mdiv = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(crtc->pipe)); + vlv_dpio_put(dev_priv); + + clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; + clock.m2 = mdiv & DPIO_M2DIV_MASK; + clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; + clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; + clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; + + pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); +} + +void chv_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum dpio_channel port = vlv_pipe_to_channel(crtc->pipe); + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); + struct dpll clock; + u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; + int refclk = 100000; + + /* In case of DSI, DPLL will not be used */ + if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) + return; + + vlv_dpio_get(dev_priv); + cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(port)); + pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(port)); + pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(port)); + pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(port)); + pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port)); + vlv_dpio_put(dev_priv); + + clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; + clock.m2 = (pll_dw0 & 0xff) << 22; + if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) + clock.m2 |= pll_dw2 & 0x3fffff; + clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; + clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; + clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; + + pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); +} + /* * Returns whether the given set of divisors are valid for a given refclk with * the given connectors. 
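A worked example of the readback math in vlv_crtc_clock_get() above, with divider values picked for illustration (not read from any particular part): refclk is 100000 kHz, and vlv_calc_dpll_params() computes m = m1 * m2 and p = p1 * p2 * 5.

struct dpll clock = {
	.m1 = 2, .m2 = 27, .n = 1, .p1 = 2, .p2 = 2,
};

/*
 * m   = 2 * 27          = 54
 * p   = 2 * 2 * 5       = 20
 * vco = 100000 * 54 / 1 = 5400000 kHz
 * dot = 5400000 / 20    = 270000 kHz, i.e. a 2.7 GHz DP link rate
 */
int port_clock = vlv_calc_dpll_params(100000, &clock);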
@@ -1003,12 +1174,10 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state, static int mtl_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); - enum phy phy = intel_port_to_phy(i915, encoder->port); int ret; ret = intel_cx0pll_calc_state(crtc_state, encoder); @@ -1016,10 +1185,7 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state, return ret; /* TODO: Do the readback via intel_compute_shared_dplls() */ - if (intel_is_c10phy(i915, phy)) - crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10); - else - crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20); + crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state); crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); @@ -1645,7 +1811,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state) } static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, - enum pipe pipe) + enum dpio_phy phy) { u32 reg_val; @@ -1653,30 +1819,31 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, * PLLB opamp always calibrates to max value of 0x3f, force enable it * and set it to a reasonable value instead. */ - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); + reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1)); reg_val &= 0xffffff00; reg_val |= 0x00000030; - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val); - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); + reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13); reg_val &= 0x00ffffff; reg_val |= 0x8c000000; - vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); + vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val); - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); + reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1)); reg_val &= 0xffffff00; - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val); - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); + reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13); reg_val &= 0x00ffffff; reg_val |= 0xb0000000; - vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); + vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val); } static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); enum pipe pipe = crtc->pipe; u32 mdiv; u32 bestn, bestm1, bestm2, bestp1, bestp2; @@ -1694,18 +1861,18 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state) /* PLL B needs special handling */ if (pipe == PIPE_B) - vlv_pllb_recal_opamp(dev_priv, pipe); + vlv_pllb_recal_opamp(dev_priv, phy); /* Set up Tx target for periodic Rcomp update */ - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9_BCAST, 0x0100000f); /* Disable target IRef on PLL */ - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); + reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW8(pipe)); reg_val &= 0x00ffffff; - vlv_dpio_write(dev_priv, pipe, 
VLV_PLL_DW8(pipe), reg_val); + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW8(pipe), reg_val); /* Disable fast lock */ - vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); + vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610); /* Set idtafcrecal before PLL is enabled */ mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); @@ -1719,46 +1886,46 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state) * Note: don't use the DAC post divider as it seems unstable. */ mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv); mdiv |= DPIO_ENABLE_CALIBRATION; - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv); /* Set HBR and RBR LPF coefficients */ if (crtc_state->port_clock == 162000 || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe), 0x009f0003); else - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe), 0x00d0000f); if (intel_crtc_has_dp_encoder(crtc_state)) { /* Use SSC source */ if (pipe == PIPE_A) - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe), 0x0df40000); else - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe), 0x0df70000); } else { /* HDMI or VGA */ /* Use bend source */ if (pipe == PIPE_A) - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe), 0x0df70000); else - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe), 0x0df40000); } - coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); + coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(pipe)); coreclk = (coreclk & 0x0000ff00) | 0x01c00000; if (intel_crtc_has_dp_encoder(crtc_state)) coreclk |= 0x01000000; - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(pipe), coreclk); - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW11(pipe), 0x87871000); vlv_dpio_put(dev_priv); } @@ -1809,6 +1976,7 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum dpio_channel port = vlv_pipe_to_channel(pipe); + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); u32 loopfilter, tribuf_calcntr; u32 bestm2, bestp1, bestp2, bestm2_frac; u32 dpio_val; @@ -1825,39 +1993,39 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state) vlv_dpio_get(dev_priv); /* p1 and p2 divider */ - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(port), 5 << DPIO_CHV_S1_DIV_SHIFT | bestp1 << DPIO_CHV_P1_DIV_SHIFT | bestp2 << DPIO_CHV_P2_DIV_SHIFT | 1 << DPIO_CHV_K_DIV_SHIFT); /* Feedback post-divider - m2 */ - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(port), bestm2); /* Feedback refclk divider - n and m1 */ - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(port), DPIO_CHV_M1_DIV_BY_2 | 1 << DPIO_CHV_N_DIV_SHIFT); /* M2 fraction division */ - vlv_dpio_write(dev_priv, pipe, 
CHV_PLL_DW2(port), bestm2_frac); + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(port), bestm2_frac); /* M2 fraction division enable */ - dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); + dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port)); dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); if (bestm2_frac) dpio_val |= DPIO_CHV_FRAC_DIV_EN; - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(port), dpio_val); /* Program digital lock detect threshold */ - dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); + dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(port)); dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); if (!bestm2_frac) dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(port), dpio_val); /* Loop filter */ if (vco == 5400000) { @@ -1882,16 +2050,16 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state) loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); tribuf_calcntr = 0; } - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(port), loopfilter); - dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); + dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(port)); dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(port), dpio_val); /* AFC Recal */ - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), - vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), + vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)) | DPIO_AFC_RECAL); vlv_dpio_put(dev_priv); @@ -1903,14 +2071,15 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum dpio_channel port = vlv_pipe_to_channel(pipe); + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); u32 tmp; vlv_dpio_get(dev_priv); /* Enable back the 10bit clock to display controller */ - tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); + tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)); tmp |= DPIO_DCLKP_EN; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), tmp); vlv_dpio_put(dev_priv); @@ -2031,6 +2200,7 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) { enum dpio_channel port = vlv_pipe_to_channel(pipe); + enum dpio_phy phy = vlv_pipe_to_phy(pipe); u32 val; /* Make sure the pipe isn't still relying on us */ @@ -2047,9 +2217,9 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) vlv_dpio_get(dev_priv); /* Disable 10bit clock to display controller */ - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); + val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)); val &= ~DPIO_DCLKP_EN; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), val); vlv_dpio_put(dev_priv); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h index 
bbc30542f2..ac01bb19cc 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.h +++ b/drivers/gpu/drm/i915/display/intel_dpll.h @@ -20,8 +20,6 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, struct intel_crtc *crtc); int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc); -int vlv_calc_dpll_params(int refclk, struct dpll *clock); -int pnv_calc_dpll_params(int refclk, struct dpll *clock); int i9xx_calc_dpll_params(int refclk, struct dpll *clock); u32 i9xx_dpll_compute_fp(const struct dpll *dpll); void vlv_compute_dpll(struct intel_crtc_state *crtc_state); @@ -41,6 +39,13 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, struct dpll *best_clock); int chv_calc_dpll_params(int refclk, struct dpll *pll_clock); +void i9xx_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config); +void vlv_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config); +void chv_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config); + void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe); void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe); diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index fb694ff9dc..87deb135c9 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -219,6 +219,26 @@ intel_tc_pll_enable_reg(struct drm_i915_private *i915, return MG_PLL_ENABLE(tc_port); } +static void _intel_enable_shared_dpll(struct drm_i915_private *i915, + struct intel_shared_dpll *pll) +{ + if (pll->info->power_domain) + pll->wakeref = intel_display_power_get(i915, pll->info->power_domain); + + pll->info->funcs->enable(i915, pll); + pll->on = true; +} + +static void _intel_disable_shared_dpll(struct drm_i915_private *i915, + struct intel_shared_dpll *pll) +{ + pll->info->funcs->disable(i915, pll); + pll->on = false; + + if (pll->info->power_domain) + intel_display_power_put(i915, pll->info->power_domain, pll->wakeref); +} + /** * intel_enable_shared_dpll - enable a CRTC's shared DPLL * @crtc_state: CRTC, and its state, which has a shared DPLL @@ -258,8 +278,8 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) drm_WARN_ON(&i915->drm, pll->on); drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name); - pll->info->funcs->enable(i915, pll); - pll->on = true; + + _intel_enable_shared_dpll(i915, pll); out: mutex_unlock(&i915->display.dpll.lock); @@ -304,8 +324,8 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) goto out; drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name); - pll->info->funcs->disable(i915, pll); - pll->on = false; + + _intel_disable_shared_dpll(i915, pll); out: mutex_unlock(&i915->display.dpll.lock); @@ -3844,18 +3864,6 @@ static void combo_pll_enable(struct drm_i915_private *i915, { i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll); - if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && - pll->info->id == DPLL_ID_EHL_DPLL4) { - - /* - * We need to disable DC states when this DPLL is enabled. - * This can be done by taking a reference on DPLL4 power - * domain. 
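The special case being removed here is re-expressed as data: a DPLL that needs an extra power domain now declares it in its dpll_info, and the shared enable/disable helpers take and release the wakeref generically. Condensed from the hunks in this patch:

static const struct dpll_info ehl_plls[] = {
	/* ... DPLL 0, DPLL 1 ... */
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
	  .power_domain = POWER_DOMAIN_DC_OFF, },
	{}
};

static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
				      struct intel_shared_dpll *pll)
{
	if (pll->info->power_domain)
		pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);

	pll->info->funcs->enable(i915, pll);
	pll->on = true;
}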
- */ - pll->wakeref = intel_display_power_get(i915, - POWER_DOMAIN_DC_OFF); - } - icl_pll_power_enable(i915, pll, enable_reg); icl_dpll_write(i915, pll); @@ -3951,11 +3959,6 @@ static void combo_pll_disable(struct drm_i915_private *i915, i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll); icl_pll_disable(i915, pll, enable_reg); - - if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && - pll->info->id == DPLL_ID_EHL_DPLL4) - intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, - pll->wakeref); } static void tbt_pll_disable(struct drm_i915_private *i915, @@ -4048,7 +4051,8 @@ static const struct intel_dpll_mgr icl_pll_mgr = { static const struct dpll_info ehl_plls[] = { { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, }, { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, }, - { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, }, + { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, + .power_domain = POWER_DOMAIN_DC_OFF, }, {} }; @@ -4378,12 +4382,8 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915, pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state); - if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && - pll->on && - pll->info->id == DPLL_ID_EHL_DPLL4) { - pll->wakeref = intel_display_power_get(i915, - POWER_DOMAIN_DC_OFF); - } + if (pll->on && pll->info->power_domain) + pll->wakeref = intel_display_power_get(i915, pll->info->power_domain); pll->state.pipe_mask = 0; for_each_intel_crtc(&i915->drm, crtc) { @@ -4430,8 +4430,7 @@ static void sanitize_dpll_state(struct drm_i915_private *i915, "%s enabled but not in use, disabling\n", pll->info->name); - pll->info->funcs->disable(i915, pll); - pll->on = false; + _intel_disable_shared_dpll(i915, pll); } void intel_dpll_sanitize_state(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 1a88ccc983..5cdec77cfd 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -27,6 +27,7 @@ #include <linux/types.h> +#include "intel_display_power.h" #include "intel_wakeref.h" #define for_each_shared_dpll(__i915, __pll, __i) \ @@ -270,6 +271,11 @@ struct dpll_info { */ enum intel_dpll_id id; + /** + * @power_domain: extra power domain required by the DPLL + */ + enum intel_display_power_domain power_domain; + #define INTEL_DPLL_ALWAYS_ON (1 << 0) #define INTEL_DPLL_IS_ALT_PORT_DPLL (1 << 1) /** diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index 48582b31b7..b29bceff73 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -9,8 +9,6 @@ #include "gt/gen8_ppgtt.h" #include "i915_drv.h" -#include "i915_reg.h" -#include "intel_de.h" #include "intel_display_types.h" #include "intel_dpt.h" #include "intel_fb.h" @@ -318,25 +316,3 @@ void intel_dpt_destroy(struct i915_address_space *vm) i915_vm_put(&dpt->vm); } -void intel_dpt_configure(struct intel_crtc *crtc) -{ - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - - if (DISPLAY_VER(i915) == 14) { - enum pipe pipe = crtc->pipe; - enum plane_id plane_id; - - for_each_plane_id_on_crtc(crtc, plane_id) { - if (plane_id == PLANE_CURSOR) - continue; - - intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id), - PLANE_CHICKEN_DISABLE_DPT, - i915->params.enable_dpt ?
0 : PLANE_CHICKEN_DISABLE_DPT);
-		}
-	} else if (DISPLAY_VER(i915) == 13) {
-		intel_de_rmw(i915, CHICKEN_MISC_2,
-			     CHICKEN_MISC_DISABLE_DPT,
-			     i915->params.enable_dpt ? 0 : CHICKEN_MISC_DISABLE_DPT);
-	}
-}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index d9a1665501..e18a9f767b 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -10,7 +10,6 @@ struct drm_i915_private;
 struct i915_address_space;
 struct i915_vma;
-struct intel_crtc;
 struct intel_framebuffer;
 
 void intel_dpt_destroy(struct i915_address_space *vm);
@@ -20,6 +19,5 @@ void intel_dpt_suspend(struct drm_i915_private *i915);
 void intel_dpt_resume(struct drm_i915_private *i915);
 struct i915_address_space *
 intel_dpt_create(struct intel_framebuffer *fb);
-void intel_dpt_configure(struct intel_crtc *crtc);
 
 #endif /* __INTEL_DPT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
new file mode 100644
index 0000000000..cdba47165c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dpt_common.h"
+
+void intel_dpt_configure(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+	if (DISPLAY_VER(i915) == 14) {
+		enum pipe pipe = crtc->pipe;
+		enum plane_id plane_id;
+
+		for_each_plane_id_on_crtc(crtc, plane_id) {
+			if (plane_id == PLANE_CURSOR)
+				continue;
+
+			intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
+				     PLANE_CHICKEN_DISABLE_DPT,
+				     i915->display.params.enable_dpt ? 0 :
+				     PLANE_CHICKEN_DISABLE_DPT);
+		}
+	} else if (DISPLAY_VER(i915) == 13) {
+		intel_de_rmw(i915, CHICKEN_MISC_2,
+			     CHICKEN_MISC_DISABLE_DPT,
+			     i915->display.params.enable_dpt ? 0 :
+			     CHICKEN_MISC_DISABLE_DPT);
+	}
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.h b/drivers/gpu/drm/i915/display/intel_dpt_common.h
new file mode 100644
index 0000000000..6d7de40512
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DPT_COMMON_H__
+#define __INTEL_DPT_COMMON_H__
+
+struct intel_crtc;
+
+void intel_dpt_configure(struct intel_crtc *crtc);
+
+#endif /* __INTEL_DPT_COMMON_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 15c3dd5d38..54d9abeb2d 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,9 +4,6 @@
  *
  */
 
-#include "gem/i915_gem_internal.h"
-#include "gem/i915_gem_lmem.h"
-
 #include "i915_drv.h"
 #include "i915_irq.h"
 #include "i915_reg.h"
@@ -14,12 +11,13 @@
 #include "intel_de.h"
 #include "intel_display_types.h"
 #include "intel_dsb.h"
+#include "intel_dsb_buffer.h"
 #include "intel_dsb_regs.h"
 #include "intel_vblank.h"
 #include "intel_vrr.h"
 #include "skl_watermark.h"
 
-struct i915_vma;
+#define CACHELINE_BYTES 64
 
 enum dsb_id {
 	INVALID_DSB = -1,
@@ -32,8 +30,7 @@ enum dsb_id {
 struct intel_dsb {
 	enum dsb_id id;
 
-	u32 *cmd_buf;
-	struct i915_vma *vma;
+	struct intel_dsb_buffer dsb_buf;
 	struct intel_crtc *crtc;
 
 	/*
@@ -109,15 +106,17 @@ static void intel_dsb_dump(struct intel_dsb *dsb)
 {
 	struct intel_crtc *crtc = dsb->crtc;
 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-	const u32 *buf = dsb->cmd_buf;
 	int i;
 
 	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] DSB %d commands {\n",
 		    crtc->base.base.id, crtc->base.name, dsb->id);
 	for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
 		drm_dbg_kms(&i915->drm,
-			    " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			    i * 4, buf[i], buf[i+1], buf[i+2], buf[i+3]);
+			    " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
+			    intel_dsb_buffer_read(&dsb->dsb_buf, i),
+			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
+			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
+			    intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
 	drm_dbg_kms(&i915->drm, "}\n");
 }
 
@@ -129,8 +128,6 @@ static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
 
 static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
 {
-	u32 *buf = dsb->cmd_buf;
-
 	if (!assert_dsb_has_room(dsb))
 		return;
 
@@ -139,14 +136,13 @@ static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
 
 	dsb->ins_start_offset = dsb->free_pos;
 
-	buf[dsb->free_pos++] = ldw;
-	buf[dsb->free_pos++] = udw;
+	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, ldw);
+	intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, udw);
 }
 
 static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
 					u32 opcode, i915_reg_t reg)
 {
-	const u32 *buf = dsb->cmd_buf;
 	u32 prev_opcode, prev_reg;
 
 	/*
@@ -157,8 +153,10 @@ static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
 	if (dsb->free_pos == 0)
 		return false;
 
-	prev_opcode = buf[dsb->ins_start_offset + 1] & ~DSB_REG_VALUE_MASK;
-	prev_reg = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
+	prev_opcode = intel_dsb_buffer_read(&dsb->dsb_buf,
+					    dsb->ins_start_offset + 1) & ~DSB_REG_VALUE_MASK;
+	prev_reg = intel_dsb_buffer_read(&dsb->dsb_buf,
+					 dsb->ins_start_offset + 1) & DSB_REG_VALUE_MASK;
 
 	return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
 }
@@ -191,6 +189,8 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_
 void intel_dsb_reg_write(struct intel_dsb *dsb,
 			 i915_reg_t reg, u32 val)
 {
+	u32 old_val;
+
 	/*
 	 * For example the buffer will look like below for 3 dwords for auto
 	 * increment register:
@@ -214,31 +214,32 @@ void intel_dsb_reg_write(struct intel_dsb *dsb,
 			   (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
 			   i915_mmio_reg_offset(reg));
 	} else {
-		u32 *buf = dsb->cmd_buf;
-
 		if (!assert_dsb_has_room(dsb))
 			return;
 
 		/* convert to indexed write? */
 		if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
-			u32 prev_val = buf[dsb->ins_start_offset + 0];
+			u32 prev_val = intel_dsb_buffer_read(&dsb->dsb_buf,
+							     dsb->ins_start_offset + 0);
 
-			buf[dsb->ins_start_offset + 0] = 1; /* count */
-			buf[dsb->ins_start_offset + 1] =
-				(DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
-				i915_mmio_reg_offset(reg);
-			buf[dsb->ins_start_offset + 2] = prev_val;
+			intel_dsb_buffer_write(&dsb->dsb_buf,
+					       dsb->ins_start_offset + 0, 1); /* count */
+			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1,
+					       (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
+					       i915_mmio_reg_offset(reg));
+			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, prev_val);
 
 			dsb->free_pos++;
 		}
 
-		buf[dsb->free_pos++] = val;
+		intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
 		/* Update the count */
-		buf[dsb->ins_start_offset]++;
+		old_val = intel_dsb_buffer_read(&dsb->dsb_buf, dsb->ins_start_offset);
+		intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset, old_val + 1);
 
 		/* if number of data words is odd, then the last dword should be 0. */
 		if (dsb->free_pos & 0x1)
-			buf[dsb->free_pos] = 0;
+			intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
 	}
 }
 
@@ -297,8 +298,8 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
 	aligned_tail = ALIGN(tail, CACHELINE_BYTES);
 
 	if (aligned_tail > tail)
-		memset(&dsb->cmd_buf[dsb->free_pos], 0,
-		       aligned_tail - tail);
+		intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
+					aligned_tail - tail);
 
 	dsb->free_pos = aligned_tail / 4;
 }
@@ -317,7 +318,7 @@ void intel_dsb_finish(struct intel_dsb *dsb)
 
 	intel_dsb_align_tail(dsb);
 
-	i915_gem_object_flush_map(dsb->vma->obj);
+	intel_dsb_buffer_flush_map(&dsb->dsb_buf);
 }
 
 static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
@@ -375,7 +376,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
 			  dsb_chicken(crtc));
 
 	intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
-			  i915_ggtt_offset(dsb->vma));
+			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
 
 	if (dewake_scanline >= 0) {
 		int diff, hw_dewake_scanline;
@@ -397,7 +398,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
 	}
 
 	intel_de_write_fw(dev_priv, DSB_TAIL(pipe, dsb->id),
-			  i915_ggtt_offset(dsb->vma) + tail);
+			  intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
 }
 
 /**
@@ -422,7 +423,7 @@ void intel_dsb_wait(struct intel_dsb *dsb)
 	enum pipe pipe = crtc->pipe;
 
 	if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
-		u32 offset = i915_ggtt_offset(dsb->vma);
+		u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
 
 		intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
 				  DSB_ENABLE | DSB_HALT);
@@ -459,12 +460,9 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-	struct drm_i915_gem_object *obj;
 	intel_wakeref_t wakeref;
 	struct intel_dsb *dsb;
-	struct i915_vma *vma;
 	unsigned int size;
-	u32 *buf;
 
 	if (!HAS_DSB(i915))
 		return NULL;
@@ -478,37 +476,13 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
 	/* ~1 qword per instruction, full cachelines */
 	size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
 
-	if (HAS_LMEM(i915)) {
-		obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size),
-						  I915_BO_ALLOC_CONTIGUOUS);
-		if (IS_ERR(obj))
-			goto out_put_rpm;
-	} else {
-		obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
-		if (IS_ERR(obj))
-			goto out_put_rpm;
-
-		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
-	}
-
-	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
-	if (IS_ERR(vma)) {
-		i915_gem_object_put(obj);
-		goto out_put_rpm;
-	}
-
-	buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
-	if (IS_ERR(buf)) {
-		i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+	if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
 		goto out_put_rpm;
-	}
 
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 
 	dsb->id = DSB1;
-	dsb->vma = vma;
 	dsb->crtc = crtc;
-	dsb->cmd_buf = buf;
 	dsb->size = size / 4; /* in dwords */
 	dsb->free_pos = 0;
 	dsb->ins_start_offset = 0;
@@ -536,6 +510,6 @@ out:
  */
 void intel_dsb_cleanup(struct intel_dsb *dsb)
 {
-	i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
+	intel_dsb_buffer_cleanup(&dsb->dsb_buf);
 	kfree(dsb);
 }
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_buffer.c b/drivers/gpu/drm/i915/display/intel_dsb_buffer.c
new file mode 100644
index 0000000000..c77d48bda2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb_buffer.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023, Intel Corporation.
+ */
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_display_types.h"
+#include "intel_dsb_buffer.h"
+
+u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
+{
+	return i915_ggtt_offset(dsb_buf->vma);
+}
+
+void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
+{
+	dsb_buf->cmd_buf[idx] = val;
+}
+
+u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
+{
+	return dsb_buf->cmd_buf[idx];
+}
+
+void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
+{
+	WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
+
+	memset(&dsb_buf->cmd_buf[idx], val, size);
+}
+
+bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
+{
+	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	u32 *buf;
+
+	if (HAS_LMEM(i915)) {
+		obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size),
+						  I915_BO_ALLOC_CONTIGUOUS);
+		if (IS_ERR(obj))
+			return false;
+	} else {
+		obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
+		if (IS_ERR(obj))
+			return false;
+
+		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+	}
+
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+	if (IS_ERR(vma)) {
+		i915_gem_object_put(obj);
+		return false;
+	}
+
+	buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
+	if (IS_ERR(buf)) {
+		i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+		return false;
+	}
+
+	dsb_buf->vma = vma;
+	dsb_buf->cmd_buf = buf;
+	dsb_buf->buf_size = size;
+
+	return true;
+}
+
+void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
+{
+	i915_vma_unpin_and_release(&dsb_buf->vma, I915_VMA_RELEASE_MAP);
+}
+
+void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
+{
+	i915_gem_object_flush_map(dsb_buf->vma->obj);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_buffer.h b/drivers/gpu/drm/i915/display/intel_dsb_buffer.h
new file mode 100644
index 0000000000..425acd3939
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb_buffer.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_DSB_BUFFER_H
+#define _INTEL_DSB_BUFFER_H
+
+#include <linux/types.h>
+
+struct intel_crtc;
+struct i915_vma;
+
+struct intel_dsb_buffer {
+	u32 *cmd_buf;
+	struct i915_vma *vma;
+	size_t buf_size;
+};
+
+u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf);
+void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val);
+u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx);
+void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size);
+bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf,
+			     size_t size);
+void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf);
+void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 24b2cbcfc1..a5d7fc8418 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -55,43 +55,6 @@
 #define MIPI_VIRTUAL_CHANNEL_SHIFT	1
 #define MIPI_PORT_SHIFT			3
 
-/* base offsets for gpio pads */
-#define VLV_GPIO_NC_0_HV_DDI0_HPD	0x4130
-#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA	0x4120
-#define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL	0x4110
-#define VLV_GPIO_NC_3_PANEL0_VDDEN	0x4140
-#define VLV_GPIO_NC_4_PANEL0_BKLTEN	0x4150
-#define VLV_GPIO_NC_5_PANEL0_BKLTCTL	0x4160
-#define VLV_GPIO_NC_6_HV_DDI1_HPD	0x4180
-#define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA	0x4190
-#define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL	0x4170
-#define VLV_GPIO_NC_9_PANEL1_VDDEN	0x4100
-#define VLV_GPIO_NC_10_PANEL1_BKLTEN	0x40E0
-#define VLV_GPIO_NC_11_PANEL1_BKLTCTL	0x40F0
-
-#define VLV_GPIO_PCONF0(base_offset)	(base_offset)
-#define VLV_GPIO_PAD_VAL(base_offset)	((base_offset) + 8)
-
-struct gpio_map {
-	u16 base_offset;
-	bool init;
-};
-
-static struct gpio_map vlv_gpio_table[] = {
-	{ VLV_GPIO_NC_0_HV_DDI0_HPD },
-	{ VLV_GPIO_NC_1_HV_DDI0_DDC_SDA },
-	{ VLV_GPIO_NC_2_HV_DDI0_DDC_SCL },
-	{ VLV_GPIO_NC_3_PANEL0_VDDEN },
-	{ VLV_GPIO_NC_4_PANEL0_BKLTEN },
-	{ VLV_GPIO_NC_5_PANEL0_BKLTCTL },
-	{ VLV_GPIO_NC_6_HV_DDI1_HPD },
-	{ VLV_GPIO_NC_7_HV_DDI1_DDC_SDA },
-	{ VLV_GPIO_NC_8_HV_DDI1_DDC_SCL },
-	{ VLV_GPIO_NC_9_PANEL1_VDDEN },
-	{ VLV_GPIO_NC_10_PANEL1_BKLTEN },
-	{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
-};
-
 struct i2c_adapter_lookup {
 	u16 slave_addr;
 	struct intel_dsi *intel_dsi;
@@ -103,19 +66,6 @@ struct i2c_adapter_lookup {
 #define CHV_GPIO_IDX_START_SW		100
 #define CHV_GPIO_IDX_START_SE		198
 
-#define CHV_VBT_MAX_PINS_PER_FMLY	15
-
-#define CHV_GPIO_PAD_CFG0(f, i)		(0x4400 + (f) * 0x400 + (i) * 8)
-#define  CHV_GPIO_GPIOEN		(1 << 15)
-#define  CHV_GPIO_GPIOCFG_GPIO		(0 << 8)
-#define  CHV_GPIO_GPIOCFG_GPO		(1 << 8)
-#define  CHV_GPIO_GPIOCFG_GPI		(2 << 8)
-#define  CHV_GPIO_GPIOCFG_HIZ		(3 << 8)
-#define  CHV_GPIO_GPIOTXSTATE(state)	((!!(state)) << 1)
-
-#define CHV_GPIO_PAD_CFG1(f, i)		(0x4400 + (f) * 0x400 + (i) * 8 + 4)
-#define  CHV_GPIO_CFGLOCK		(1 << 31)
-
 /* ICL DSI Display GPIO Pins */
 #define ICL_GPIO_DDSP_HPD_A		0
 #define ICL_GPIO_L_VDDEN_1		1
@@ -142,7 +92,7 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
 	if (seq_port) {
 		if (intel_dsi->ports & BIT(PORT_B))
 			return PORT_B;
-		else if (intel_dsi->ports & BIT(PORT_C))
+		if (intel_dsi->ports & BIT(PORT_C))
 			return PORT_C;
 	}
 
@@ -243,75 +193,93 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
 	return data;
 }
 
-static void vlv_exec_gpio(struct intel_connector *connector,
-			  u8 gpio_source, u8 gpio_index, bool value)
+static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
+			       const char *con_id, u8 idx, bool value)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	struct gpio_map *map;
-	u16 pconf0, padval;
-	u32 tmp;
-	u8 port;
-
-	if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
-		drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n",
-			    gpio_index);
-		return;
+	/* XXX: this table is a quick ugly hack. */
+	static struct gpio_desc *soc_gpio_table[U8_MAX + 1];
+	struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index];
+
+	if (gpio_desc) {
+		gpiod_set_value(gpio_desc, value);
+	} else {
+		gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx,
+						 value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
+		if (IS_ERR(gpio_desc)) {
+			drm_err(&dev_priv->drm,
+				"GPIO index %u request failed (%pe)\n",
+				gpio_index, gpio_desc);
+			return;
+		}
+
+		soc_gpio_table[gpio_index] = gpio_desc;
 	}
+}
 
-	map = &vlv_gpio_table[gpio_index];
+static void soc_opaque_gpio_set_value(struct intel_connector *connector,
+				      u8 gpio_index, const char *chip,
+				      const char *con_id, u8 idx, bool value)
+{
+	struct gpiod_lookup_table *lookup;
 
-	if (connector->panel.vbt.dsi.seq_version >= 3) {
-		/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
-		port = IOSF_PORT_GPIO_NC;
-	} else {
-		if (gpio_source == 0) {
-			port = IOSF_PORT_GPIO_NC;
-		} else if (gpio_source == 1) {
+	lookup = kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
+	if (!lookup)
+		return;
+
+	lookup->dev_id = "0000:00:02.0";
+	lookup->table[0] =
+		GPIO_LOOKUP_IDX(chip, idx, con_id, idx, GPIO_ACTIVE_HIGH);
+
+	gpiod_add_lookup_table(lookup);
+
+	soc_gpio_set_value(connector, gpio_index, con_id, idx, value);
+
+	gpiod_remove_lookup_table(lookup);
+	kfree(lookup);
+}
+
+static void vlv_gpio_set_value(struct intel_connector *connector,
+			       u8 gpio_source, u8 gpio_index, bool value)
+{
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+	/* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+	if (connector->panel.vbt.dsi.seq_version < 3) {
+		if (gpio_source == 1) {
 			drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
 			return;
-		} else {
+		}
+		if (gpio_source > 1) {
 			drm_dbg_kms(&dev_priv->drm,
 				    "unknown gpio source %u\n", gpio_source);
 			return;
 		}
 	}
 
-	pconf0 = VLV_GPIO_PCONF0(map->base_offset);
-	padval = VLV_GPIO_PAD_VAL(map->base_offset);
-
-	vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
-	if (!map->init) {
-		/* FIXME: remove constant below */
-		vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
-		map->init = true;
-	}
-
-	tmp = 0x4 | value;
-	vlv_iosf_sb_write(dev_priv, port, padval, tmp);
-	vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+	soc_opaque_gpio_set_value(connector, gpio_index,
+				  "INT33FC:01", "Panel N", gpio_index, value);
 }
 
-static void chv_exec_gpio(struct intel_connector *connector,
-			  u8 gpio_source, u8 gpio_index, bool value)
+static void chv_gpio_set_value(struct intel_connector *connector,
+			       u8 gpio_source, u8 gpio_index, bool value)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	u16 cfg0, cfg1;
-	u16 family_num;
-	u8 port;
 
 	if (connector->panel.vbt.dsi.seq_version >= 3) {
 		if (gpio_index >= CHV_GPIO_IDX_START_SE) {
 			/* XXX: it's unclear whether 255->57 is part of SE. */
-			gpio_index -= CHV_GPIO_IDX_START_SE;
-			port = CHV_IOSF_PORT_GPIO_SE;
+			soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:03", "Panel SE",
+						  gpio_index - CHV_GPIO_IDX_START_SE, value);
 		} else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
-			gpio_index -= CHV_GPIO_IDX_START_SW;
-			port = CHV_IOSF_PORT_GPIO_SW;
+			soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:00", "Panel SW",
+						  gpio_index - CHV_GPIO_IDX_START_SW, value);
 		} else if (gpio_index >= CHV_GPIO_IDX_START_E) {
-			gpio_index -= CHV_GPIO_IDX_START_E;
-			port = CHV_IOSF_PORT_GPIO_E;
+			soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:02", "Panel E",
+						  gpio_index - CHV_GPIO_IDX_START_E, value);
 		} else {
-			port = CHV_IOSF_PORT_GPIO_N;
+			soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:01", "Panel N",
+						  gpio_index - CHV_GPIO_IDX_START_N, value);
 		}
 	} else {
 		/* XXX: The spec is unclear about CHV GPIO on seq v2 */
@@ -328,56 +296,15 @@ static void chv_exec_gpio(struct intel_connector *connector,
 			return;
 		}
 
-		port = CHV_IOSF_PORT_GPIO_N;
-	}
-
-	family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
-	gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
-
-	cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
-	cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
-
-	vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
-	vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
-	vlv_iosf_sb_write(dev_priv, port, cfg0,
-			  CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
-			  CHV_GPIO_GPIOTXSTATE(value));
-	vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
-}
-
-static void bxt_exec_gpio(struct intel_connector *connector,
-			  u8 gpio_source, u8 gpio_index, bool value)
-{
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	/* XXX: this table is a quick ugly hack. */
-	static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
-	struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
-
-	if (!gpio_desc) {
-		gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
-						 NULL, gpio_index,
-						 value ? GPIOD_OUT_LOW :
-						 GPIOD_OUT_HIGH);
-
-		if (IS_ERR_OR_NULL(gpio_desc)) {
-			drm_err(&dev_priv->drm,
-				"GPIO index %u request failed (%ld)\n",
-				gpio_index, PTR_ERR(gpio_desc));
-			return;
-		}
-
-		bxt_gpio_table[gpio_index] = gpio_desc;
+		soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:01", "Panel N",
+					  gpio_index - CHV_GPIO_IDX_START_N, value);
 	}
-
-	gpiod_set_value(gpio_desc, value);
 }
 
-static void icl_exec_gpio(struct intel_connector *connector,
-			  u8 gpio_source, u8 gpio_index, bool value)
+static void bxt_gpio_set_value(struct intel_connector *connector,
+			       u8 gpio_index, bool value)
 {
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
-	drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
+	soc_gpio_set_value(connector, gpio_index, NULL, gpio_index, value);
 }
 
 enum {
@@ -462,44 +389,45 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 {
 	struct drm_device *dev = intel_dsi->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct intel_connector *connector = intel_dsi->attached_connector;
-	u8 gpio_source, gpio_index = 0, gpio_number;
+	u8 gpio_source = 0, gpio_index = 0, gpio_number;
 	bool value;
-	bool native = DISPLAY_VER(dev_priv) >= 11;
+	int size;
+	bool native = DISPLAY_VER(i915) >= 11;
 
-	if (connector->panel.vbt.dsi.seq_version >= 3)
-		gpio_index = *data++;
+	if (connector->panel.vbt.dsi.seq_version >= 3) {
+		size = 3;
 
-	gpio_number = *data++;
+		gpio_index = data[0];
+		gpio_number = data[1];
+		value = data[2] & BIT(0);
 
-	/* gpio source in sequence v2 only */
-	if (connector->panel.vbt.dsi.seq_version == 2)
-		gpio_source = (*data >> 1) & 3;
-	else
-		gpio_source = 0;
+		if (connector->panel.vbt.dsi.seq_version >= 4 && data[2] & BIT(1))
+			native = false;
+	} else {
+		size = 2;
 
-	if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
-		native = false;
+		gpio_number = data[0];
+		value = data[1] & BIT(0);
 
-	/* pull up/down */
-	value = *data++ & 1;
+		if (connector->panel.vbt.dsi.seq_version == 2)
+			gpio_source = (data[1] >> 1) & 3;
+	}
 
-	drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+	drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
 		    gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
 
 	if (native)
-		icl_native_gpio_set_value(dev_priv, gpio_number, value);
-	else if (DISPLAY_VER(dev_priv) >= 11)
-		icl_exec_gpio(connector, gpio_source, gpio_index, value);
-	else if (IS_VALLEYVIEW(dev_priv))
-		vlv_exec_gpio(connector, gpio_source, gpio_number, value);
-	else if (IS_CHERRYVIEW(dev_priv))
-		chv_exec_gpio(connector, gpio_source, gpio_number, value);
-	else
-		bxt_exec_gpio(connector, gpio_source, gpio_index, value);
-
-	return data;
+		icl_native_gpio_set_value(i915, gpio_number, value);
+	else if (DISPLAY_VER(i915) >= 9)
+		bxt_gpio_set_value(connector, gpio_index, value);
+	else if (IS_VALLEYVIEW(i915))
+		vlv_gpio_set_value(connector, gpio_source, gpio_number, value);
+	else if (IS_CHERRYVIEW(i915))
+		chv_gpio_set_value(connector, gpio_source, gpio_number, value);
+
+	return data + size;
 }
 
 #ifdef CONFIG_ACPI
@@ -658,6 +586,7 @@ static const fn_mipi_elem_exec exec_elem[] = {
  */
 
 static const char * const seq_name[] = {
+	[MIPI_SEQ_END] = "MIPI_SEQ_END",
	[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
 	[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
 	[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
@@ -673,10 +602,10 @@ static const char * const seq_name[] = {
 
 static const char *sequence_name(enum mipi_seq seq_id)
 {
-	if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
+	if (seq_id < ARRAY_SIZE(seq_name))
 		return seq_name[seq_id];
-	else
-		return "(unknown)";
+
+	return "(unknown)";
 }
 
 static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
@@ -707,13 +636,10 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
 	if (connector->panel.vbt.dsi.seq_version >= 3)
 		data += 4;
 
-	while (1) {
+	while (*data != MIPI_SEQ_ELEM_END) {
 		u8 operation_byte = *data++;
 		u8 operation_size = 0;
 
-		if (operation_byte == MIPI_SEQ_ELEM_END)
-			break;
-
 		if (operation_byte < ARRAY_SIZE(exec_elem))
 			mipi_elem_exec = exec_elem[operation_byte];
 		else
@@ -873,36 +799,34 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 	 * multiply by 100 to preserve remainder
 	 */
 	if (intel_dsi->video_mode == BURST_MODE) {
-		if (mipi_config->target_burst_mode_freq) {
-			u32 bitrate = intel_dsi_bitrate(intel_dsi);
-
-			/*
-			 * Sometimes the VBT contains a slightly lower clock,
-			 * then the bitrate we have calculated, in this case
-			 * just replace it with the calculated bitrate.
-			 */
-			if (mipi_config->target_burst_mode_freq < bitrate &&
-			    intel_fuzzy_clock_check(
-					mipi_config->target_burst_mode_freq,
-					bitrate))
-				mipi_config->target_burst_mode_freq = bitrate;
-
-			if (mipi_config->target_burst_mode_freq < bitrate) {
-				drm_err(&dev_priv->drm,
-					"Burst mode freq is less than computed\n");
-				return false;
-			}
+		u32 bitrate;
 
-			burst_mode_ratio = DIV_ROUND_UP(
-				mipi_config->target_burst_mode_freq * 100,
-				bitrate);
+		if (mipi_config->target_burst_mode_freq == 0) {
+			drm_err(&dev_priv->drm, "Burst mode target is not set\n");
+			return false;
+		}
 
-			intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
-		} else {
-			drm_err(&dev_priv->drm,
-				"Burst mode target is not set\n");
+		bitrate = intel_dsi_bitrate(intel_dsi);
+
+		/*
+		 * Sometimes the VBT contains a slightly lower clock than
+		 * the bitrate we have calculated, in this case just replace it
+		 * with the calculated bitrate.
+		 */
+		if (mipi_config->target_burst_mode_freq < bitrate &&
+		    intel_fuzzy_clock_check(mipi_config->target_burst_mode_freq,
+					    bitrate))
+			mipi_config->target_burst_mode_freq = bitrate;
+
+		if (mipi_config->target_burst_mode_freq < bitrate) {
+			drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n");
 			return false;
 		}
+
+		burst_mode_ratio =
+			DIV_ROUND_UP(mipi_config->target_burst_mode_freq * 100, bitrate);
+
+		intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
 	} else
 		burst_mode_ratio = 100;
 
@@ -964,6 +888,7 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
 	struct intel_connector *connector = intel_dsi->attached_connector;
 	struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
 	enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+	struct gpiod_lookup_table *gpiod_lookup_table = NULL;
 	bool want_backlight_gpio = false;
 	bool want_panel_gpio = false;
 	struct pinctrl *pinctrl;
@@ -971,12 +896,12 @@
 
 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 	    mipi_config->pwm_blc == PPS_BLC_PMIC) {
-		gpiod_add_lookup_table(&pmic_panel_gpio_table);
+		gpiod_lookup_table = &pmic_panel_gpio_table;
 		want_panel_gpio = true;
 	}
 
 	if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
-		gpiod_add_lookup_table(&soc_panel_gpio_table);
+		gpiod_lookup_table = &soc_panel_gpio_table;
 		want_panel_gpio = true;
 		want_backlight_gpio = true;
 
@@ -993,8 +918,11 @@
 				"Failed to set pinmux to PWM\n");
 	}
 
+	if (gpiod_lookup_table)
+		gpiod_add_lookup_table(gpiod_lookup_table);
+
 	if (want_panel_gpio) {
-		intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
+		intel_dsi->gpio_panel = devm_gpiod_get(dev->dev, "panel", flags);
 		if (IS_ERR(intel_dsi->gpio_panel)) {
 			drm_err(&dev_priv->drm,
 				"Failed to own gpio for panel control\n");
@@ -1004,38 +932,14 @@
 
 	if (want_backlight_gpio) {
 		intel_dsi->gpio_backlight =
-			gpiod_get(dev->dev, "backlight", flags);
+			devm_gpiod_get(dev->dev, "backlight", flags);
 		if (IS_ERR(intel_dsi->gpio_backlight)) {
 			drm_err(&dev_priv->drm,
 				"Failed to own gpio for backlight control\n");
 			intel_dsi->gpio_backlight = NULL;
 		}
 	}
-}
 
-void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
-{
-	struct drm_device *dev = intel_dsi->base.base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_connector *connector = intel_dsi->attached_connector;
-	struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
-
-	if (intel_dsi->gpio_panel) {
-		gpiod_put(intel_dsi->gpio_panel);
-		intel_dsi->gpio_panel = NULL;
-	}
-
-	if (intel_dsi->gpio_backlight) {
-		gpiod_put(intel_dsi->gpio_backlight);
-		intel_dsi->gpio_backlight = NULL;
-	}
-
-	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-	    mipi_config->pwm_blc == PPS_BLC_PMIC)
-		gpiod_remove_lookup_table(&pmic_panel_gpio_table);
-
-	if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
-		pinctrl_unregister_mappings(soc_pwm_pinctrl_map);
-		gpiod_remove_lookup_table(&soc_panel_gpio_table);
-	}
+	if (gpiod_lookup_table)
+		gpiod_remove_lookup_table(gpiod_lookup_table);
 }
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
index 468d873fab..3462fcc760 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
@@ -13,7 +13,6 @@ struct intel_dsi;
 
 bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
 void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
-void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
 void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
 				 enum mipi_seq seq_id);
 void intel_dsi_log_params(struct intel_dsi *intel_dsi);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 646f367a13..0c0144eaa8 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -4,7 +4,6 @@
  */
 
 #include
-#include
 #include
 
@@ -15,6 +14,7 @@
 #include "intel_display_types.h"
 #include "intel_dpt.h"
 #include "intel_fb.h"
+#include "intel_fb_bo.h"
 #include "intel_frontbuffer.h"
 
 #define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a))
@@ -301,6 +301,33 @@ lookup_format_info(const struct drm_format_info formats[],
 	return NULL;
 }
 
+unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
+{
+	const struct intel_modifier_desc *md;
+	u8 tiling_caps;
+
+	md = lookup_modifier_or_null(fb_modifier);
+	if (!md)
+		return I915_TILING_NONE;
+
+	tiling_caps = lookup_modifier_or_null(fb_modifier)->plane_caps &
+		      INTEL_PLANE_CAP_TILING_MASK;
+
+	switch (tiling_caps) {
+	case INTEL_PLANE_CAP_TILING_Y:
+		return I915_TILING_Y;
+	case INTEL_PLANE_CAP_TILING_X:
+		return I915_TILING_X;
+	case INTEL_PLANE_CAP_TILING_4:
+	case INTEL_PLANE_CAP_TILING_Yf:
+	case INTEL_PLANE_CAP_TILING_NONE:
+		return I915_TILING_NONE;
+	default:
+		MISSING_CASE(tiling_caps);
+		return I915_TILING_NONE;
+	}
+}
+
 /**
  * intel_fb_get_format_info: Get a modifier specific format information
  * @cmd: FB add command structure
@@ -737,26 +764,6 @@ intel_fb_align_height(const struct drm_framebuffer *fb,
 	return ALIGN(height, tile_height);
 }
 
-static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
-{
-	u8 tiling_caps = lookup_modifier(fb_modifier)->plane_caps &
-			 INTEL_PLANE_CAP_TILING_MASK;
-
-	switch (tiling_caps) {
-	case INTEL_PLANE_CAP_TILING_Y:
-		return I915_TILING_Y;
-	case INTEL_PLANE_CAP_TILING_X:
-		return I915_TILING_X;
-	case INTEL_PLANE_CAP_TILING_4:
-	case INTEL_PLANE_CAP_TILING_Yf:
-	case INTEL_PLANE_CAP_TILING_NONE:
-		return I915_TILING_NONE;
-	default:
-		MISSING_CASE(tiling_caps);
-		return I915_TILING_NONE;
-	}
-}
-
 bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
 {
 	return HAS_DPT(i915) && modifier != DRM_FORMAT_MOD_LINEAR;
@@ -764,7 +771,7 @@ bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
 
 bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
 {
-	return fb && to_i915(fb->dev)->params.enable_dpt &&
+	return to_i915(fb->dev)->display.params.enable_dpt &&
 		intel_fb_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
 }
 
@@ -1670,10 +1677,10 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
 		max_size = max(max_size, offset + size);
 	}
 
-	if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
+	if (mul_u32_u32(max_size, tile_size) > intel_bo_to_drm_bo(obj)->size) {
 		drm_dbg_kms(&i915->drm,
 			    "fb too big for bo (need %llu bytes, have %zu bytes)\n",
-			    mul_u32_u32(max_size, tile_size), obj->base.size);
+			    mul_u32_u32(max_size, tile_size), intel_bo_to_drm_bo(obj)->size);
 		return -EINVAL;
 	}
 
@@ -1894,6 +1901,8 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 
 	intel_frontbuffer_put(intel_fb->frontbuffer);
 
+	intel_fb_bo_framebuffer_fini(intel_fb_obj(fb));
+
 	kfree(intel_fb);
 }
 
@@ -1902,7 +1911,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
 						unsigned int *handle)
 {
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct drm_i915_private *i915 = to_i915(intel_bo_to_drm_bo(obj)->dev);
 
 	if (i915_gem_object_is_userptr(obj)) {
 		drm_dbg(&i915->drm,
@@ -1910,7 +1919,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
 		return -EINVAL;
 	}
 
-	return drm_gem_handle_create(file, &obj->base, handle);
+	return drm_gem_handle_create(file, intel_bo_to_drm_bo(obj), handle);
 }
 
 struct frontbuffer_fence_cb {
@@ -1943,10 +1952,10 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
 	if (!atomic_read(&front->bits))
 		return 0;
 
-	if (dma_resv_test_signaled(obj->base.resv, dma_resv_usage_rw(false)))
+	if (dma_resv_test_signaled(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false)))
 		goto flush;
 
-	ret = dma_resv_get_singleton(obj->base.resv, dma_resv_usage_rw(false),
+	ret = dma_resv_get_singleton(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false),
 				     &fence);
 	if (ret || !fence)
 		goto flush;
@@ -1988,61 +1997,30 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 			   struct drm_i915_gem_object *obj,
 			   struct drm_mode_fb_cmd2 *mode_cmd)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *dev_priv = to_i915(intel_bo_to_drm_bo(obj)->dev);
 	struct drm_framebuffer *fb = &intel_fb->base;
 	u32 max_stride;
-	unsigned int tiling, stride;
 	int ret = -EINVAL;
 	int i;
 
-	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
-	if (!intel_fb->frontbuffer)
-		return -ENOMEM;
-
-	i915_gem_object_lock(obj, NULL);
-	tiling = i915_gem_object_get_tiling(obj);
-	stride = i915_gem_object_get_stride(obj);
-	i915_gem_object_unlock(obj);
+	ret = intel_fb_bo_framebuffer_init(intel_fb, obj, mode_cmd);
+	if (ret)
+		return ret;
 
-	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
-		/*
-		 * If there's a fence, enforce that
-		 * the fb modifier and tiling mode match.
-		 */
-		if (tiling != I915_TILING_NONE &&
-		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "tiling_mode doesn't match fb modifier\n");
-			goto err;
-		}
-	} else {
-		if (tiling == I915_TILING_X) {
-			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
-		} else if (tiling == I915_TILING_Y) {
-			drm_dbg_kms(&dev_priv->drm,
-				    "No Y tiling for legacy addfb\n");
-			goto err;
-		}
+	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+	if (!intel_fb->frontbuffer) {
+		ret = -ENOMEM;
+		goto err;
 	}
 
+	ret = -EINVAL;
 	if (!drm_any_plane_has_format(&dev_priv->drm,
 				      mode_cmd->pixel_format,
 				      mode_cmd->modifier[0])) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
 			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);
-		goto err;
-	}
-
-	/*
-	 * gen2/3 display engine uses the fence if present,
-	 * so the tiling mode must match the fb modifier exactly.
-	 */
-	if (DISPLAY_VER(dev_priv) < 4 &&
-	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "tiling_mode must match fb modifier exactly on gen2/3\n");
-		goto err;
+		goto err_frontbuffer_put;
 	}
 
 	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
@@ -2053,18 +2031,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
 			    "tiled" : "linear",
 			    mode_cmd->pitches[0], max_stride);
-		goto err;
-	}
-
-	/*
-	 * If there's a fence, enforce that
-	 * the fb pitch and fence stride match.
-	 */
-	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "pitch (%d) must match tiling stride (%d)\n",
-			    mode_cmd->pitches[0], stride);
-		goto err;
+		goto err_frontbuffer_put;
 	}
 
 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
@@ -2072,7 +2039,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		drm_dbg_kms(&dev_priv->drm,
 			    "plane 0 offset (0x%08x) must be 0\n",
 			    mode_cmd->offsets[0]);
-		goto err;
+		goto err_frontbuffer_put;
 	}
 
 	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
@@ -2083,7 +2050,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
 			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
 				    i);
-			goto err;
+			goto err_frontbuffer_put;
 		}
 
 		stride_alignment = intel_fb_stride_alignment(fb, i);
@@ -2091,7 +2058,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 			drm_dbg_kms(&dev_priv->drm,
 				    "plane %d pitch (%d) must be at least %u byte aligned\n",
 				    i, fb->pitches[i], stride_alignment);
-			goto err;
+			goto err_frontbuffer_put;
 		}
 
 		if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) {
@@ -2102,16 +2069,16 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 					    "ccs aux plane %d pitch (%d) must be %d\n",
 					    i,
 					    fb->pitches[i], ccs_aux_stride);
-				goto err;
+				goto err_frontbuffer_put;
 			}
 		}
 
-		fb->obj[i] = &obj->base;
+		fb->obj[i] = intel_bo_to_drm_bo(obj);
 	}
 
 	ret = intel_fill_fb_info(dev_priv, intel_fb);
 	if (ret)
-		goto err;
+		goto err_frontbuffer_put;
 
 	if (intel_fb_uses_dpt(fb)) {
 		struct i915_address_space *vm;
@@ -2120,7 +2087,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		if (IS_ERR(vm)) {
 			drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n");
 			ret = PTR_ERR(vm);
-			goto err;
+			goto err_frontbuffer_put;
 		}
 
 		intel_fb->dpt_vm = vm;
@@ -2137,8 +2104,10 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 err_free_dpt:
 	if (intel_fb_uses_dpt(fb))
 		intel_dpt_destroy(intel_fb->dpt_vm);
-err:
+err_frontbuffer_put:
 	intel_frontbuffer_put(intel_fb->frontbuffer);
+err:
+	intel_fb_bo_framebuffer_fini(obj);
 	return ret;
 }
 
@@ -2150,23 +2119,14 @@ intel_user_framebuffer_create(struct drm_device *dev,
 	struct drm_framebuffer *fb;
 	struct drm_i915_gem_object *obj;
 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
-	struct drm_i915_private *i915;
-
-	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
-	if (!obj)
-		return ERR_PTR(-ENOENT);
-
-	/* object is backed with LMEM for discrete */
-	i915 = to_i915(obj->base.dev);
-	if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
-		/* object is "remote", not in local memory */
-		i915_gem_object_put(obj);
-		drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
-		return ERR_PTR(-EREMOTE);
-	}
+	struct drm_i915_private *i915 = to_i915(dev);
+
+	obj = intel_fb_bo_lookup_valid_bo(i915, filp, &mode_cmd);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
 
 	fb = intel_framebuffer_create(obj, &mode_cmd);
-	i915_gem_object_put(obj);
+	drm_gem_object_put(intel_bo_to_drm_bo(obj));
 
 	return fb;
 }
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index e85167d6bc..23db6628f5 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -95,4 +95,6 @@ intel_user_framebuffer_create(struct drm_device *dev,
 bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier);
 bool intel_fb_uses_dpt(const struct drm_framebuffer *fb);
 
+unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier);
+
 #endif /* __INTEL_FB_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
new file mode 100644
index 0000000000..4be09541e5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_framebuffer.h>
+
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_fb.h"
+#include "intel_fb_bo.h"
+
+void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj)
+{
+	/* Nothing to do for i915 */
+}
+
+int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+				 struct drm_i915_gem_object *obj,
+				 struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	unsigned int tiling, stride;
+
+	i915_gem_object_lock(obj, NULL);
+	tiling = i915_gem_object_get_tiling(obj);
+	stride = i915_gem_object_get_stride(obj);
+	i915_gem_object_unlock(obj);
+
+	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+		/*
+		 * If there's a fence, enforce that
+		 * the fb modifier and tiling mode match.
+		 */
+		if (tiling != I915_TILING_NONE &&
+		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+			drm_dbg_kms(&i915->drm,
+				    "tiling_mode doesn't match fb modifier\n");
+			return -EINVAL;
+		}
+	} else {
+		if (tiling == I915_TILING_X) {
+			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+		} else if (tiling == I915_TILING_Y) {
+			drm_dbg_kms(&i915->drm,
+				    "No Y tiling for legacy addfb\n");
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * gen2/3 display engine uses the fence if present,
+	 * so the tiling mode must match the fb modifier exactly.
+	 */
+	if (DISPLAY_VER(i915) < 4 &&
+	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+		drm_dbg_kms(&i915->drm,
+			    "tiling_mode must match fb modifier exactly on gen2/3\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * If there's a fence, enforce that
+	 * the fb pitch and fence stride match.
+	 */
+	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
+		drm_dbg_kms(&i915->drm,
+			    "pitch (%d) must match tiling stride (%d)\n",
+			    mode_cmd->pitches[0], stride);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+struct drm_i915_gem_object *
+intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+			    struct drm_file *filp,
+			    const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_i915_gem_object *obj;
+
+	obj = i915_gem_object_lookup(filp, mode_cmd->handles[0]);
+	if (!obj)
+		return ERR_PTR(-ENOENT);
+
+	/* object is backed with LMEM for discrete */
+	if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
+		/* object is "remote", not in local memory */
+		i915_gem_object_put(obj);
+		drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
+		return ERR_PTR(-EREMOTE);
+	}
+
+	return obj;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h
new file mode 100644
index 0000000000..232bf898b0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_FB_BO_H__
+#define __INTEL_FB_BO_H__
+
+struct drm_file;
+struct drm_mode_fb_cmd2;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct intel_framebuffer;
+
+void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj);
+
+int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+				 struct drm_i915_gem_object *obj,
+				 struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct drm_i915_gem_object *
+intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+			    struct drm_file *filp,
+			    const struct drm_mode_fb_cmd2 *user_mode_cmd);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 7b42aef37d..b6df9baf48 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -255,6 +255,16 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
 			return PTR_ERR(vma);
 
 		plane_state->ggtt_vma = vma;
+
+		/*
+		 * Pre-populate the dma address before we enter the vblank
+		 * evade critical section as i915_gem_object_get_dma_address()
+		 * will trigger might_sleep() even if it won't actually sleep,
+		 * which is the case when the fb has already been pinned.
+		 */
+		if (phys_cursor)
+			plane_state->phys_dma_addr =
+				i915_gem_object_get_dma_address(intel_fb_obj(fb), 0);
 	} else {
 		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 4820d21cc9..f17a1afb49 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -608,6 +608,7 @@ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
 static void ivb_fbc_activate(struct intel_fbc *fbc)
 {
 	struct drm_i915_private *i915 = fbc->i915;
+	u32 dpfc_ctl;
 
 	if (DISPLAY_VER(i915) >= 10)
 		glk_fbc_program_cfb_stride(fbc);
@@ -617,8 +618,13 @@ static void ivb_fbc_activate(struct intel_fbc *fbc)
 	if (intel_gt_support_legacy_fencing(to_gt(i915)))
 		snb_fbc_program_fence(fbc);
 
+	/* wa_14019417088 Alternative WA */
+	dpfc_ctl = ivb_dpfc_ctl(fbc);
+	if (DISPLAY_VER(i915) >= 20)
+		intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+
 	intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
-		       DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
+		       DPFC_CTL_EN | dpfc_ctl);
 }
 
 static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
@@ -1022,10 +1028,13 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *
 	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
 	unsigned int effective_w, effective_h, max_w, max_h;
 
-	if (DISPLAY_VER(i915) >= 10) {
+	if (DISPLAY_VER(i915) >= 11) {
+		max_w = 8192;
+		max_h = 4096;
+	} else if (DISPLAY_VER(i915) >= 10) {
 		max_w = 5120;
 		max_h = 4096;
-	} else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
+	} else if (DISPLAY_VER(i915) >= 7) {
 		max_w = 4096;
 		max_h = 4096;
 	} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
@@ -1044,6 +1053,31 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *
 	return effective_w <= max_w && effective_h <= max_h;
 }
 
+static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+	unsigned int w, h, max_w, max_h;
+
+	if (DISPLAY_VER(i915) >= 10) {
+		max_w = 5120;
+		max_h = 4096;
+	} else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
+		max_w = 4096;
+		max_h = 4096;
+	} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
+		max_w = 4096;
+		max_h = 2048;
+	} else {
+		max_w = 2048;
+		max_h = 1536;
+	}
+
+	w = drm_rect_width(&plane_state->uapi.src) >> 16;
+	h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+	return w <= max_w && h <= max_h;
+}
+
 static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state)
 {
 	const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1174,7 +1208,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
 		return 0;
 	}
 
-	if (!i915->params.enable_fbc) {
+	if (!i915->display.params.enable_fbc) {
 		plane_state->no_fbc_reason = "disabled per module param or by default";
 		return 0;
 	}
@@ -1201,7 +1235,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
 	 * Recommendation is to keep this combination disabled
 	 * Bspec: 50422 HSD: 14010260002
 	 */
-	if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
+	if (IS_DISPLAY_VER(i915, 12, 14) && crtc_state->has_psr2) {
 		plane_state->no_fbc_reason = "PSR2 enabled";
 		return 0;
 	}
@@ -1241,11 +1275,16 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
 		return 0;
 	}
 
-	if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+	if (!intel_fbc_plane_size_valid(plane_state)) {
 		plane_state->no_fbc_reason = "plane size too big";
 		return 0;
 	}
 
+	if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+		plane_state->no_fbc_reason = "surface size too big";
+		return 0;
+	}
+
 	/*
 	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
 	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
@@ -1751,8 +1790,8 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
  */
 static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
 {
-	if (i915->params.enable_fbc >= 0)
-		return !!i915->params.enable_fbc;
+	if (i915->display.params.enable_fbc >= 0)
+		return !!i915->display.params.enable_fbc;
 
 	if (!HAS_FBC(i915))
 		return 0;
@@ -1824,9 +1863,9 @@ void intel_fbc_init(struct drm_i915_private *i915)
 	if (need_fbc_vtd_wa(i915))
 		DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
 
-	i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
+	i915->display.params.enable_fbc = intel_sanitize_fbc_option(i915);
 	drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
-		    i915->params.enable_fbc);
+		    i915->display.params.enable_fbc);
 
 	for_each_fbc_id(i915, fbc_id)
 		i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 31d0d695d5..99894a855e 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -43,7 +43,6 @@
 #include
 #include
 
-#include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_mman.h"
 
 #include "i915_drv.h"
@@ -51,6 +50,7 @@
 #include "intel_fb.h"
 #include "intel_fb_pin.h"
 #include "intel_fbdev.h"
+#include "intel_fbdev_fb.h"
 #include "intel_frontbuffer.h"
 
 struct intel_fbdev {
@@ -146,65 +146,6 @@ static const struct fb_ops intelfb_ops = {
 	.fb_mmap = intel_fbdev_mmap,
 };
 
-static int intelfb_alloc(struct drm_fb_helper *helper,
-			 struct drm_fb_helper_surface_size *sizes)
-{
-	struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
-	struct drm_framebuffer *fb;
-	struct drm_device *dev = helper->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_mode_fb_cmd2 mode_cmd = {};
-	struct drm_i915_gem_object *obj;
-	int size;
-
-	/* we don't do packed 24bpp */
-	if (sizes->surface_bpp == 24)
-		sizes->surface_bpp = 32;
-
-	mode_cmd.width = sizes->surface_width;
-	mode_cmd.height = sizes->surface_height;
-
-	mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
-				    DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
-	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
-							  sizes->surface_depth);
-
-	size = mode_cmd.pitches[0] * mode_cmd.height;
-	size = PAGE_ALIGN(size);
-
-	obj = ERR_PTR(-ENODEV);
-	if (HAS_LMEM(dev_priv)) {
-		obj = i915_gem_object_create_lmem(dev_priv, size,
-						  I915_BO_ALLOC_CONTIGUOUS |
-						  I915_BO_ALLOC_USER);
-	} else {
-		/*
-		 * If the FB is too big, just don't use it since fbdev is not very
-		 * important and we should probably use that space with FBC or other
-		 * features.
-		 *
-		 * Also skip stolen on MTL as Wa_22018444074 mitigation.
-		 */
-		if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size)
-			obj = i915_gem_object_create_stolen(dev_priv, size);
-		if (IS_ERR(obj))
-			obj = i915_gem_object_create_shmem(dev_priv, size);
-	}
-
-	if (IS_ERR(obj)) {
-		drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
-		return PTR_ERR(obj);
-	}
-
-	fb = intel_framebuffer_create(obj, &mode_cmd);
-	i915_gem_object_put(obj);
-	if (IS_ERR(fb))
-		return PTR_ERR(fb);
-
-	ifbdev->fb = to_intel_framebuffer(fb);
-	return 0;
-}
-
 static int intelfb_create(struct drm_fb_helper *helper,
 			  struct drm_fb_helper_surface_size *sizes)
 {
@@ -213,7 +154,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	struct drm_device *dev = helper->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
-	struct i915_ggtt *ggtt = to_gt(dev_priv)->ggtt;
 	const struct i915_gtt_view view = {
 		.type = I915_GTT_VIEW_NORMAL,
 	};
@@ -222,9 +162,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	struct i915_vma *vma;
 	unsigned long flags = 0;
 	bool prealloc = false;
-	void __iomem *vaddr;
 	struct drm_i915_gem_object *obj;
-	struct i915_gem_ww_ctx ww;
 	int ret;
 
 	mutex_lock(&ifbdev->hpd_lock);
@@ -245,12 +183,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
 		intel_fb = ifbdev->fb = NULL;
 	}
 	if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
+		struct drm_framebuffer *fb;
 		drm_dbg_kms(&dev_priv->drm,
 			    "no BIOS fb, allocating a new one\n");
-		ret = intelfb_alloc(helper, sizes);
-		if (ret)
-			return ret;
-		intel_fb = ifbdev->fb;
+		fb = intel_fbdev_fb_alloc(helper, sizes);
+		if (IS_ERR(fb))
+			return PTR_ERR(fb);
+		intel_fb = ifbdev->fb = to_intel_framebuffer(fb);
 	} else {
 		drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
 		prealloc = true;
@@ -283,49 +222,18 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	info->fbops = &intelfb_ops;
 
 	obj = intel_fb_obj(&intel_fb->base);
-	if (i915_gem_object_is_lmem(obj)) {
-		struct intel_memory_region *mem = obj->mm.region;
-
-		/* Use fbdev's framebuffer from lmem for discrete */
-		info->fix.smem_start =
-			(unsigned long)(mem->io_start +
-					i915_gem_object_get_dma_address(obj, 0));
-		info->fix.smem_len = obj->base.size;
-	} else {
-		/* Our framebuffer is the entirety of fbdev's system memory */
-		info->fix.smem_start =
-			(unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
-		info->fix.smem_len = vma->size;
-	}
-
-	for_i915_gem_ww(&ww, ret, false) {
-		ret = i915_gem_object_lock(vma->obj, &ww);
-
-		if (ret)
-			continue;
-
-		vaddr = i915_vma_pin_iomap(vma);
-		if (IS_ERR(vaddr)) {
-			drm_err(&dev_priv->drm,
-				"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
-			ret = PTR_ERR(vaddr);
-			continue;
-		}
-	}
 
+	ret = intel_fbdev_fb_fill_info(dev_priv, info, obj, vma);
 	if (ret)
 		goto out_unpin;
 
-	info->screen_base = vaddr;
-	info->screen_size = vma->size;
-
 	drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
 
 	/* If the object is shmemfs backed, it will have given us zeroed pages.
 	 * If the object is stolen however, it will be full of whatever
 	 * garbage was left in there.
 	 */
-	if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
+	if (!i915_gem_object_is_shmem(obj) && !prealloc)
 		memset_io(info->screen_base, 0, info->screen_size);
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -424,12 +332,12 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 			continue;
 		}
 
-		if (obj->base.size > max_size) {
+		if (intel_bo_to_drm_bo(obj)->size > max_size) {
 			drm_dbg_kms(&i915->drm,
 				    "found possible fb from [PLANE:%d:%s]\n",
 				    plane->base.base.id, plane->base.name);
 			fb = to_intel_framebuffer(plane_state->uapi.fb);
-			max_size = obj->base.size;
+			max_size = intel_bo_to_drm_bo(obj)->size;
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
new file mode 100644
index 0000000000..717c3a3237
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_fb_helper.h>
+
+#include "gem/i915_gem_lmem.h"
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_fbdev_fb.h"
+
+struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
+					     struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_framebuffer *fb;
+	struct drm_device *dev = helper->dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_mode_fb_cmd2 mode_cmd = {};
+	struct drm_i915_gem_object *obj;
+	int size;
+
+	/* we don't do packed 24bpp */
+	if (sizes->surface_bpp == 24)
+		sizes->surface_bpp = 32;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
+	mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+				    DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	size = PAGE_ALIGN(size);
+
+	obj = ERR_PTR(-ENODEV);
+	if (HAS_LMEM(dev_priv)) {
+		obj = i915_gem_object_create_lmem(dev_priv, size,
+						  I915_BO_ALLOC_CONTIGUOUS |
+						  I915_BO_ALLOC_USER);
+	} else {
+		/*
+		 * If the FB is too big, just don't use it since fbdev is not very
+		 * important and we should probably use that space with FBC or other
+		 * features.
+		 *
+		 * Also skip stolen on MTL as Wa_22018444074 mitigation.
+ */ + if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size) + obj = i915_gem_object_create_stolen(dev_priv, size); + if (IS_ERR(obj)) + obj = i915_gem_object_create_shmem(dev_priv, size); + } + + if (IS_ERR(obj)) { + drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj); + return ERR_PTR(-ENOMEM); + } + + fb = intel_framebuffer_create(obj, &mode_cmd); + i915_gem_object_put(obj); + + return fb; +} + +int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info, + struct drm_i915_gem_object *obj, struct i915_vma *vma) +{ + struct i915_gem_ww_ctx ww; + void __iomem *vaddr; + int ret; + + if (i915_gem_object_is_lmem(obj)) { + struct intel_memory_region *mem = obj->mm.region; + + /* Use fbdev's framebuffer from lmem for discrete */ + info->fix.smem_start = + (unsigned long)(mem->io_start + + i915_gem_object_get_dma_address(obj, 0)); + info->fix.smem_len = obj->base.size; + } else { + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; + + /* Our framebuffer is the entirety of fbdev's system memory */ + info->fix.smem_start = + (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma)); + info->fix.smem_len = vma->size; + } + + for_i915_gem_ww(&ww, ret, false) { + ret = i915_gem_object_lock(vma->obj, &ww); + + if (ret) + continue; + + vaddr = i915_vma_pin_iomap(vma); + if (IS_ERR(vaddr)) { + drm_err(&i915->drm, + "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr); + ret = PTR_ERR(vaddr); + continue; + } + } + + if (ret) + return ret; + + info->screen_base = vaddr; + info->screen_size = intel_bo_to_drm_bo(obj)->size; + + return 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h new file mode 100644 index 0000000000..a395b2c65d --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_FBDEV_FB_H__ +#define __INTEL_FBDEV_FB_H__ + +struct drm_fb_helper; +struct drm_fb_helper_surface_size; +struct drm_i915_gem_object; +struct drm_i915_private; +struct fb_info; +struct i915_vma; + +struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes); +int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info, + struct drm_i915_gem_object *obj, struct i915_vma *vma); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index e6429dfebe..295a0f24eb 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -10,6 +10,7 @@ #include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" +#include "intel_dp.h" #include "intel_display_types.h" #include "intel_fdi.h" #include "intel_fdi_regs.h" @@ -338,8 +339,11 @@ int ilk_fdi_compute_config(struct intel_crtc *crtc, pipe_config->fdi_lanes = lane; - intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, - link_bw, &pipe_config->fdi_m_n, false); + intel_link_compute_m_n(to_bpp_x16(pipe_config->pipe_bpp), + lane, fdi_dotclock, + link_bw, + intel_dp_bw_fec_overhead(false), + &pipe_config->fdi_m_n); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 40d7b6f3f4..e9e4dcf345 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -899,7 +899,6 @@ int intel_gmbus_setup(struct drm_i915_private *i915) } 
bus->adapter.owner = THIS_MODULE; - bus->adapter.class = I2C_CLASS_DDC; snprintf(bus->adapter.name, sizeof(bus->adapter.name), "i915 gmbus %s", gmbus_pin->name); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index c89da3568e..39b3f7c0c7 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -923,7 +923,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector) return 0; } -static int _intel_hdcp_enable(struct intel_connector *connector) +static int intel_hdcp1_enable(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; @@ -1058,7 +1058,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - ret = _intel_hdcp_enable(connector); + ret = intel_hdcp1_enable(connector); if (ret) { drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret); intel_hdcp_update_value(connector, @@ -2324,10 +2324,10 @@ intel_hdcp_set_streams(struct intel_digital_port *dig_port, return 0; } -int intel_hdcp_enable(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, - const struct drm_connector_state *conn_state) +static int _intel_hdcp_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_connector *connector = @@ -2388,7 +2388,7 @@ int intel_hdcp_enable(struct intel_atomic_state *state, */ if (ret && intel_hdcp_capable(connector) && hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) { - ret = _intel_hdcp_enable(connector); + ret = intel_hdcp1_enable(connector); } if (!ret) { @@ -2404,6 +2404,27 @@ int intel_hdcp_enable(struct intel_atomic_state *state, return ret; } +void intel_hdcp_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = + to_intel_connector(conn_state->connector); + struct intel_hdcp *hdcp = &connector->hdcp; + + /* + * Enable hdcp if it's desired or if userspace is enabled and + * driver set its state to undesired + */ + if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_DESIRED || + (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value == + DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) + _intel_hdcp_enable(state, encoder, crtc_state, conn_state); +} + int intel_hdcp_disable(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); @@ -2491,7 +2512,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, } if (desired_and_not_enabled || content_protection_type_changed) - intel_hdcp_enable(state, encoder, crtc_state, conn_state); + _intel_hdcp_enable(state, encoder, crtc_state, conn_state); } void intel_hdcp_component_fini(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index 5997c52a09..a9c784fd9b 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -28,10 +28,10 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, int intel_hdcp_init(struct intel_connector *connector, struct intel_digital_port *dig_port, const struct 
intel_hdcp_shim *hdcp_shim); -int intel_hdcp_enable(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, - const struct drm_connector_state *conn_state); +void intel_hdcp_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state); int intel_hdcp_disable(struct intel_connector *connector); void intel_hdcp_update_pipe(struct intel_atomic_state *state, struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index bfa456fa7d..39e4f5f7c8 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -3034,16 +3034,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, "HDCP init failed, skipping.\n"); } - /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written - * 0xd. Failure to do so will result in spurious interrupts being - * generated on the port when a cable is not attached. - */ - if (IS_G45(dev_priv)) { - u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); - intel_de_write(dev_priv, PEG_BAND_GAP_DATA, - (temp & ~0xf) | 0xd); - } - cec_fill_conn_info_from_drm(&conn_info, connector); intel_hdmi->cec_notifier = diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index f07047e9cb..04f62f27ad 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -1361,11 +1361,24 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) bxt_hpd_detection_setup(dev_priv); } +static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915) +{ + /* + * For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written + * 0xd. Failure to do so will result in spurious interrupts being + * generated on the port when a cable is not attached. 
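The G45 band gap workaround deleted from intel_hdmi.c above reappears here as a helper shared by the HPD setup paths, reduced to a single read-modify-write. intel_de_rmw(i915, reg, clear, set) is assumed to be equivalent to the open-coded sequence it replaces:

	u32 temp = intel_de_read(i915, PEG_BAND_GAP_DATA);

	intel_de_write(i915, PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);

so passing clear = 0xf and set = 0xd clears bits 3:0 and writes back 0xd in one step.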
+ */ + intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd); +} + static void i915_hpd_enable_detection(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin]; + if (IS_G45(i915)) + g45_hpd_peg_band_gap_wa(i915); + /* HPD sense and interrupt enable are one and the same */ i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en); } @@ -1389,6 +1402,9 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; + if (IS_G45(dev_priv)) + g45_hpd_peg_band_gap_wa(dev_priv); + /* Ignore TV since it's buggy */ i915_hotplug_interrupt_update_locked(dev_priv, HOTPLUG_INT_EN_MASK | diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c index c5eb5f2425..9c6d35a405 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.c +++ b/drivers/gpu/drm/i915/display/intel_link_bw.c @@ -7,6 +7,7 @@ #include "intel_atomic.h" #include "intel_display_types.h" +#include "intel_dp_mst.h" #include "intel_fdi.h" #include "intel_link_bw.h" @@ -21,6 +22,7 @@ void intel_link_bw_init_limits(struct drm_i915_private *i915, struct intel_link_ { enum pipe pipe; + limits->force_fec_pipes = 0; limits->bpp_limit_reached_pipes = 0; for_each_pipe(i915, pipe) limits->max_bpp_x16[pipe] = INT_MAX; @@ -53,11 +55,11 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state, struct drm_i915_private *i915 = to_i915(state->base.dev); enum pipe max_bpp_pipe = INVALID_PIPE; struct intel_crtc *crtc; - int max_bpp = 0; + int max_bpp_x16 = 0; for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) { struct intel_crtc_state *crtc_state; - int link_bpp; + int link_bpp_x16; if (limits->bpp_limit_reached_pipes & BIT(crtc->pipe)) continue; @@ -68,7 +70,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state, return PTR_ERR(crtc_state); if (crtc_state->dsc.compression_enable) - link_bpp = crtc_state->dsc.compressed_bpp; + link_bpp_x16 = crtc_state->dsc.compressed_bpp_x16; else /* * TODO: for YUV420 the actual link bpp is only half @@ -76,10 +78,10 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state, * is based on the pipe bpp value, set the actual link bpp * limit here once the MST BW allocation is fixed. */ - link_bpp = crtc_state->pipe_bpp; + link_bpp_x16 = to_bpp_x16(crtc_state->pipe_bpp); - if (link_bpp > max_bpp) { - max_bpp = link_bpp; + if (link_bpp_x16 > max_bpp_x16) { + max_bpp_x16 = link_bpp_x16; max_bpp_pipe = crtc->pipe; } } @@ -87,7 +89,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state, if (max_bpp_pipe == INVALID_PIPE) return -ENOSPC; - limits->max_bpp_x16[max_bpp_pipe] = to_bpp_x16(max_bpp) - 1; + limits->max_bpp_x16[max_bpp_pipe] = max_bpp_x16 - 1; return intel_modeset_pipes_in_mask_early(state, reason, BIT(max_bpp_pipe)); @@ -143,6 +145,10 @@ static int check_all_link_config(struct intel_atomic_state *state, /* TODO: Check additional shared display link configurations like MST */ int ret; + ret = intel_dp_mst_atomic_check_link(state, limits); + if (ret) + return ret; + ret = intel_fdi_atomic_check_link(state, limits); if (ret) return ret; @@ -158,6 +164,12 @@ assert_link_limit_change_valid(struct drm_i915_private *i915, bool bpps_changed = false; enum pipe pipe; + /* FEC can't be forced off after it was forced on. 
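The link bandwidth limits above now carry bpp in 1/16 units end to end, which is the granularity DSC hardware can actually program. A sketch of the fixed-point convention, using illustrative names that mirror what to_bpp_x16()/to_bpp_int() are assumed to do:

/* bpp is stored as bpp * 16 so fractional DSC targets survive the
 * bookkeeping: 24 bpp -> 384, a compressed 12.5 bpp -> 200. */
static inline int bpp_to_x16(int bpp)
{
	return bpp << 4;
}

static inline int x16_to_bpp_int(int bpp_x16)
{
	return bpp_x16 >> 4;
}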
*/ + if (drm_WARN_ON(&i915->drm, + (old_limits->force_fec_pipes & new_limits->force_fec_pipes) != + old_limits->force_fec_pipes)) + return false; + for_each_pipe(i915, pipe) { /* The bpp limit can only decrease. */ if (drm_WARN_ON(&i915->drm, @@ -172,7 +184,9 @@ assert_link_limit_change_valid(struct drm_i915_private *i915, /* At least one limit must change. */ if (drm_WARN_ON(&i915->drm, - !bpps_changed)) + !bpps_changed && + new_limits->force_fec_pipes == + old_limits->force_fec_pipes)) return false; return true; diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h index e07df22a77..2cf57307cc 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.h +++ b/drivers/gpu/drm/i915/display/intel_link_bw.h @@ -16,6 +16,7 @@ struct intel_atomic_state; struct intel_crtc_state; struct intel_link_bw_limits { + u8 force_fec_pipes; u8 bpp_limit_reached_pipes; /* in 1/16 bpp units */ int max_bpp_x16[I915_MAX_PIPES]; diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index bcbdd1984f..221f5c6c87 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -185,7 +185,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, /* Convert from 100ms to 100us units */ pps->t4 = val * 1000; - if (DISPLAY_VER(dev_priv) <= 4 && + if (DISPLAY_VER(dev_priv) < 5 && pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) { drm_dbg_kms(&dev_priv->drm, "Panel power timings uninitialized, " @@ -799,8 +799,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) unsigned int val; /* use the module option value if specified */ - if (i915->params.lvds_channel_mode > 0) - return i915->params.lvds_channel_mode == 2; + if (i915->display.params.lvds_channel_mode > 0) + return i915->display.params.lvds_channel_mode == 2; /* single channel LVDS is limited to 112 MHz */ if (fixed_mode->clock > 112999) diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index b8f43efb0a..caeca3a844 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -769,8 +769,9 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { + struct intel_crtc_state *crtc_state = NULL; + if (connector->get_hw_state(connector)) { - struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; connector->base.dpms = DRM_MODE_DPMS_ON; @@ -796,6 +797,10 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) connector->base.dpms = DRM_MODE_DPMS_OFF; connector->base.encoder = NULL; } + + if (connector->sync_state) + connector->sync_state(connector, crtc_state); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] hw state readout: %s\n", connector->base.base.id, connector->base.name, diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c index 5e1c2c7804..076298a8d4 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c @@ -244,7 +244,7 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state, verify_crtc_state(state, crtc); intel_shared_dpll_state_verify(state, crtc); intel_mpllb_state_verify(state, crtc); - intel_c10pll_state_verify(state, crtc); + 
intel_cx0pll_state_verify(state, crtc); } void intel_modeset_verify_disabled(struct intel_atomic_state *state) diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 84078fb82b..1ce785db6a 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -841,7 +841,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->display.opregion; const struct firmware *fw = NULL; - const char *name = dev_priv->params.vbt_firmware; + const char *name = dev_priv->display.params.vbt_firmware; int ret; if (!name || !*name) diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 483beedac5..0d8e5320a4 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -46,8 +46,8 @@ bool intel_panel_use_ssc(struct drm_i915_private *i915) { - if (i915->params.panel_use_ssc >= 0) - return i915->params.panel_use_ssc != 0; + if (i915->display.params.panel_use_ssc >= 0) + return i915->display.params.panel_use_ssc != 0; return i915->display.vbt.lvds_use_ssc && !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE); } diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index 866786e6b3..baf679759e 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -8,6 +8,7 @@ #include "intel_crt.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_dpll.h" #include "intel_fdi.h" #include "intel_fdi_regs.h" #include "intel_lvds.h" diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 73f0f1714b..a8fa3a2099 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -90,7 +90,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe = intel_dp->pps.pps_pipe; bool pll_enabled, release_cl_override = false; - enum dpio_phy phy = DPIO_PHY(pipe); + enum dpio_phy phy = vlv_pipe_to_phy(pipe); enum dpio_channel ch = vlv_pipe_to_channel(pipe); u32 DP; diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 614b4534ef..925776ba13 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -29,6 +29,7 @@ #include "i915_reg.h" #include "intel_atomic.h" #include "intel_crtc.h" +#include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" @@ -172,6 +173,15 @@ * irrelevant for normal operation. 
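The PPS kick path above resolves the DPIO PHY through vlv_pipe_to_phy() rather than the old DPIO_PHY() macro, so an invalid pipe can be reported instead of silently mapping to PHY0. A sketch of the mapping the helper is assumed to implement (on VLV both pipes sit on PHY0; CHV hangs pipe C off PHY1):

static enum dpio_phy vlv_pipe_to_phy(enum pipe pipe)
{
	switch (pipe) {
	default:
		MISSING_CASE(pipe);
		fallthrough;
	case PIPE_A:
	case PIPE_B:
		return DPIO_PHY0;
	case PIPE_C:
		return DPIO_PHY1;
	}
}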
*/ +bool intel_encoder_can_psr(struct intel_encoder *encoder) +{ + if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST) + return CAN_PSR(enc_to_intel_dp(encoder)) || + CAN_PANEL_REPLAY(enc_to_intel_dp(encoder)); + else + return false; +} + static bool psr_global_enabled(struct intel_dp *intel_dp) { struct intel_connector *connector = intel_dp->attached_connector; @@ -179,9 +189,9 @@ static bool psr_global_enabled(struct intel_dp *intel_dp) switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DEFAULT: - if (i915->params.enable_psr == -1) + if (i915->display.params.enable_psr == -1) return connector->panel.vbt.psr.enable; - return i915->params.enable_psr; + return i915->display.params.enable_psr; case I915_PSR_DEBUG_DISABLE: return false; default: @@ -198,7 +208,7 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp) case I915_PSR_DEBUG_FORCE_PSR1: return false; default: - if (i915->params.enable_psr == 1) + if (i915->display.params.enable_psr == 1) return false; return true; } @@ -474,27 +484,41 @@ exit: intel_dp->psr.su_y_granularity = y; } -void intel_psr_init_dpcd(struct intel_dp *intel_dp) +static void _panel_replay_init_dpcd(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = - to_i915(dp_to_dig_port(intel_dp)->base.base.dev); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + u8 pr_dpcd = 0; - drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, - sizeof(intel_dp->psr_dpcd)); + intel_dp->psr.sink_panel_replay_support = false; + drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd); - if (!intel_dp->psr_dpcd[0]) + if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) { + drm_dbg_kms(&i915->drm, + "Panel replay is not supported by panel\n"); return; - drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n", + } + + drm_dbg_kms(&i915->drm, + "Panel replay is supported by panel\n"); + intel_dp->psr.sink_panel_replay_support = true; +} + +static void _psr_init_dpcd(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = + to_i915(dp_to_dig_port(intel_dp)->base.base.dev); + + drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n", intel_dp->psr_dpcd[0]); if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "PSR support not currently available for this panel\n"); return; } if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "Panel lacks power state control, PSR cannot be enabled\n"); return; } @@ -503,8 +527,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) intel_dp->psr.sink_sync_latency = intel_dp_get_sink_sync_latency(intel_dp); - if (DISPLAY_VER(dev_priv) >= 9 && - (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) { + if (DISPLAY_VER(i915) >= 9 && + intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) { bool y_req = intel_dp->psr_dpcd[1] & DP_PSR2_SU_Y_COORDINATE_REQUIRED; bool alpm = intel_dp_get_alpm_status(intel_dp); @@ -521,14 +545,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) * GTC first. */ intel_dp->psr.sink_psr2_support = y_req && alpm; - drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n", + drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n", intel_dp->psr.sink_psr2_support ? 
"" : "not "); + } +} - if (intel_dp->psr.sink_psr2_support) { - intel_dp->psr.colorimetry_support = - intel_dp_get_colorimetry_status(intel_dp); - intel_dp_get_su_granularity(intel_dp); - } +void intel_psr_init_dpcd(struct intel_dp *intel_dp) +{ + _panel_replay_init_dpcd(intel_dp); + + drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, + sizeof(intel_dp->psr_dpcd)); + + if (intel_dp->psr_dpcd[0]) + _psr_init_dpcd(intel_dp); + + if (intel_dp->psr.sink_psr2_support) { + intel_dp->psr.colorimetry_support = + intel_dp_get_colorimetry_status(intel_dp); + intel_dp_get_su_granularity(intel_dp); } } @@ -574,8 +609,11 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 dpcd_val = DP_PSR_ENABLE; - /* Enable ALPM at sink for psr2 */ + if (intel_dp->psr.panel_replay_enabled) + return; + if (intel_dp->psr.psr2_enabled) { + /* Enable ALPM at sink for psr2 */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, DP_ALPM_ENABLE | DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); @@ -592,6 +630,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp) if (intel_dp->psr.req_psr2_sdp_prior_scanline) dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE; + if (intel_dp->psr.entry_setup_frames > 0) + dpcd_val |= DP_PSR_FRAME_CAPTURE; + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); @@ -606,7 +647,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) if (DISPLAY_VER(dev_priv) >= 11) val |= EDP_PSR_TP4_TIME_0us; - if (dev_priv->params.psr_safest_params) { + if (dev_priv->display.params.psr_safest_params) { val |= EDP_PSR_TP1_TIME_2500us; val |= EDP_PSR_TP2_TP3_TIME_2500us; goto check_tp3_sel; @@ -690,6 +731,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) if (DISPLAY_VER(dev_priv) >= 8) val |= EDP_PSR_CRC_ENABLE; + if (DISPLAY_VER(dev_priv) >= 20) + val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames); + intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val); } @@ -700,7 +744,7 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 val = 0; - if (dev_priv->params.psr_safest_params) + if (dev_priv->display.params.psr_safest_params) return EDP_PSR2_TP2_TIME_2500us; if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && @@ -727,21 +771,49 @@ static int psr2_block_count(struct intel_dp *intel_dp) return psr2_block_count_lines(intel_dp) / 4; } +static u8 frames_before_su_entry(struct intel_dp *intel_dp) +{ + u8 frames_before_su_entry; + + frames_before_su_entry = max_t(u8, + intel_dp->psr.sink_sync_latency + 1, + 2); + + /* Entry setup frames must be at least 1 less than frames before SU entry */ + if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry) + frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1; + + return frames_before_su_entry; +} + +static void dg2_activate_panel_replay(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), + 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME); + + intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0, + TRANS_DP2_PANEL_REPLAY_ENABLE); +} + static void hsw_activate_psr2(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = 
intel_dp->psr.transcoder; u32 val = EDP_PSR2_ENABLE; + u32 psr_val = 0; val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp)); - if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv)) + if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv)) val |= EDP_SU_TRACK_ENABLE; - if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12) + if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13) val |= EDP_Y_COORDINATE_ENABLE; - val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2)); + val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp)); + val |= intel_psr2_get_tp_time(intel_dp); if (DISPLAY_VER(dev_priv) >= 12) { @@ -785,6 +857,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) if (intel_dp->psr.req_psr2_sdp_prior_scanline) val |= EDP_PSR2_SU_SDP_SCANLINE; + if (DISPLAY_VER(dev_priv) >= 20) + psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames); + if (intel_dp->psr.psr2_sel_fetch_enabled) { u32 tmp; @@ -798,7 +873,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is * recommending keep this bit unset while PSR2 is enabled. */ - intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0); + intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val); intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val); } @@ -816,13 +891,13 @@ transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_trans return false; } -static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate) +static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state) { - if (!cstate || !cstate->hw.active) + if (!crtc_state->hw.active) return 0; return DIV_ROUND_UP(1000 * 1000, - drm_mode_vrefresh(&cstate->hw.adjusted_mode)); + drm_mode_vrefresh(&crtc_state->hw.adjusted_mode)); } static void psr2_program_idle_frames(struct intel_dp *intel_dp, @@ -943,7 +1018,7 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - if (!dev_priv->params.enable_psr2_sel_fetch && + if (!dev_priv->display.params.enable_psr2_sel_fetch && intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) { drm_dbg_kms(&dev_priv->drm, "PSR2 sel fetch not enabled, disabled by parameter\n"); @@ -1019,7 +1094,7 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d return true; /* Not supported <13 / Wa_22012279113:adl-p */ - if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b) + if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b) return false; crtc_state->req_psr2_sdp_prior_scanline = true; @@ -1056,7 +1131,7 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp, fast_wake_lines > max_wake_lines) return false; - if (i915->params.psr_safest_params) + if (i915->display.params.psr_safest_params) io_wake_lines = fast_wake_lines = max_wake_lines; /* According to Bspec lower limit should be set as 7 lines. 
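frames_before_su_entry() above stacks two constraints: wait at least two frames (or sink_sync_latency + 1 if larger), and always strictly more frames than the sink spends in entry setup. The two steps collapse into a single expression; an equivalent restatement, not the patch's code:

static u8 frames_before_su(u8 sink_sync_latency, u8 entry_setup_frames)
{
	/* max3() is from <linux/minmax.h>; casts keep its type checks happy */
	return max3((u8)2,
		    (u8)(sink_sync_latency + 1),
		    (u8)(entry_setup_frames + 1));
}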
*/ @@ -1066,6 +1141,39 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp, return true; } +static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp, + const struct drm_display_mode *adjusted_mode) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); + int entry_setup_frames = 0; + + if (psr_setup_time < 0) { + drm_dbg_kms(&i915->drm, + "PSR condition failed: Invalid PSR setup time (0x%02x)\n", + intel_dp->psr_dpcd[1]); + return -ETIME; + } + + if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > + adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { + if (DISPLAY_VER(i915) >= 20) { + /* setup entry frames can be up to 3 frames */ + entry_setup_frames = 1; + drm_dbg_kms(&i915->drm, + "PSR setup entry frames %d\n", + entry_setup_frames); + } else { + drm_dbg_kms(&i915->drm, + "PSR condition failed: PSR setup time (%d us) too long\n", + psr_setup_time); + return -ETIME; + } + } + + return entry_setup_frames; +} + static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { @@ -1113,7 +1221,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, * over PSR2. */ if (crtc_state->dsc.compression_enable && - (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) { + (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) { drm_dbg_kms(&dev_priv->drm, "PSR2 cannot be enabled since DSC is enabled\n"); return false; @@ -1206,24 +1314,42 @@ unsupported: return false; } -void intel_psr_compute_config(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state, - struct drm_connector_state *conn_state) +static bool _psr_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; - int psr_setup_time; + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + int entry_setup_frames; /* * Current PSR panels don't work reliably with VRR enabled * So if VRR is enabled, do not enable PSR. */ if (crtc_state->vrr.enable) - return; + return false; if (!CAN_PSR(intel_dp)) - return; + return false; + + entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode); + + if (entry_setup_frames >= 0) { + intel_dp->psr.entry_setup_frames = entry_setup_frames; + } else { + drm_dbg_kms(&dev_priv->drm, + "PSR condition failed: PSR setup timing not met\n"); + return false; + } + + return true; +} + +void intel_psr_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; if (!psr_global_enabled(intel_dp)) { drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n"); @@ -1242,23 +1368,25 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, return; } - psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); - if (psr_setup_time < 0) { + /* + * FIXME figure out what is wrong with PSR+bigjoiner and + * fix it. Presumably something related to the fact that + * PSR is a transcoder level feature. 
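intel_psr_entry_setup_frames() above turns the sink's DPCD setup time into scanlines and tests whether it fits in the vertical blank (crtc_vtotal - crtc_vdisplay - 1 lines); only display version 20+ may make up the difference by spending whole entry-setup frames. The conversion is plain fixed-point arithmetic, sketched here under the assumption that intel_usecs_to_scanlines() computes the same thing (crtc_clock is in kHz):

static int usecs_to_scanlines(const struct drm_display_mode *m, int usecs)
{
	/* pixels = usecs * clock_khz / 1000, then divide by line length */
	return DIV_ROUND_UP(usecs * m->crtc_clock, 1000 * m->crtc_htotal);
}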
+ */ + if (crtc_state->bigjoiner_pipes) { drm_dbg_kms(&dev_priv->drm, - "PSR condition failed: Invalid PSR setup time (0x%02x)\n", - intel_dp->psr_dpcd[1]); + "PSR disabled due to bigjoiner\n"); return; } - if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > - adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { - drm_dbg_kms(&dev_priv->drm, - "PSR condition failed: PSR setup time (%d us) too long\n", - psr_setup_time); + if (CAN_PANEL_REPLAY(intel_dp)) + crtc_state->has_panel_replay = true; + else + crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state); + + if (!(crtc_state->has_panel_replay || crtc_state->has_psr)) return; - } - crtc_state->has_psr = true; crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); @@ -1279,18 +1407,23 @@ void intel_psr_get_config(struct intel_encoder *encoder, return; intel_dp = &dig_port->dp; - if (!CAN_PSR(intel_dp)) + if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) return; mutex_lock(&intel_dp->psr.lock); if (!intel_dp->psr.enabled) goto unlock; - /* - * Not possible to read EDP_PSR/PSR2_CTL registers as it is - * enabled/disabled because of frontbuffer tracking and others. - */ - pipe_config->has_psr = true; + if (intel_dp->psr.panel_replay_enabled) { + pipe_config->has_panel_replay = true; + } else { + /* + * Not possible to read EDP_PSR/PSR2_CTL registers as it is + * enabled/disabled because of frontbuffer tracking and others. + */ + pipe_config->has_psr = true; + } + pipe_config->has_psr2 = intel_dp->psr.psr2_enabled; pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); @@ -1327,8 +1460,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp) lockdep_assert_held(&intel_dp->psr.lock); - /* psr1 and psr2 are mutually exclusive.*/ - if (intel_dp->psr.psr2_enabled) + /* psr1, psr2 and panel-replay are mutually exclusive.*/ + if (intel_dp->psr.panel_replay_enabled) + dg2_activate_panel_replay(intel_dp); + else if (intel_dp->psr.psr2_enabled) hsw_activate_psr2(intel_dp); else hsw_activate_psr1(intel_dp); @@ -1462,12 +1597,10 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, * All supported adlp panels have 1-based X granularity, this may * cause issues if non-supported panels are used. 
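The hunk just below folds the MTL and ADLP chicken register writes into one intel_de_rmw() call by looking the register up through hsw_chicken_trans_reg(). A sketch of the picker that helper is assumed to wrap, keyed off the display IP version:

static i915_reg_t chicken_trans_reg(struct drm_i915_private *i915,
				    enum transcoder cpu_transcoder)
{
	return DISPLAY_VER(i915) >= 14 ?
		MTL_CHICKEN_TRANS(cpu_transcoder) :
		CHICKEN_TRANS(cpu_transcoder);
}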
*/ - if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0)) - intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0, - ADLP_1_BASED_X_GRANULARITY); - else if (IS_ALDERLAKE_P(dev_priv)) - intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0, - ADLP_1_BASED_X_GRANULARITY); + if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) || + IS_ALDERLAKE_P(dev_priv)) + intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder), + 0, ADLP_1_BASED_X_GRANULARITY); /* Wa_16012604467:adlp,mtl[a0,b0] */ if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0)) @@ -1518,6 +1651,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled); intel_dp->psr.psr2_enabled = crtc_state->has_psr2; + intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay; intel_dp->psr.busy_frontbuffer_bits = 0; intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; intel_dp->psr.transcoder = crtc_state->cpu_transcoder; @@ -1533,8 +1667,12 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, if (!psr_interrupt_error_check(intel_dp)) return; - drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", - intel_dp->psr.psr2_enabled ? "2" : "1"); + if (intel_dp->psr.panel_replay_enabled) + drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n"); + else + drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", + intel_dp->psr.psr2_enabled ? "2" : "1"); + intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc); intel_snps_phy_update_psr_power_state(dev_priv, phy, true); intel_psr_enable_sink(intel_dp); @@ -1563,7 +1701,10 @@ static void intel_psr_exit(struct intel_dp *intel_dp) return; } - if (intel_dp->psr.psr2_enabled) { + if (intel_dp->psr.panel_replay_enabled) { + intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), + TRANS_DP2_PANEL_REPLAY_ENABLE, 0); + } else if (intel_dp->psr.psr2_enabled) { tgl_disallow_dc3co_on_psr2_exit(intel_dp); val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder), @@ -1612,8 +1753,11 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) if (!intel_dp->psr.enabled) return; - drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n", - intel_dp->psr.psr2_enabled ? "2" : "1"); + if (intel_dp->psr.panel_replay_enabled) + drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n"); + else + drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n", + intel_dp->psr.psr2_enabled ? 
"2" : "1"); intel_psr_exit(intel_dp); intel_psr_wait_exit_locked(intel_dp); @@ -1646,6 +1790,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0); intel_dp->psr.enabled = false; + intel_dp->psr.panel_replay_enabled = false; intel_dp->psr.psr2_enabled = false; intel_dp->psr.psr2_sel_fetch_enabled = false; intel_dp->psr.psr2_sel_fetch_cff_enabled = false; @@ -1793,81 +1938,6 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp) intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); } -void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum pipe pipe = plane->pipe; - - if (!crtc_state->enable_psr2_sel_fetch) - return; - - intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0); -} - -void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *i915 = to_i915(plane->base.dev); - enum pipe pipe = plane->pipe; - - if (!crtc_state->enable_psr2_sel_fetch) - return; - - if (plane->id == PLANE_CURSOR) - intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), - plane_state->ctl); - else - intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), - PLANE_SEL_FETCH_CTL_ENABLE); -} - -void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum pipe pipe = plane->pipe; - const struct drm_rect *clip; - u32 val; - int x, y; - - if (!crtc_state->enable_psr2_sel_fetch) - return; - - if (plane->id == PLANE_CURSOR) - return; - - clip = &plane_state->psr2_sel_fetch_area; - - val = (clip->y1 + plane_state->uapi.dst.y1) << 16; - val |= plane_state->uapi.dst.x1; - intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val); - - x = plane_state->view.color_plane[color_plane].x; - - /* - * From Bspec: UV surface Start Y Position = half of Y plane Y - * start position. 
- */ - if (!color_plane) - y = plane_state->view.color_plane[color_plane].y + clip->y1; - else - y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2; - - val = y << 16 | x; - - intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), - val); - - /* Sizes are 0 based */ - val = (drm_rect_height(clip) - 1) << 16; - val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; - intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); -} - void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); @@ -2127,8 +2197,19 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, continue; inter = pipe_clip; - if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) + sel_fetch_area = &new_plane_state->psr2_sel_fetch_area; + if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) { + sel_fetch_area->y1 = -1; + sel_fetch_area->y2 = -1; + /* + * if plane sel fetch was previously enabled -> + * disable it + */ + if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0) + crtc_state->update_planes |= BIT(plane->id); + continue; + } if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) { full_update = true; @@ -2217,7 +2298,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; - if (!crtc_state->has_psr) + if (!(crtc_state->has_psr || crtc_state->has_panel_replay)) return; for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, @@ -2703,7 +2784,7 @@ void intel_psr_init(struct intel_dp *intel_dp) struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - if (!HAS_PSR(dev_priv)) + if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv))) return; /* @@ -2721,7 +2802,10 @@ void intel_psr_init(struct intel_dp *intel_dp) return; } - intel_dp->psr.source_support = true; + if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) + intel_dp->psr.source_panel_replay_support = true; + else + intel_dp->psr.source_support = true; /* Set link_standby x link_off defaults */ if (DISPLAY_VER(dev_priv) < 12) @@ -2738,12 +2822,19 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp, { struct drm_dp_aux *aux = &intel_dp->aux; int ret; + unsigned int offset; + + offset = intel_dp->psr.panel_replay_enabled ? + DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS; - ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status); + ret = drm_dp_dpcd_readb(aux, offset, status); if (ret != 1) return ret; - ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status); + offset = intel_dp->psr.panel_replay_enabled ? 
+ DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS; + + ret = drm_dp_dpcd_readb(aux, offset, error_status); if (ret != 1) return ret; @@ -2964,7 +3055,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) status = live_status[status_val]; } - seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); + seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val); } static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) @@ -2977,18 +3068,22 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) bool enabled; u32 val; - seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support)); + seq_printf(m, "Sink support: PSR = %s", + str_yes_no(psr->sink_support)); + if (psr->sink_support) seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]); - seq_puts(m, "\n"); + seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support)); - if (!psr->sink_support) + if (!(psr->sink_support || psr->sink_panel_replay_support)) return 0; wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&psr->lock); - if (psr->enabled) + if (psr->panel_replay_enabled) + status = "Panel Replay Enabled"; + else if (psr->enabled) status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled"; else status = "disabled"; @@ -3001,14 +3096,17 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) goto unlock; } - if (psr->psr2_enabled) { + if (psr->panel_replay_enabled) { + val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder)); + enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE; + } else if (psr->psr2_enabled) { val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)); enabled = val & EDP_PSR2_ENABLE; } else { val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)); enabled = val & EDP_PSR_ENABLE; } - seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", + seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n", str_enabled_disabled(enabled), val); psr_source_status(intel_dp, m); seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", @@ -3146,6 +3244,16 @@ void intel_psr_debugfs_register(struct drm_i915_private *i915) i915, &i915_edp_psr_status_fops); } +static const char *psr_mode_str(struct intel_dp *intel_dp) +{ + if (intel_dp->psr.panel_replay_enabled) + return "PANEL-REPLAY"; + else if (intel_dp->psr.enabled) + return "PSR"; + + return "unknown"; +} + static int i915_psr_sink_status_show(struct seq_file *m, void *data) { struct intel_connector *connector = m->private; @@ -3160,12 +3268,19 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) "reserved", "sink internal error", }; + static const char * const panel_replay_status[] = { + "Sink device frame is locked to the Source device", + "Sink device is coasting, using the VTotal target", + "Sink device is governing the frame rate (frame rate unlock is granted)", + "Sink device in the process of re-locking with the Source device", + }; const char *str; int ret; u8 status, error_status; + u32 idx; - if (!CAN_PSR(intel_dp)) { - seq_puts(m, "PSR Unsupported\n"); + if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) { + seq_puts(m, "PSR/Panel-Replay Unsupported\n"); return -ENODEV; } @@ -3176,15 +3291,20 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) if (ret) return ret; - status &= DP_PSR_SINK_STATE_MASK; - if (status < ARRAY_SIZE(sink_status)) - str = sink_status[status]; - else - str = "unknown"; + str = "unknown"; + if (intel_dp->psr.panel_replay_enabled) { + idx = (status & 
DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT; + if (idx < ARRAY_SIZE(panel_replay_status)) + str = panel_replay_status[idx]; + } else if (intel_dp->psr.enabled) { + idx = status & DP_PSR_SINK_STATE_MASK; + if (idx < ARRAY_SIZE(sink_status)) + str = sink_status[idx]; + } - seq_printf(m, "Sink PSR status: 0x%x [%s]\n", status, str); + seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str); - seq_printf(m, "Sink PSR error status: 0x%x", error_status); + seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status); if (error_status & (DP_PSR_RFB_STORAGE_ERROR | DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | @@ -3193,11 +3313,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data) else seq_puts(m, "\n"); if (error_status & DP_PSR_RFB_STORAGE_ERROR) - seq_puts(m, "\tPSR RFB storage error\n"); + seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp)); if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) - seq_puts(m, "\tPSR VSC SDP uncorrectable error\n"); + seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp)); if (error_status & DP_PSR_LINK_CRC_ERROR) - seq_puts(m, "\tPSR Link CRC error\n"); + seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp)); return ret; } @@ -3217,13 +3337,16 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector) struct drm_i915_private *i915 = to_i915(connector->base.dev); struct dentry *root = connector->base.debugfs_entry; - if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) + /* TODO: Add support for MST connectors as well. */ + if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP && + connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) || + connector->mst_port) return; debugfs_create_file("i915_psr_sink_status", 0444, root, connector, &i915_psr_sink_status_fops); - if (HAS_PSR(i915)) + if (HAS_PSR(i915) || HAS_DP20(i915)) debugfs_create_file("i915_psr_status", 0444, root, connector, &i915_psr_status_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index bf35f42df6..143e0595c0 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -21,6 +21,13 @@ struct intel_encoder; struct intel_plane; struct intel_plane_state; +#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \ + (intel_dp)->psr.source_support) + +#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \ + (intel_dp)->psr.source_panel_replay_support) + +bool intel_encoder_can_psr(struct intel_encoder *encoder); void intel_psr_init_dpcd(struct intel_dp *intel_dp); void intel_psr_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc); @@ -48,16 +55,6 @@ bool intel_psr_enabled(struct intel_dp *intel_dp); int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state); -void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane); -void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); - -void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); void intel_psr_pause(struct intel_dp *intel_dp); void 
intel_psr_resume(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h index d39951383c..efe4306b37 100644 --- a/drivers/gpu/drm/i915/display/intel_psr_regs.h +++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h @@ -35,6 +35,8 @@ #define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 3) #define EDP_PSR_MAX_SLEEP_TIME_MASK REG_GENMASK(24, 20) #define EDP_PSR_MAX_SLEEP_TIME(x) REG_FIELD_PREP(EDP_PSR_MAX_SLEEP_TIME_MASK, (x)) +#define LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK REG_GENMASK(17, 16) +#define LNL_EDP_PSR_ENTRY_SETUP_FRAMES(x) REG_FIELD_PREP(LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK, (x)) #define EDP_PSR_SKIP_AUX_EXIT REG_BIT(12) #define EDP_PSR_TP_MASK REG_BIT(11) #define EDP_PSR_TP_TP1_TP2 REG_FIELD_PREP(EDP_PSR_TP_MASK, 0) diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c index 543cdc46aa..600c815e37 100644 --- a/drivers/gpu/drm/i915/display/intel_qp_tables.c +++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c @@ -34,9 +34,6 @@ * These qp tables are as per the C model * and it has the rows pointing to bpps which increment * in steps of 0.5 - * We do not support fractional bpps as of today, - * hence we would skip the fractional bpps during - * our references for qp calclulations. */ static const u8 rc_range_minqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 312f88d90a..cc978ee6d1 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "i915_drv.h" #include "i915_reg.h" @@ -1787,17 +1788,28 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, intel_sdvo_get_eld(intel_sdvo, pipe_config); } -static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo) +static void intel_sdvo_disable_audio(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); + + if (!old_crtc_state->has_audio) + return; + intel_sdvo_set_audio_state(intel_sdvo, 0); } -static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo, +static void intel_sdvo_enable_audio(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); const u8 *eld = crtc_state->eld; + if (!crtc_state->has_audio) + return; + intel_sdvo_set_audio_state(intel_sdvo, 0); intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD, @@ -1818,9 +1830,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state, struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); u32 temp; - if (old_crtc_state->has_audio) - intel_sdvo_disable_audio(intel_sdvo); - intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, @@ -1912,9 +1921,6 @@ static void intel_enable_sdvo(struct intel_atomic_state *state, intel_sdvo_set_encoder_power_state(intel_sdvo, DRM_MODE_DPMS_ON); intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag); - - if (pipe_config->has_audio) - intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state); } static enum drm_mode_status @@ 
-3317,7 +3323,6 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo_ddc *ddc, ddc->ddc_bus = ddc_bus; ddc->ddc.owner = THIS_MODULE; - ddc->ddc.class = I2C_CLASS_DDC; snprintf(ddc->ddc.name, I2C_NAME_SIZE, "SDVO %c DDC%d", port_name(sdvo->base.port), ddc_bus); ddc->ddc.dev.parent = &pdev->dev; @@ -3396,6 +3401,8 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, } intel_encoder->pre_enable = intel_sdvo_pre_enable; intel_encoder->enable = intel_enable_sdvo; + intel_encoder->audio_enable = intel_sdvo_enable_audio; + intel_encoder->audio_disable = intel_sdvo_disable_audio; intel_encoder->get_hw_state = intel_sdvo_get_hw_state; intel_encoder->get_config = intel_sdvo_get_config; diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index ce5a73a4cc..bc61e736f9 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -3,7 +3,7 @@ * Copyright © 2019 Intel Corporation */ -#include +#include #include "i915_reg.h" #include "intel_ddi.h" diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 1fb16510f7..d7b440c8ca 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -48,6 +48,11 @@ #include "intel_frontbuffer.h" #include "intel_sprite.h" +static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite) +{ + return pipe * DISPLAY_RUNTIME_INFO(i915)->num_sprites[pipe] + sprite + 'A'; +} + static void i9xx_plane_linear_gamma(u16 gamma[8]) { /* The points are not evenly spaced. */ @@ -1636,7 +1641,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, 0, plane_funcs, formats, num_formats, modifiers, DRM_PLANE_TYPE_OVERLAY, - "sprite %c", sprite_name(pipe, sprite)); + "sprite %c", sprite_name(dev_priv, pipe, sprite)); kfree(modifiers); if (ret) diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index f64d348a96..dcf05e00e5 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -1030,18 +1030,25 @@ static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enabl __xelpdp_tc_phy_enable_tcss_power(tc, enable); - if ((!tc_phy_wait_for_ready(tc) || - !xelpdp_tc_phy_wait_for_tcss_power(tc, enable)) && - !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { - if (enable) { - __xelpdp_tc_phy_enable_tcss_power(tc, false); - xelpdp_tc_phy_wait_for_tcss_power(tc, false); - } + if (enable && !tc_phy_wait_for_ready(tc)) + goto out_disable; - return false; - } + if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable)) + goto out_disable; return true; + +out_disable: + if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) + return false; + + if (!enable) + return false; + + __xelpdp_tc_phy_enable_tcss_power(tc, false); + xelpdp_tc_phy_wait_for_tcss_power(tc, false); + + return false; } static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take) diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index f790fd10ba..992a725de7 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -1417,9 +1417,6 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv, static void set_color_conversion(struct drm_i915_private *dev_priv, const struct color_conversion *color_conversion) { - if (!color_conversion) - return; - intel_de_write(dev_priv, TV_CSC_Y, (color_conversion->ry << 
16) | color_conversion->gy); intel_de_write(dev_priv, TV_CSC_Y2, @@ -1454,9 +1451,6 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state, int xpos, ypos; unsigned int xsize, ysize; - if (!tv_mode) - return; /* can't happen (mode_prepare prevents this) */ - tv_ctl = intel_de_read(dev_priv, TV_CTL); tv_ctl &= TV_CTL_SAVE; diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index 2cec2abf97..fe256bf7b4 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -265,6 +265,32 @@ int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline) return (scanline + vtotal - crtc->scanline_offset) % vtotal; } +/* + * The uncore version of the spin lock functions is used to decide + * whether we need to lock the uncore lock or not. This is only + * needed in i915, not in Xe. + * + * This lock in i915 is needed because some old platforms (at least + * IVB and possibly HSW as well), which are not supported in Xe, need + * all register accesses to the same cacheline to be serialized, + * otherwise they may hang. + */ +static void intel_vblank_section_enter(struct drm_i915_private *i915) + __acquires(i915->uncore.lock) +{ +#ifdef I915 + spin_lock(&i915->uncore.lock); +#endif +} + +static void intel_vblank_section_exit(struct drm_i915_private *i915) + __releases(i915->uncore.lock) +{ +#ifdef I915 + spin_unlock(&i915->uncore.lock); +#endif +} + static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, bool in_vblank_irq, int *vpos, int *hpos, @@ -302,11 +328,12 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, } /* - * Lock uncore.lock, as we will do multiple timing critical raw - * register reads, potentially with preemption disabled, so the - * following code must not block on uncore.lock. + * Enter vblank critical section, as we will do multiple + * timing critical raw register reads, potentially with + * preemption disabled, so the following code must not block. */ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + local_irq_save(irqflags); + intel_vblank_section_enter(dev_priv); /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ @@ -374,7 +401,8 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + intel_vblank_section_exit(dev_priv); + local_irq_restore(irqflags); /* * While in vblank, position will be negative @@ -412,9 +440,13 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc) unsigned long irqflags; int position; - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + local_irq_save(irqflags); + intel_vblank_section_enter(dev_priv); + position = __intel_get_crtc_scanline(crtc); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + + intel_vblank_section_exit(dev_priv); + local_irq_restore(irqflags); return position; } @@ -537,7 +569,7 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state, * Need to audit everything to make sure it's safe. 
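On an i915 build the new enter/exit pair plus explicit local_irq_save()/restore() is equivalent to the spin_lock_irqsave() it replaces; splitting the two halves is what lets the Xe build drop the uncore lock at compile time. The equivalence, for non-PREEMPT_RT kernels:

/*
 * old:                                       new (i915 build):
 *   spin_lock_irqsave(&uncore.lock, flags);    local_irq_save(flags);
 *                                              spin_lock(&uncore.lock);
 *   ...                                        ...
 *   spin_unlock_irqrestore(&uncore.lock,       spin_unlock(&uncore.lock);
 *                          flags);             local_irq_restore(flags);
 *
 * With I915 undefined (the Xe build), only the IRQ disable remains.
 */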
*/ spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags); - spin_lock(&i915->uncore.lock); + intel_vblank_section_enter(i915); drm_calc_timestamping_constants(&crtc->base, &adjusted_mode); @@ -546,7 +578,6 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state, crtc->mode_flags = mode_flags; crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state); - - spin_unlock(&i915->uncore.lock); + intel_vblank_section_exit(i915); spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags); } diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 6757dbae9e..17d6572f9d 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -77,8 +77,8 @@ intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf, static void calculate_rc_params(struct drm_dsc_config *vdsc_cfg) { + int bpp = to_bpp_int(vdsc_cfg->bits_per_pixel); int bpc = vdsc_cfg->bits_per_component; - int bpp = vdsc_cfg->bits_per_pixel >> 4; int qp_bpc_modifier = (bpc - 8) * 2; int uncompressed_bpg_rate; int first_line_bpg_offset; @@ -148,7 +148,13 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg) static const s8 ofs_und8[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; - + /* + * For 420 format since bits_per_pixel (bpp) is set to target bpp * 2, + * QP table values for target bpp 4.0 to 4.4375 (rounded to 4.0) are + * actually for bpp 8 to 8.875 (rounded to 4.0 * 2 i.e 8). + * Similarly values for target bpp 4.5 to 4.8375 (rounded to 4.5) + * are for bpp 9 to 9.875 (rounded to 4.5 * 2 i.e 9), and so on. + */ bpp_i = bpp - 8; for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) { u8 range_bpg_offset; @@ -178,6 +184,9 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg) range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK; } } else { + /* fractional bpp part * 10000 (for precision up to 4 decimal places) */ + int fractional_bits = to_bpp_frac(vdsc_cfg->bits_per_pixel); + static const s8 ofs_und6[] = { 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; @@ -191,7 +200,14 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg) 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; - bpp_i = (2 * (bpp - 6)); + /* + * QP table rows have values in increment of 0.5. + * So 6.0 bpp to 6.4375 will have index 0, 6.5 to 6.9375 will have index 1, + * and so on. + * 0.5 fractional part with 4 decimal precision becomes 5000 + */ + bpp_i = ((bpp - 6) + (fractional_bits < 5000 ? 
0 : 1)); + for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) { u8 range_bpg_offset; @@ -248,7 +264,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config; - u16 compressed_bpp = pipe_config->dsc.compressed_bpp; + u16 compressed_bpp = to_bpp_int(pipe_config->dsc.compressed_bpp_x16); int err; int ret; @@ -279,8 +295,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) /* Gen 11 does not support VBR */ vdsc_cfg->vbr_enable = false; - /* Gen 11 only supports integral values of bpp */ - vdsc_cfg->bits_per_pixel = compressed_bpp << 4; + vdsc_cfg->bits_per_pixel = pipe_config->dsc.compressed_bpp_x16; /* * According to DSC 1.2 specs in Section 4.1 if native_420 is set @@ -797,13 +812,13 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) } static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps, - bool *check_equal) + bool *all_equal) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); i915_reg_t dsc_reg[2]; int i, vdsc_per_pipe, dsc_reg_num; - u32 val = 0; + u32 val; vdsc_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state); dsc_reg_num = min_t(int, ARRAY_SIZE(dsc_reg), vdsc_per_pipe); @@ -812,20 +827,13 @@ static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps, intel_dsc_get_pps_reg(crtc_state, pps, dsc_reg, dsc_reg_num); - if (check_equal) - *check_equal = true; - - for (i = 0; i < dsc_reg_num; i++) { - u32 tmp; + *all_equal = true; - tmp = intel_de_read(i915, dsc_reg[i]); + val = intel_de_read(i915, dsc_reg[0]); - if (i == 0) { - val = tmp; - } else if (check_equal && tmp != val) { - *check_equal = false; - break; - } else if (!check_equal) { + for (i = 1; i < dsc_reg_num; i++) { + if (intel_de_read(i915, dsc_reg[i]) != val) { + *all_equal = false; break; } } @@ -874,7 +882,7 @@ static void intel_dsc_get_pps_config(struct intel_crtc_state *crtc_state) if (vdsc_cfg->native_420) vdsc_cfg->bits_per_pixel >>= 1; - crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4; + crtc_state->dsc.compressed_bpp_x16 = vdsc_cfg->bits_per_pixel; /* PPS 2 */ pps_temp = intel_dsc_pps_read_and_verify(crtc_state, 2); diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index eb5bd07439..f542ee1db1 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -117,6 +117,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, const struct drm_display_info *info = &connector->base.display_info; int vmin, vmax; + /* + * FIXME all joined pipes share the same transcoder. + * Need to account for that during VRR toggle/push/etc. 
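Worked example of the QP row lookup the comment above describes, as a sketch: the tables step in 0.5 bpp per row starting at 6.0 bpp, so with the fractional part scaled to four decimal places the row index follows from the integer part plus a half-step round. Assuming frac_x10000 is to_bpp_frac()'s scaled fraction:

static int qp_table_row(int bpp_int, int frac_x10000)
{
	/* 6.0 -> row 0, 6.5 -> row 1, 7.0 -> row 2, 12.5 -> row 13 */
	return 2 * (bpp_int - 6) + (frac_x10000 < 5000 ? 0 : 1);
}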
+ */ + if (crtc_state->bigjoiner_pipes) + return; + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) return; diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 245a64332c..8bba6c2e50 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -18,10 +18,10 @@ #include "intel_fbc.h" #include "intel_frontbuffer.h" #include "intel_psr.h" +#include "intel_psr_regs.h" #include "skl_scaler.h" #include "skl_universal_plane.h" #include "skl_watermark.h" -#include "gt/intel_gt.h" #include "pxp/intel_pxp.h" static const u32 skl_plane_formats[] = { @@ -630,6 +630,18 @@ skl_plane_disable_arm(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); } +static void icl_plane_disable_sel_fetch_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + + if (!crtc_state->enable_psr2_sel_fetch) + return; + + intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0); +} + static void icl_plane_disable_arm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) @@ -643,7 +655,7 @@ icl_plane_disable_arm(struct intel_plane *plane, skl_write_plane_wm(plane, crtc_state); - intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state); + icl_plane_disable_sel_fetch_arm(plane, crtc_state); intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); } @@ -1007,7 +1019,8 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state, * The DPT object contains only one vma, so the VMA's offset * within the DPT is always 0. */ - drm_WARN_ON(&i915->drm, plane_state->dpt_vma->node.start); + drm_WARN_ON(&i915->drm, plane_state->dpt_vma && + plane_state->dpt_vma->node.start); drm_WARN_ON(&i915->drm, offset & 0x1fffff); return offset >> 9; } else { @@ -1197,6 +1210,48 @@ skl_plane_update_arm(struct intel_plane *plane, skl_plane_surf(plane_state, 0)); } +static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + const struct drm_rect *clip; + u32 val; + int x, y; + + if (!crtc_state->enable_psr2_sel_fetch) + return; + + clip = &plane_state->psr2_sel_fetch_area; + + val = (clip->y1 + plane_state->uapi.dst.y1) << 16; + val |= plane_state->uapi.dst.x1; + intel_de_write_fw(i915, PLANE_SEL_FETCH_POS(pipe, plane->id), val); + + x = plane_state->view.color_plane[color_plane].x; + + /* + * From Bspec: UV surface Start Y Position = half of Y plane Y + * start position. 
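+ * For example, with a selective fetch region starting at clip->y1 = 10,
+ * the Y plane offset below advances by 10 lines while the UV plane
+ * offset advances by only 10 / 2 = 5.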
+ */ + if (!color_plane) + y = plane_state->view.color_plane[color_plane].y + clip->y1; + else + y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2; + + val = y << 16 | x; + + intel_de_write_fw(i915, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), + val); + + /* Sizes are 0 based */ + val = (drm_rect_height(clip) - 1) << 16; + val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; + intel_de_write_fw(i915, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); +} + static void icl_plane_update_noarm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, @@ -1269,7 +1324,24 @@ icl_plane_update_noarm(struct intel_plane *plane, if (plane_state->force_black) icl_plane_csc_load_black(plane); - intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane); + icl_plane_update_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane); +} + +static void icl_plane_update_sel_fetch_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + + if (!crtc_state->enable_psr2_sel_fetch) + return; + + if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0) + intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), + PLANE_SEL_FETCH_CTL_ENABLE); + else + icl_plane_disable_sel_fetch_arm(plane, crtc_state); } static void @@ -1296,7 +1368,7 @@ icl_plane_update_arm(struct intel_plane *plane, if (plane_state->scaler_id >= 0) skl_program_plane_scaler(plane, crtc_state, plane_state); - intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state); + icl_plane_update_sel_fetch_arm(plane, crtc_state, plane_state); /* * The control register self-arms if the plane was previously @@ -1855,16 +1927,19 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb) } } -static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj) +static void check_protection(struct intel_plane_state *plane_state) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *i915 = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); - return intel_pxp_key_check(i915->pxp, obj, false) == 0; -} + if (DISPLAY_VER(i915) < 11) + return; -static bool pxp_is_borked(struct drm_i915_gem_object *obj) -{ - return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj); + plane_state->decrypt = intel_pxp_key_check(i915->pxp, obj, false) == 0; + plane_state->force_black = i915_gem_object_is_protected(obj) && + !plane_state->decrypt; } static int skl_plane_check(struct intel_crtc_state *crtc_state, @@ -1911,10 +1986,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - if (DISPLAY_VER(dev_priv) >= 11) { - plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb)); - plane_state->force_black = pxp_is_borked(intel_fb_obj(fb)); - } + check_protection(plane_state); /* HW only has 8 bits pixel precision, disable plane if invisible */ if (!(plane_state->hw.alpha >> 8)) @@ -2218,6 +2290,9 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915, if (HAS_4TILE(i915)) caps |= INTEL_PLANE_CAP_TILING_4; + if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915)) + return caps; + if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) { caps |= INTEL_PLANE_CAP_CCS_RC; if (DISPLAY_VER(i915) >= 12) @@ -2489,7 +2564,7 @@ 
skl_get_initial_plane_config(struct intel_crtc *crtc, goto error; } - if (!dev_priv->params.enable_dpt && + if (!dev_priv->display.params.enable_dpt && intel_fb_modifier_uses_dpt(dev_priv, fb->modifier)) { drm_dbg_kms(&dev_priv->drm, "DPT disabled, skipping initial FB\n"); goto error; diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 99b8ccdc3d..56588d6e24 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -412,7 +412,7 @@ static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); - if (!i915->params.enable_sagv) + if (!i915->display.params.enable_sagv) return false; if (DISPLAY_VER(i915) >= 12) @@ -3702,7 +3702,8 @@ static int intel_sagv_status_show(struct seq_file *m, void *unused) }; seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915))); - seq_printf(m, "SAGV modparam: %s\n", str_enabled_disabled(i915->params.enable_sagv)); + seq_printf(m, "SAGV modparam: %s\n", + str_enabled_disabled(i915->display.params.enable_sagv)); seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]); seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index f488394d31..9b33b8a74d 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -561,6 +561,12 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder) glk_dsi_disable_mipi_io(encoder); } +static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port) +{ + return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ? + BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); +} + static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -570,7 +576,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) drm_dbg_kms(&dev_priv->drm, "\n"); for_each_dsi_port(port, intel_dsi->ports) { /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ - i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? + i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), @@ -589,7 +595,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) * On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI * Port A only. MIPI Port C has no similar bit for checking. */ - if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || port == PORT_A) && + if ((IS_BROXTON(dev_priv) || port == PORT_A) && intel_de_wait_for_clear(dev_priv, port_ctrl, AFE_LATCHOUT, 30)) drm_err(&dev_priv->drm, "DSI LP not going Low\n"); @@ -627,8 +633,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, } for_each_dsi_port(port, intel_dsi->ports) { - i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? - BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); + i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); u32 temp; temp = intel_de_read(dev_priv, port_ctrl); @@ -664,8 +669,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) enum port port; for_each_dsi_port(port, intel_dsi->ports) { - i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? 
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); + i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); /* de-assert ip_tg_enable signal */ intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0); @@ -955,9 +959,8 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, /* XXX: this only works for one DSI output */ for_each_dsi_port(port, intel_dsi->ports) { - i915_reg_t ctrl_reg = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? - BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); - bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE; + i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); + bool enabled = intel_de_read(dev_priv, port_ctrl) & DPI_ENABLE; /* * Due to some hardware limitations on VLV/CHV, the DPI enable @@ -1529,16 +1532,8 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) } } -static void intel_dsi_encoder_destroy(struct drm_encoder *encoder) -{ - struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder)); - - intel_dsi_vbt_gpio_cleanup(intel_dsi); - intel_encoder_destroy(encoder); -} - static const struct drm_encoder_funcs intel_dsi_funcs = { - .destroy = intel_dsi_encoder_destroy, + .destroy = intel_encoder_destroy, }; static enum drm_mode_status vlv_dsi_mode_valid(struct drm_connector *connector, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index e38f06a6e5..dcbfe32fd3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -279,7 +279,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915, } static struct i915_gem_proto_context * -proto_context_create(struct drm_i915_private *i915, unsigned int flags) +proto_context_create(struct drm_i915_file_private *fpriv, + struct drm_i915_private *i915, unsigned int flags) { struct i915_gem_proto_context *pc, *err; @@ -287,6 +288,7 @@ proto_context_create(struct drm_i915_private *i915, unsigned int flags) if (!pc) return ERR_PTR(-ENOMEM); + pc->fpriv = fpriv; pc->num_user_engines = -1; pc->user_engines = NULL; pc->user_flags = BIT(UCONTEXT_BANNABLE) | @@ -1622,6 +1624,7 @@ i915_gem_create_context(struct drm_i915_private *i915, err = PTR_ERR(ppgtt); goto err_ctx; } + ppgtt->vm.fpriv = pc->fpriv; vm = &ppgtt->vm; } if (vm) @@ -1741,7 +1744,7 @@ int i915_gem_context_open(struct drm_i915_private *i915, /* 0 reserved for invalid/unassigned ppgtt */ xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); - pc = proto_context_create(i915, 0); + pc = proto_context_create(file_priv, i915, 0); if (IS_ERR(pc)) { err = PTR_ERR(pc); goto err; @@ -1823,6 +1826,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ args->vm_id = id; + ppgtt->vm.fpriv = file_priv; return 0; err_put: @@ -2285,7 +2289,8 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, return -EIO; } - ext_data.pc = proto_context_create(i915, args->flags); + ext_data.pc = proto_context_create(file->driver_priv, i915, + args->flags); if (IS_ERR(ext_data.pc)) return PTR_ERR(ext_data.pc); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index cb78214a7d..03bc7f9d19 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -188,6 +188,9 @@ struct i915_gem_proto_engine { * CONTEXT_CREATE_SET_PARAM during GEM_CONTEXT_CREATE. 
*/ struct i915_gem_proto_context { + /** @fpriv: Client which creates the context */ + struct drm_i915_file_private *fpriv; + /** @vm: See &i915_gem_context.vm */ struct i915_address_space *vm; @@ -409,9 +412,9 @@ struct i915_gem_context { /** @stale: tracks stale engines to be destroyed */ struct { - /** @lock: guards engines */ + /** @stale.lock: guards engines */ spinlock_t lock; - /** @engines: list of stale engines */ + /** @stale.engines: list of stale engines */ struct list_head engines; } stale; }; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 683fd8d315..555022c065 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -9,6 +9,7 @@ #include #include +#include #include #include "display/intel_frontbuffer.h" @@ -253,6 +254,8 @@ struct i915_execbuffer { struct intel_gt *gt; /* gt for the execbuf */ struct intel_context *context; /* logical state for the request */ struct i915_gem_context *gem_context; /** caller's context */ + intel_wakeref_t wakeref; + intel_wakeref_t wakeref_gt0; /** our requests to build */ struct i915_request *requests[MAX_ENGINE_INSTANCE + 1]; @@ -1156,7 +1159,7 @@ static void reloc_cache_unmap(struct reloc_cache *cache) vaddr = unmask_page(cache->vaddr); if (cache->vaddr & KMAP) - kunmap_atomic(vaddr); + kunmap_local(vaddr); else io_mapping_unmap_atomic((void __iomem *)vaddr); } @@ -1172,7 +1175,7 @@ static void reloc_cache_remap(struct reloc_cache *cache, if (cache->vaddr & KMAP) { struct page *page = i915_gem_object_get_page(obj, cache->page); - vaddr = kmap_atomic(page); + vaddr = kmap_local_page(page); cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr; } else { @@ -1202,7 +1205,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer if (cache->vaddr & CLFLUSH_AFTER) mb(); - kunmap_atomic(vaddr); + kunmap_local(vaddr); i915_gem_object_finish_access(obj); } else { struct i915_ggtt *ggtt = cache_to_ggtt(cache); @@ -1234,7 +1237,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj, struct page *page; if (cache->vaddr) { - kunmap_atomic(unmask_page(cache->vaddr)); + kunmap_local(unmask_page(cache->vaddr)); } else { unsigned int flushes; int err; @@ -1256,7 +1259,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj, if (!obj->mm.dirty) set_page_dirty(page); - vaddr = kmap_atomic(page); + vaddr = kmap_local_page(page); cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr; cache->page = pageno; @@ -1678,7 +1681,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); size = nreloc * sizeof(*relocs); - relocs = kvmalloc_array(size, 1, GFP_KERNEL); + relocs = kvmalloc_array(1, size, GFP_KERNEL); if (!relocs) { err = -ENOMEM; goto err; @@ -2719,13 +2722,13 @@ eb_select_engine(struct i915_execbuffer *eb) for_each_child(ce, child) intel_context_get(child); - intel_gt_pm_get(gt); + eb->wakeref = intel_gt_pm_get(ce->engine->gt); /* * Keep GT0 active on MTL so that i915_vma_parked() doesn't * free VMAs while execbuf ioctl is validating VMAs. 
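 * The extra reference below is only taken on remote GTs (gt->info.id != 0);
 * on the root GT the wakeref acquired just above already keeps GT0 awake.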
*/ if (gt->info.id) - intel_gt_pm_get(to_gt(gt->i915)); + eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915)); if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { err = intel_context_alloc_state(ce); @@ -2765,9 +2768,9 @@ eb_select_engine(struct i915_execbuffer *eb) err: if (gt->info.id) - intel_gt_pm_put(to_gt(gt->i915)); + intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0); - intel_gt_pm_put(gt); + intel_gt_pm_put(ce->engine->gt, eb->wakeref); for_each_child(ce, child) intel_context_put(child); intel_context_put(ce); @@ -2785,8 +2788,8 @@ eb_put_engine(struct i915_execbuffer *eb) * i915_vma_parked() from interfering while execbuf validates vmas. */ if (eb->gt->info.id) - intel_gt_pm_put(to_gt(eb->gt->i915)); - intel_gt_pm_put(eb->gt); + intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0); + intel_gt_pm_put(eb->context->engine->gt, eb->wakeref); for_each_child(eb->context, child) intel_context_put(child); intel_context_put(eb->context); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index 6bc26b4b06..ea7561ae6e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -36,7 +36,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) struct sg_table *st; struct scatterlist *sg; unsigned int npages; /* restricted by sg_alloc_table */ - int max_order = MAX_ORDER; + int max_order = MAX_PAGE_ORDER; unsigned int max_segment; gfp_t gfp; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index c26d875558..58e6c680fe 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -106,6 +106,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, INIT_LIST_HEAD(&obj->mm.link); +#ifdef CONFIG_PROC_FS + INIT_LIST_HEAD(&obj->client_link); +#endif + INIT_LIST_HEAD(&obj->lut_list); spin_lock_init(&obj->lut_lock); @@ -293,6 +297,10 @@ void __i915_gem_free_object_rcu(struct rcu_head *head) container_of(head, typeof(*obj), rcu); struct drm_i915_private *i915 = to_i915(obj->base.dev); + /* We need to keep this alive for RCU read access from fdinfo. */ + if (obj->mm.n_placements > 1) + kfree(obj->mm.placements); + i915_gem_object_free(obj); GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); @@ -389,9 +397,6 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj) if (obj->ops->release) obj->ops->release(obj); - if (obj->mm.n_placements > 1) - kfree(obj->mm.placements); - if (obj->shares_resv_from) i915_vm_resv_put(obj->shares_resv_from); @@ -442,6 +447,8 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj) GEM_BUG_ON(i915_gem_object_is_framebuffer(obj)); + i915_drm_client_remove_object(obj); + /* * Before we free the object, make sure any pure RCU-only * read-side critical sections are complete, e.g. 
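The i915_gem_object.c hunks above move the kfree() of obj->mm.placements out of __i915_gem_free_object() and into the RCU callback: the per-client fdinfo accounting walks object lists under rcu_read_lock(), so anything those readers might dereference has to stay valid until a grace period has elapsed. A minimal sketch of that pattern, using a hypothetical stats_obj type rather than the driver's real structures:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct stats_obj {
	u32 *placements;	/* read by stats code under rcu_read_lock() */
	struct rcu_head rcu;
};

static void stats_obj_free_rcu(struct rcu_head *head)
{
	struct stats_obj *so = container_of(head, struct stats_obj, rcu);

	/* Safe now: all pre-existing RCU readers have finished. */
	kfree(so->placements);
	kfree(so);
}

static void stats_obj_release(struct stats_obj *so)
{
	/*
	 * No kfree(so->placements) here; a concurrent RCU reader may
	 * still be traversing it, so defer to after the grace period.
	 */
	call_rcu(&so->rcu, stats_obj_free_rcu);
}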
@@ -493,17 +500,15 @@ static void i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size) { pgoff_t idx = offset >> PAGE_SHIFT; - void *src_map; void *src_ptr; - src_map = kmap_atomic(i915_gem_object_get_page(obj, idx)); - - src_ptr = src_map + offset_in_page(offset); + src_ptr = kmap_local_page(i915_gem_object_get_page(obj, idx)) + + offset_in_page(offset); if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) drm_clflush_virt_range(src_ptr, size); memcpy(dst, src_ptr, size); - kunmap_atomic(src_map); + kunmap_local(src_ptr); } static void diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 2292404007..0c5cdab278 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -302,6 +302,18 @@ struct drm_i915_gem_object { */ struct i915_address_space *shares_resv_from; +#ifdef CONFIG_PROC_FS + /** + * @client: @i915_drm_client which created the object + */ + struct i915_drm_client *client; + + /** + * @client_link: Link into @i915_drm_client.objects_list + */ + struct list_head client_link; +#endif + union { struct rcu_head rcu; struct llist_node freed; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 5df128e2f4..ef85c6dc9f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -65,16 +65,13 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) dst = vaddr; for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { struct page *page; - void *src; page = shmem_read_mapping_page(mapping, i); if (IS_ERR(page)) goto err_st; - src = kmap_atomic(page); - memcpy(dst, src, PAGE_SIZE); + memcpy_from_page(dst, page, 0, PAGE_SIZE); drm_clflush_virt_range(dst, PAGE_SIZE); - kunmap_atomic(src); put_page(page); dst += PAGE_SIZE; @@ -113,16 +110,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { struct page *page; - char *dst; page = shmem_read_mapping_page(mapping, i); if (IS_ERR(page)) continue; - dst = kmap_atomic(page); drm_clflush_virt_range(src, PAGE_SIZE); - memcpy(dst, src, PAGE_SIZE); - kunmap_atomic(dst); + memcpy_to_page(page, 0, src, PAGE_SIZE); set_page_dirty(page); if (obj->mm.madv == I915_MADV_WILLNEED) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 73a4a4eb29..38b72d8656 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -485,11 +485,13 @@ shmem_pwrite(struct drm_i915_gem_object *obj, if (err < 0) return err; - vaddr = kmap_atomic(page); + vaddr = kmap_local_page(page); + pagefault_disable(); unwritten = __copy_from_user_inatomic(vaddr + pg, user_data, len); - kunmap_atomic(vaddr); + pagefault_enable(); + kunmap_local(vaddr); err = aops->write_end(obj->base.filp, mapping, offset, len, len - unwritten, page, data); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 1a766d8e7c..8c88075eea 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -386,6 +386,27 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915, drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); + /* Wa_14019821291 */ + if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) { + /* + * This workaround is primarily implemented by 
the BIOS. We + * just need to figure out whether the BIOS has applied the + * workaround (meaning the programmed address falls within + * the DSM) and, if so, reserve that part of the DSM to + * prevent accidental reuse. The DSM location should be just + * below the WOPCM. + */ + u64 gscpsmi_base = intel_uncore_read64_2x32(uncore, + MTL_GSCPSMI_BASEADDR_LSB, + MTL_GSCPSMI_BASEADDR_MSB); + if (gscpsmi_base >= i915->dsm.stolen.start && + gscpsmi_base < i915->dsm.stolen.end) { + *base = gscpsmi_base; + *size = i915->dsm.stolen.end - gscpsmi_base; + return; + } + } + switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { case GEN8_STOLEN_RESERVED_1M: *size = 1024 * 1024; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 6b9f6cf50b..3ff3d8889c 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -115,7 +115,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj) do { struct page *page; - GEM_BUG_ON(order > MAX_ORDER); + GEM_BUG_ON(order > MAX_PAGE_ORDER); page = alloc_pages(GFP | __GFP_ZERO, order); if (!page) goto err; @@ -1082,7 +1082,7 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) goto err_unlock; for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { - u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n)); + u32 *ptr = kmap_local_page(i915_gem_object_get_page(obj, n)); if (needs_flush & CLFLUSH_BEFORE) drm_clflush_virt_range(ptr, PAGE_SIZE); @@ -1090,12 +1090,12 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) if (ptr[dword] != val) { pr_err("n=%lu ptr[%u]=%u, val=%u\n", n, dword, ptr[dword], val); - kunmap_atomic(ptr); + kunmap_local(ptr); err = -EINVAL; break; } - kunmap_atomic(ptr); + kunmap_local(ptr); } i915_gem_object_finish_access(obj); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 3bef1beec7..2a0c0634d4 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -24,7 +24,6 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v) { unsigned int needs_clflush; struct page *page; - void *map; u32 *cpu; int err; @@ -34,8 +33,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v) goto out; page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT); - map = kmap_atomic(page); - cpu = map + offset_in_page(offset); + cpu = kmap_local_page(page) + offset_in_page(offset); if (needs_clflush & CLFLUSH_BEFORE) drm_clflush_virt_range(cpu, sizeof(*cpu)); @@ -45,7 +43,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v) if (needs_clflush & CLFLUSH_AFTER) drm_clflush_virt_range(cpu, sizeof(*cpu)); - kunmap_atomic(map); + kunmap_local(cpu); i915_gem_object_finish_access(ctx->obj); out: @@ -57,7 +55,6 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v) { unsigned int needs_clflush; struct page *page; - void *map; u32 *cpu; int err; @@ -67,15 +64,14 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v) goto out; page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT); - map = kmap_atomic(page); - cpu = map + offset_in_page(offset); + cpu = kmap_local_page(page) + offset_in_page(offset); if (needs_clflush & CLFLUSH_BEFORE) drm_clflush_virt_range(cpu, sizeof(*cpu)); *v = *cpu; - kunmap_atomic(map); + kunmap_local(cpu); 
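	/*
	 * A sketch of the conversion rule applied throughout these hunks,
	 * assuming only the documented kmap semantics:
	 *
	 *	map = kmap_atomic(page);		(disabled preemption
	 *	cpu = map + offset_in_page(offset);	 and page faults)
	 *	...
	 *	kunmap_atomic(map);
	 *
	 * becomes
	 *
	 *	cpu = kmap_local_page(page) + offset_in_page(offset);
	 *	...
	 *	kunmap_local(cpu);	(takes any address within the page)
	 *
	 * kmap_local_page() only prevents migration of the current task,
	 * so code that relied on kmap_atomic() disabling page faults must
	 * now do so explicitly, as the shmem_pwrite() hunk above does with
	 * pagefault_disable() around __copy_from_user_inatomic().
	 */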
i915_gem_object_finish_access(ctx->obj); out: @@ -85,6 +81,7 @@ out: static int gtt_set(struct context *ctx, unsigned long offset, u32 v) { + intel_wakeref_t wakeref; struct i915_vma *vma; u32 __iomem *map; int err = 0; @@ -99,7 +96,7 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v) if (IS_ERR(vma)) return PTR_ERR(vma); - intel_gt_pm_get(vma->vm->gt); + wakeref = intel_gt_pm_get(vma->vm->gt); map = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); @@ -112,12 +109,13 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v) i915_vma_unpin_iomap(vma); out_rpm: - intel_gt_pm_put(vma->vm->gt); + intel_gt_pm_put(vma->vm->gt, wakeref); return err; } static int gtt_get(struct context *ctx, unsigned long offset, u32 *v) { + intel_wakeref_t wakeref; struct i915_vma *vma; u32 __iomem *map; int err = 0; @@ -132,7 +130,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v) if (IS_ERR(vma)) return PTR_ERR(vma); - intel_gt_pm_get(vma->vm->gt); + wakeref = intel_gt_pm_get(vma->vm->gt); map = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); @@ -145,7 +143,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v) i915_vma_unpin_iomap(vma); out_rpm: - intel_gt_pm_put(vma->vm->gt); + intel_gt_pm_put(vma->vm->gt, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 7021b6e9b2..89d4dc8b60 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -489,12 +489,12 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value) for (n = 0; n < real_page_count(obj); n++) { u32 *map; - map = kmap_atomic(i915_gem_object_get_page(obj, n)); + map = kmap_local_page(i915_gem_object_get_page(obj, n)); for (m = 0; m < DW_PER_PAGE; m++) map[m] = value; if (!has_llc) drm_clflush_virt_range(map, PAGE_SIZE); - kunmap_atomic(map); + kunmap_local(map); } i915_gem_object_finish_access(obj); @@ -520,7 +520,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj, for (n = 0; n < real_page_count(obj); n++) { u32 *map, m; - map = kmap_atomic(i915_gem_object_get_page(obj, n)); + map = kmap_local_page(i915_gem_object_get_page(obj, n)); if (needs_flush & CLFLUSH_BEFORE) drm_clflush_virt_range(map, PAGE_SIZE); @@ -546,7 +546,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj, } out_unmap: - kunmap_atomic(map); + kunmap_local(map); if (err) break; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index e57f939007..d684a70f2c 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -504,7 +504,7 @@ static int igt_dmabuf_export_vmap(void *arg) } if (memchr_inv(ptr, 0, dmabuf->size)) { - pr_err("Exported object not initialiased to zero!\n"); + pr_err("Exported object not initialised to zero!\n"); err = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 72957a36a3..2c51a2c452 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -630,14 +630,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, static void disable_retire_worker(struct drm_i915_private *i915) { i915_gem_driver_unregister__shrinker(i915); - intel_gt_pm_get(to_gt(i915)); + 
intel_gt_pm_get_untracked(to_gt(i915)); cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work); } static void restore_retire_worker(struct drm_i915_private *i915) { igt_flush_test(i915); - intel_gt_pm_put(to_gt(i915)); + intel_gt_pm_put_untracked(to_gt(i915)); i915_gem_driver_register__shrinker(i915); } @@ -778,6 +778,7 @@ err_obj: static int gtt_set(struct drm_i915_gem_object *obj) { + intel_wakeref_t wakeref; struct i915_vma *vma; void __iomem *map; int err = 0; @@ -786,7 +787,7 @@ static int gtt_set(struct drm_i915_gem_object *obj) if (IS_ERR(vma)) return PTR_ERR(vma); - intel_gt_pm_get(vma->vm->gt); + wakeref = intel_gt_pm_get(vma->vm->gt); map = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); if (IS_ERR(map)) { @@ -798,12 +799,13 @@ static int gtt_set(struct drm_i915_gem_object *obj) i915_vma_unpin_iomap(vma); out: - intel_gt_pm_put(vma->vm->gt); + intel_gt_pm_put(vma->vm->gt, wakeref); return err; } static int gtt_check(struct drm_i915_gem_object *obj) { + intel_wakeref_t wakeref; struct i915_vma *vma; void __iomem *map; int err = 0; @@ -812,7 +814,7 @@ static int gtt_check(struct drm_i915_gem_object *obj) if (IS_ERR(vma)) return PTR_ERR(vma); - intel_gt_pm_get(vma->vm->gt); + wakeref = intel_gt_pm_get(vma->vm->gt); map = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); if (IS_ERR(map)) { @@ -828,7 +830,7 @@ static int gtt_check(struct drm_i915_gem_object *obj) i915_vma_unpin_iomap(vma); out: - intel_gt_pm_put(vma->vm->gt); + intel_gt_pm_put(vma->vm->gt, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index e199d7dbb8..2b0327cc47 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -83,7 +83,7 @@ live_context(struct drm_i915_private *i915, struct file *file) int err; u32 id; - pc = proto_context_create(i915, 0); + pc = proto_context_create(fpriv, i915, 0); if (IS_ERR(pc)) return ERR_CAST(pc); @@ -152,7 +152,7 @@ kernel_context(struct drm_i915_private *i915, struct i915_gem_context *ctx; struct i915_gem_proto_context *pc; - pc = proto_context_create(i915, 0); + pc = proto_context_create(NULL, i915, 0); if (IS_ERR(pc)) return ERR_CAST(pc); diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 86a04afff6..e1bf13e3d3 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -226,7 +226,7 @@ u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs) static int mtl_dummy_pipe_control(struct i915_request *rq) { /* Wa_14016712196 */ - if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 71)) || + if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(rq->i915)) { u32 *cs; @@ -822,7 +822,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) flags |= PIPE_CONTROL_FLUSH_L3; /* Wa_14016712196 */ - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915)) + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915)) /* dummy PIPE_CONTROL + depth flush */ cs = gen12_emit_pipe_control(cs, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0); diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 9895e18df0..81bf221637 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -5,6 +5,7 @@ #include +#include "gem/i915_gem_internal.h" #include "gem/i915_gem_lmem.h" #include 
"gen8_ppgtt.h" @@ -222,6 +223,9 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + if (vm->rsvd.obj) + i915_gem_object_put(vm->rsvd.obj); + if (intel_vgpu_active(vm->i915)) gen8_ppgtt_notify_vgt(ppgtt, false); @@ -950,6 +954,44 @@ err_pd: return ERR_PTR(err); } +static int gen8_init_rsvd(struct i915_address_space *vm) +{ + struct drm_i915_private *i915 = vm->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int ret; + + if (!intel_gt_needs_wa_16018031267(vm->gt)) + return 0; + + /* The memory will be used only by GPU. */ + obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, + I915_BO_ALLOC_VOLATILE | + I915_BO_ALLOC_GPU_ONLY); + if (IS_ERR(obj)) + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto unref; + } + + ret = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH); + if (ret) + goto unref; + + vm->rsvd.vma = i915_vma_make_unshrinkable(vma); + vm->rsvd.obj = obj; + vm->total -= vma->node.size; + return 0; +unref: + i915_gem_object_put(obj); + return ret; +} + /* * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers * with a net effect resembling a 2-level page table in normal x86 terms. Each @@ -1031,6 +1073,10 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt, if (intel_vgpu_active(gt->i915)) gen8_ppgtt_notify_vgt(ppgtt, true); + err = gen8_init_rsvd(&ppgtt->vm); + if (err) + goto err_put; + return ppgtt; err_put: diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index ecc990ec1b..d650beb8ed 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -28,11 +28,14 @@ static void irq_disable(struct intel_breadcrumbs *b) static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) { + intel_wakeref_t wakeref; + /* * Since we are waiting on a request, the GPU should be busy * and should have its own rpm reference. */ - if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt))) + wakeref = intel_gt_pm_get_if_awake(b->irq_engine->gt); + if (GEM_WARN_ON(!wakeref)) return; /* @@ -41,7 +44,7 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) * which we can add a new waiter and avoid the cost of re-enabling * the irq. */ - WRITE_ONCE(b->irq_armed, true); + WRITE_ONCE(b->irq_armed, wakeref); /* Requests may have completed before we could enable the interrupt. 
*/ if (!b->irq_enabled++ && b->irq_enable(b)) @@ -61,12 +64,14 @@ static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) { + intel_wakeref_t wakeref = b->irq_armed; + GEM_BUG_ON(!b->irq_enabled); if (!--b->irq_enabled) b->irq_disable(b); - WRITE_ONCE(b->irq_armed, false); - intel_gt_pm_put_async(b->irq_engine->gt); + WRITE_ONCE(b->irq_armed, 0); + intel_gt_pm_put_async(b->irq_engine->gt, wakeref); } static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h index 72dfd3748c..bdf09fd67b 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h @@ -13,6 +13,7 @@ #include #include "intel_engine_types.h" +#include "intel_wakeref.h" /* * Rather than have every client wait upon all user interrupts, @@ -43,7 +44,7 @@ struct intel_breadcrumbs { spinlock_t irq_lock; /* protects the interrupt from hardirq context */ struct irq_work irq_work; /* for use from inside irq_lock */ unsigned int irq_enabled; - bool irq_armed; + intel_wakeref_t irq_armed; /* Not all breadcrumbs are attached to physical HW */ intel_engine_mask_t engine_mask; diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index a53b26178f..a2f1245741 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -6,6 +6,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" +#include "i915_drm_client.h" #include "i915_drv.h" #include "i915_trace.h" @@ -50,6 +51,7 @@ intel_context_create(struct intel_engine_cs *engine) int intel_context_alloc_state(struct intel_context *ce) { + struct i915_gem_context *ctx; int err = 0; if (mutex_lock_interruptible(&ce->pin_mutex)) @@ -66,6 +68,18 @@ int intel_context_alloc_state(struct intel_context *ce) goto unlock; set_bit(CONTEXT_ALLOC_BIT, &ce->flags); + + rcu_read_lock(); + ctx = rcu_dereference(ce->gem_context); + if (ctx && !kref_get_unless_zero(&ctx->ref)) + ctx = NULL; + rcu_read_unlock(); + if (ctx) { + if (ctx->client) + i915_drm_client_add_context_objects(ctx->client, + ce); + i915_gem_context_put(ctx); + } } unlock: diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index a80e3b7c24..25564c0150 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -212,7 +212,7 @@ static inline void intel_context_enter(struct intel_context *ce) return; ce->ops->enter(ce); - intel_gt_pm_get(ce->vm->gt); + ce->wakeref = intel_gt_pm_get(ce->vm->gt); } static inline void intel_context_mark_active(struct intel_context *ce) @@ -229,7 +229,7 @@ static inline void intel_context_exit(struct intel_context *ce) if (--ce->active_count) return; - intel_gt_pm_put_async(ce->vm->gt); + intel_gt_pm_put_async(ce->vm->gt, ce->wakeref); ce->ops->exit(ce); } diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index aceaac28a3..7eccbd70d8 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -17,6 +17,7 @@ #include "i915_utils.h" #include "intel_engine_types.h" #include "intel_sseu.h" +#include "intel_wakeref.h" #include "uc/intel_guc_fwif.h" @@ -112,6 +113,7 @@ struct intel_context { u32 ring_size; struct intel_ring *ring; struct intel_timeline *timeline; + 
intel_wakeref_t wakeref;
 	unsigned long flags;
 #define CONTEXT_BARRIER_BIT	0
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4a11219e56..84be97f959 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -47,7 +47,7 @@
 #define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
 #define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)
+#define GEN8_LR_CONTEXT_OTHER_SIZE	(2 * PAGE_SIZE)
 #define MAX_MMIO_BASES 3
 struct engine_info {
@@ -908,6 +908,23 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
 		info->engine_mask &= ~BIT(GSC0);
 	}
+	/*
+	 * Do not create the command streamer for CCS slices beyond the first.
+	 * All workloads submitted to the first engine will be shared among
+	 * all the slices.
+	 *
+	 * Once the user is allowed to customize the CCS mode, this check
+	 * will need to be removed.
+	 */
+	if (IS_DG2(gt->i915)) {
+		u8 first_ccs = __ffs(CCS_MASK(gt));
+
+		/* Mask off all the CCS engines */
+		info->engine_mask &= ~GENMASK(CCS3, CCS0);
+		/* Put back in the first CCS engine */
+		info->engine_mask |= BIT(_CCS(first_ccs));
+	}
+
 	return info->engine_mask;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 9a527e1f5b..1a8e2b7db0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -188,7 +188,7 @@ static void heartbeat(struct work_struct *wrk)
 			 * low latency and no jitter] the chance to naturally
 			 * complete before being preempted.
 			 */
-			attr.priority = 0;
+			attr.priority = I915_PRIORITY_NORMAL;
 			if (rq->sched.attr.priority >= attr.priority)
 				attr.priority = I915_PRIORITY_HEARTBEAT;
 			if (rq->sched.attr.priority >= attr.priority)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 5a3a5b29d1..fb7bff27b4 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -63,7 +63,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
 	ENGINE_TRACE(engine, "\n");
-	intel_gt_pm_get(engine->gt);
+	engine->wakeref_track = intel_gt_pm_get(engine->gt);
 	/* Discard stale context state from across idling */
 	ce = engine->kernel_context;
@@ -122,6 +122,7 @@ __queue_and_release_pm(struct i915_request *rq,
 	 */
 	GEM_BUG_ON(rq->context->active_count != 1);
 	__intel_gt_pm_get(engine->gt);
+	rq->context->wakeref = intel_wakeref_track(&engine->gt->wakeref);
 	/*
 	 * We have to serialise all potential retirement paths with our
@@ -282,7 +283,7 @@ static int __engine_park(struct intel_wakeref *wf)
 	engine->park(engine);
 	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
-	intel_gt_pm_put_async(engine->gt);
+	intel_gt_pm_put_async(engine->gt, engine->wakeref_track);
 	return 0;
 }
@@ -293,7 +294,7 @@ static const struct intel_wakeref_ops wf_ops = {
 void intel_engine_init__pm(struct intel_engine_cs *engine)
 {
-	intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops);
+	intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops, engine->name);
 	intel_engine_init_heartbeat(engine);
 	intel_gsc_idle_msg_enable(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index d68675925b..1d97c435a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -10,6 +10,7 @@
 #include "i915_request.h"
 #include
"intel_engine_types.h" #include "intel_wakeref.h" +#include "intel_gt.h" #include "intel_gt_pm.h" static inline bool diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h index fdd4ddd3a9..a8eac59e37 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h @@ -118,9 +118,15 @@ #define CCID_EXTENDED_STATE_RESTORE BIT(2) #define CCID_EXTENDED_STATE_SAVE BIT(3) #define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */ +#define PER_CTX_BB_FORCE BIT(2) +#define PER_CTX_BB_VALID BIT(0) + #define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */ #define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */ #define ECOSKPD(base) _MMIO((base) + 0x1d0) +#define XEHP_BLITTER_SCHEDULING_MODE_MASK REG_GENMASK(12, 11) +#define XEHP_BLITTER_ROUND_ROBIN_MODE \ + REG_FIELD_PREP(XEHP_BLITTER_SCHEDULING_MODE_MASK, 1) #define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4) #define ECO_GATING_CX_ONLY REG_BIT(3) #define GEN6_BLITTER_FBC_NOTIFY REG_BIT(3) @@ -257,5 +263,7 @@ #define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18) #define ALNUNIT_CLKGATE_DIS REG_BIT(13) +#define VDBOX_CGCTL3F1C(base) _MMIO((base) + 0x3f1c) +#define MFXPIPE_CLKGATE_DIS REG_BIT(3) #endif /* __INTEL_ENGINE_REGS__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 8769760257..960e6be204 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -446,7 +446,9 @@ struct intel_engine_cs { unsigned long serial; unsigned long wakeref_serial; + intel_wakeref_t wakeref_track; struct intel_wakeref wakeref; + struct file *default_state; struct { diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index 42e09f1589..b061a0a0d6 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -630,7 +630,7 @@ static void __execlists_schedule_out(struct i915_request * const rq, execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); if (engine->fw_domain && !--engine->fw_active) intel_uncore_forcewake_put(engine->uncore, engine->fw_domain); - intel_gt_pm_put_async(engine->gt); + intel_gt_pm_put_async_untracked(engine->gt); /* * If this is part of a virtual engine, its next request may diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 15fc8e4703..21a7e3191c 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -245,16 +245,15 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) gen8_ggtt_invalidate(ggtt); list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) { - if (intel_guc_tlb_invalidation_is_available(>->uc.guc)) { + if (intel_guc_tlb_invalidation_is_available(>->uc.guc)) guc_ggtt_ct_invalidate(gt); - } else if (GRAPHICS_VER(i915) >= 12) { + else if (GRAPHICS_VER(i915) >= 12) intel_uncore_write_fw(gt->uncore, GEN12_GUC_TLB_INV_CR, GEN12_GUC_TLB_INV_CR_INVALIDATE); - } else { + else intel_uncore_write_fw(gt->uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); - } } } @@ -297,7 +296,7 @@ static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt) return intel_gt_is_bind_context_ready(gt); } -static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt) +static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt, intel_wakeref_t *wakeref) { struct intel_context *ce; 
struct intel_gt *gt = ggtt->vm.gt; @@ -314,7 +313,8 @@ static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt) * would conflict with fs_reclaim trying to allocate memory while * doing rpm_resume(). */ - if (!intel_gt_pm_get_if_awake(gt)) + *wakeref = intel_gt_pm_get_if_awake(gt); + if (!*wakeref) return NULL; intel_engine_pm_get(ce->engine); @@ -322,10 +322,10 @@ static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt) return ce; } -static void gen8_ggtt_bind_put_ce(struct intel_context *ce) +static void gen8_ggtt_bind_put_ce(struct intel_context *ce, intel_wakeref_t wakeref) { intel_engine_pm_put(ce->engine); - intel_gt_pm_put(ce->engine->gt); + intel_gt_pm_put(ce->engine->gt, wakeref); } static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset, @@ -338,12 +338,13 @@ static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset, struct sgt_iter iter; struct i915_request *rq; struct intel_context *ce; + intel_wakeref_t wakeref; u32 *cs; if (!num_entries) return true; - ce = gen8_ggtt_bind_get_ce(ggtt); + ce = gen8_ggtt_bind_get_ce(ggtt, &wakeref); if (!ce) return false; @@ -419,13 +420,13 @@ queue_err_rq: offset += n_ptes; } - gen8_ggtt_bind_put_ce(ce); + gen8_ggtt_bind_put_ce(ce, wakeref); return true; err_rq: i915_request_put(rq); put_ce: - gen8_ggtt_bind_put_ce(ce); + gen8_ggtt_bind_put_ce(ce, wakeref); return false; } diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h index 7ab3ca0f9f..013c642514 100644 --- a/drivers/gpu/drm/i915/gt/intel_gsc.h +++ b/drivers/gpu/drm/i915/gt/intel_gsc.h @@ -21,8 +21,11 @@ struct mei_aux_device; /** * struct intel_gsc - graphics security controller * - * @gem_obj: scratch memory GSC operations - * @intf : gsc interface + * @intf: gsc interface + * @intf.adev: MEI aux. 
device for this @intf + * @intf.gem_obj: scratch memory GSC operations + * @intf.irq: IRQ for this device (%-1 for no IRQ) + * @intf.id: this interface's id number/index */ struct intel_gsc { struct intel_gsc_intf { diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index ba1186fc52..6a2c2718bc 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -451,7 +451,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt) spin_lock_irqsave(&uncore->lock, flags); intel_uncore_posting_read_fw(uncore, - RING_HEAD(RENDER_RING_BASE)); + RING_TAIL(RENDER_RING_BASE)); spin_unlock_irqrestore(&uncore->lock, flags); } } @@ -1024,6 +1024,12 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt, return I915_MAP_WC; } +bool intel_gt_needs_wa_16018031267(struct intel_gt *gt) +{ + /* Wa_16018031267, Wa_16018063123 */ + return IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 55), IP_VER(12, 71)); +} + bool intel_gt_needs_wa_22016122933(struct intel_gt *gt) { return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 970bedf6b7..003eb93b82 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -87,8 +87,13 @@ static inline bool gt_is_root(struct intel_gt *gt) return !gt->info.id; } +bool intel_gt_needs_wa_16018031267(struct intel_gt *gt); bool intel_gt_needs_wa_22016122933(struct intel_gt *gt); +#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \ + intel_gt_needs_wa_16018031267(engine->gt) && \ + engine->class == COPY_ENGINE_CLASS && engine->instance == 0) + static inline struct intel_gt *uc_to_gt(struct intel_uc *uc) { return container_of(uc, struct intel_gt, uc); @@ -114,6 +119,11 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc) return container_of(gsc, struct intel_gt, gsc); } +static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) +{ + return guc_to_gt(guc)->i915; +} + void intel_gt_common_init_early(struct intel_gt *gt); int intel_root_gt_init_early(struct drm_i915_private *i915); int intel_gt_assign_ggtt(struct intel_gt *gt); @@ -167,6 +177,20 @@ void intel_gt_release_all(struct drm_i915_private *i915); (id__)++) \ for_each_if(((gt__) = (i915__)->gt[(id__)])) +/* Simple iterator over all initialised engines */ +#define for_each_engine(engine__, gt__, id__) \ + for ((id__) = 0; \ + (id__) < I915_NUM_ENGINES; \ + (id__)++) \ + for_each_if ((engine__) = (gt__)->engine[(id__)]) + +/* Iterator over subset of engines selected by mask */ +#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \ + for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \ + (tmp__) ? 
\ + ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \ + 0;) + void intel_gt_info_print(const struct intel_gt_info *info, struct drm_printer *p); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c new file mode 100644 index 0000000000..044219c596 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_gt.h" +#include "intel_gt_ccs_mode.h" +#include "intel_gt_regs.h" + +void intel_gt_apply_ccs_mode(struct intel_gt *gt) +{ + int cslice; + u32 mode = 0; + int first_ccs = __ffs(CCS_MASK(gt)); + + if (!IS_DG2(gt->i915)) + return; + + /* Build the value for the fixed CCS load balancing */ + for (cslice = 0; cslice < I915_MAX_CCS; cslice++) { + if (CCS_MASK(gt) & BIT(cslice)) + /* + * If available, assign the cslice + * to the first available engine... + */ + mode |= XEHP_CCS_MODE_CSLICE(cslice, first_ccs); + + else + /* + * ... otherwise, mark the cslice as + * unavailable if no CCS dispatches here + */ + mode |= XEHP_CCS_MODE_CSLICE(cslice, + XEHP_CCS_MODE_CSLICE_MASK); + } + + intel_uncore_write(gt->uncore, XEHP_CCS_MODE, mode); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h new file mode 100644 index 0000000000..9e5549caeb --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef __INTEL_GT_CCS_MODE_H__ +#define __INTEL_GT_CCS_MODE_H__ + +struct intel_gt; + +void intel_gt_apply_ccs_mode(struct intel_gt *gt); + +#endif /* __INTEL_GT_CCS_MODE_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c index 8f9b874fdc..3aa1d014c1 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c @@ -6,8 +6,8 @@ #include -#include "i915_drv.h" /* for_each_engine! */ #include "intel_engine.h" +#include "intel_gt.h" #include "intel_gt_debugfs.h" #include "intel_gt_engines_debugfs.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c index 34913912d8..e253750a51 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c @@ -388,8 +388,7 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags) * registers. This wakeref will be released in the unlock * routine. * - * This is expected to become a formally documented/numbered - * workaround soon. + * Wa_22018931422 */ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index f5899d503e..220ac4f92e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -28,19 +28,20 @@ static void user_forcewake(struct intel_gt *gt, bool suspend) { int count = atomic_read(>->user_wakeref); + intel_wakeref_t wakeref; /* Inside suspend/resume so single threaded, no races to worry about. 
*/ if (likely(!count)) return; - intel_gt_pm_get(gt); + wakeref = intel_gt_pm_get(gt); if (suspend) { GEM_BUG_ON(count > atomic_read(>->wakeref.count)); atomic_sub(count, >->wakeref.count); } else { atomic_add(count, >->wakeref.count); } - intel_gt_pm_put(gt); + intel_gt_pm_put(gt, wakeref); } static void runtime_begin(struct intel_gt *gt) @@ -138,7 +139,7 @@ void intel_gt_pm_init_early(struct intel_gt *gt) * runtime_pm is per-device rather than per-tile, so this is still the * correct structure. */ - intel_wakeref_init(>->wakeref, gt->i915, &wf_ops); + intel_wakeref_init(>->wakeref, gt->i915, &wf_ops, "GT"); seqcount_mutex_init(>->stats.lock, >->wakeref.mutex); } @@ -167,7 +168,7 @@ static void gt_sanitize(struct intel_gt *gt, bool force) enum intel_engine_id id; intel_wakeref_t wakeref; - GT_TRACE(gt, "force:%s", str_yes_no(force)); + GT_TRACE(gt, "force:%s\n", str_yes_no(force)); /* Use a raw wakeref to avoid calling intel_display_power_get early */ wakeref = intel_runtime_pm_get(gt->uncore->rpm); @@ -236,6 +237,7 @@ int intel_gt_resume(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err; err = intel_gt_has_unrecoverable_error(gt); @@ -252,7 +254,7 @@ int intel_gt_resume(struct intel_gt *gt) */ gt_sanitize(gt, true); - intel_gt_pm_get(gt); + wakeref = intel_gt_pm_get(gt); intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); intel_rc6_sanitize(>->rc6); @@ -295,7 +297,7 @@ int intel_gt_resume(struct intel_gt *gt) out_fw: intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); - intel_gt_pm_put(gt); + intel_gt_pm_put(gt, wakeref); intel_gt_bind_context_set_ready(gt); return err; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index b1eeb5b339..911fd01602 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -16,19 +16,28 @@ static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt) return intel_wakeref_is_active(>->wakeref); } -static inline void intel_gt_pm_get(struct intel_gt *gt) +static inline void intel_gt_pm_get_untracked(struct intel_gt *gt) { intel_wakeref_get(>->wakeref); } +static inline intel_wakeref_t intel_gt_pm_get(struct intel_gt *gt) +{ + intel_gt_pm_get_untracked(gt); + return intel_wakeref_track(>->wakeref); +} + static inline void __intel_gt_pm_get(struct intel_gt *gt) { __intel_wakeref_get(>->wakeref); } -static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt) +static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt) { - return intel_wakeref_get_if_active(>->wakeref); + if (!intel_wakeref_get_if_active(>->wakeref)) + return 0; + + return intel_wakeref_track(>->wakeref); } static inline void intel_gt_pm_might_get(struct intel_gt *gt) @@ -36,12 +45,18 @@ static inline void intel_gt_pm_might_get(struct intel_gt *gt) intel_wakeref_might_get(>->wakeref); } -static inline void intel_gt_pm_put(struct intel_gt *gt) +static inline void intel_gt_pm_put_untracked(struct intel_gt *gt) { intel_wakeref_put(>->wakeref); } -static inline void intel_gt_pm_put_async(struct intel_gt *gt) +static inline void intel_gt_pm_put(struct intel_gt *gt, intel_wakeref_t handle) +{ + intel_wakeref_untrack(>->wakeref, handle); + intel_gt_pm_put_untracked(gt); +} + +static inline void intel_gt_pm_put_async_untracked(struct intel_gt *gt) { intel_wakeref_put_async(>->wakeref); } @@ -51,9 +66,14 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt) intel_wakeref_might_put(>->wakeref); } -#define 
with_intel_gt_pm(gt, tmp) \ - for (tmp = 1, intel_gt_pm_get(gt); tmp; \ - intel_gt_pm_put(gt), tmp = 0) +static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t handle) +{ + intel_wakeref_untrack(&gt->wakeref, handle); + intel_gt_pm_put_async_untracked(gt); +} + +#define with_intel_gt_pm(gt, wf) \ + for (wf = intel_gt_pm_get(gt); wf; intel_gt_pm_put(gt, wf), wf = 0) /** * with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent @@ -64,7 +84,7 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt) * @wf: pointer to a temporary wakeref. */ #define with_intel_gt_pm_if_awake(gt, wf) \ - for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0) + for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt, wf), wf = 0) static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt) { diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index f900cc68d6..7114c116e9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -27,7 +27,7 @@ void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt) { atomic_inc(&gt->user_wakeref); - intel_gt_pm_get(gt); + intel_gt_pm_get_untracked(gt); if (GRAPHICS_VER(gt->i915) >= 6) intel_uncore_forcewake_user_get(gt->uncore); } @@ -36,7 +36,7 @@ void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt) { if (GRAPHICS_VER(gt->i915) >= 6) intel_uncore_forcewake_user_put(gt->uncore); - intel_gt_pm_put(gt); + intel_gt_pm_put_untracked(gt); atomic_dec(&gt->user_wakeref); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index eecd0a87a6..743fe35667 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -469,6 +469,9 @@ #define XEHP_PSS_MODE2 MCR_REG(0x703c) #define SCOREBOARD_STALL_FLUSH_CONTROL REG_BIT(5) +#define XEHP_PSS_CHICKEN MCR_REG(0x7044) +#define FD_END_COLLECT REG_BIT(5) + #define GEN7_SC_INSTDONE _MMIO(0x7100) #define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104) #define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108) @@ -537,6 +540,9 @@ #define XEHP_SQCM MCR_REG(0x8724) #define EN_32B_ACCESS REG_BIT(30) +#define MTL_GSCPSMI_BASEADDR_LSB _MMIO(0x880c) +#define MTL_GSCPSMI_BASEADDR_MSB _MMIO(0x8810) + #define HSW_IDICR _MMIO(0x9008) #define IDIHASHMSK(x) (((x) & 0x3f) << 16) @@ -1471,8 +1477,14 @@ #define ECOBITS_PPGTT_CACHE4B (0 << 8) #define GEN12_RCU_MODE _MMIO(0x14800) +#define XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1) #define GEN12_RCU_MODE_CCS_ENABLE REG_BIT(0) +#define XEHP_CCS_MODE _MMIO(0x14804) +#define XEHP_CCS_MODE_CSLICE_MASK REG_GENMASK(2, 0) /* CCS0-3 + rsvd */ +#define XEHP_CCS_MODE_CSLICE_WIDTH ilog2(XEHP_CCS_MODE_CSLICE_MASK + 1) +#define XEHP_CCS_MODE_CSLICE(cslice, ccs) ((ccs) << ((cslice) * XEHP_CCS_MODE_CSLICE_WIDTH)) + #define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168) #define CHV_FGT_DISABLE_SS0 (1 << 10) #define CHV_FGT_DISABLE_SS1 (1 << 11) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 4fbed27ef0..86f73fe558 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -63,6 +63,9 @@ struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz) if (!IS_ERR(obj)) { obj->base.resv = i915_vm_resv_get(vm); obj->shares_resv_from = vm; + + if (vm->fpriv) + i915_drm_client_add_object(vm->fpriv->client, obj); } return obj; @@ -84,6 +87,9 @@ struct drm_i915_gem_object 
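The reworked intel_gt_pm_get() above returns a tracking handle that must travel with the caller to the matching put, which is what lets the wakeref tracker attribute a leaked reference to its taker. A sketch of the two resulting caller patterns, where do_work_while_awake() is a hypothetical stand-in for the caller's body:

/* Explicit pairing: the handle from the get proves ownership at the put. */
static int example_gt_task(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;
	int err;

	wakeref = intel_gt_pm_get(gt);
	err = do_work_while_awake(gt);	/* hypothetical helper */
	intel_gt_pm_put(gt, wakeref);

	return err;
}

/*
 * Scoped form: with_intel_gt_pm() now carries the tracking handle
 * instead of a dummy loop counter.
 */
static int example_gt_task_scoped(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;
	int err = 0;

	with_intel_gt_pm(gt, wakeref)
		err = do_work_while_awake(gt);

	return err;
}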
*alloc_pt_dma(struct i915_address_space *vm, int sz) if (!IS_ERR(obj)) { obj->base.resv = i915_vm_resv_get(vm); obj->shares_resv_from = vm; + + if (vm->fpriv) + i915_drm_client_add_object(vm->fpriv->client, obj); } return obj; @@ -95,6 +101,16 @@ int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj) void *vaddr; type = intel_gt_coherent_map_type(vm->gt, obj, true); + /* + * FIXME: It is suspected that some Address Translation Service (ATS) + * issue on IOMMU is causing CAT errors to occur on some MTL workloads. + * Applying a write barrier to the ppgtt set entry functions appeared + * to have no effect, so we must temporarily use I915_MAP_WC here on + * MTL until a proper ATS solution is found. + */ + if (IS_METEORLAKE(vm->i915)) + type = I915_MAP_WC; + vaddr = i915_gem_object_pin_map_unlocked(obj, type); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); @@ -109,6 +125,16 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object void *vaddr; type = intel_gt_coherent_map_type(vm->gt, obj, true); + /* + * FIXME: It is suspected that some Address Translation Service (ATS) + * issue on IOMMU is causing CAT errors to occur on some MTL workloads. + * Applying a write barrier to the ppgtt set entry functions appeared + * to have no effect, so we must temporarily use I915_MAP_WC here on + * MTL until a proper ATS solution is found. + */ + if (IS_METEORLAKE(vm->i915)) + type = I915_MAP_WC; + vaddr = i915_gem_object_pin_map(obj, type); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index b471edac26..6b85222ee3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -249,8 +249,13 @@ struct i915_address_space { struct work_struct release_work; struct drm_mm mm; + struct { + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + } rsvd; struct intel_gt *gt; struct drm_i915_private *i915; + struct drm_i915_file_private *fpriv; struct device *dma; u64 total; /* size addr space maps (ex. 2GB for ggtt) */ u64 reserved; /* size addr space reserved */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index eaf66d9031..7c367ba8d9 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -828,6 +828,18 @@ lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine) return 0; } +static void +lrc_setup_bb_per_ctx(u32 *regs, + const struct intel_engine_cs *engine, + u32 ctx_bb_ggtt_addr) +{ + GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1); + regs[lrc_ring_wa_bb_per_ctx(engine) + 1] = + ctx_bb_ggtt_addr | + PER_CTX_BB_FORCE | + PER_CTX_BB_VALID; +} + static void lrc_setup_indirect_ctx(u32 *regs, const struct intel_engine_cs *engine, @@ -1020,7 +1032,13 @@ static u32 context_wa_bb_offset(const struct intel_context *ce) return PAGE_SIZE * ce->wa_bb_page; } -static u32 *context_indirect_bb(const struct intel_context *ce) +/* + * per_ctx below determines which WABB section is used. + * When true, the function returns the location of the + * PER_CTX_BB. When false, the function returns the + * location of the INDIRECT_CTX. + */ +static u32 *context_wabb(const struct intel_context *ce, bool per_ctx) { void *ptr; @@ -1029,6 +1047,7 @@ static u32 *context_indirect_bb(const struct intel_context *ce) ptr = ce->lrc_reg_state; ptr -= LRC_STATE_OFFSET; /* back to start of context image */ ptr += context_wa_bb_offset(ce); + ptr += per_ctx ? 
PAGE_SIZE : 0; return ptr; } @@ -1105,7 +1124,8 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine) if (GRAPHICS_VER(engine->i915) >= 12) { ce->wa_bb_page = context_size / PAGE_SIZE; - context_size += PAGE_SIZE; + /* INDIRECT_CTX and PER_CTX_BB need separate pages. */ + context_size += PAGE_SIZE * 2; } if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) { @@ -1407,12 +1427,85 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) return gen12_emit_aux_table_inv(ce->engine, cs); } +static u32 *xehp_emit_fastcolor_blt_wabb(const struct intel_context *ce, u32 *cs) +{ + struct intel_gt *gt = ce->engine->gt; + int mocs = gt->mocs.uc_index << 1; + + /* + * Wa_16018031267 / Wa_16018063123 requires that SW forces the + * main copy engine arbitration into round robin mode. We + * additionally need to submit the following WABB blt command + * to produce 4 subblits with each subblit generating 0 byte + * write requests as WABB: + * + * XY_FASTCOLOR_BLT + * BG0 -> 5100000E + * BG1 -> 0000003F (Dest pitch) + * BG2 -> 00000000 (X1, Y1) = (0, 0) + * BG3 -> 00040001 (X2, Y2) = (1, 4) + * BG4 -> scratch + * BG5 -> scratch + * BG6-12 -> 00000000 + * BG13 -> 20004004 (Surf. Width = 2, Surf. Height = 5) + * BG14 -> 00000010 (Qpitch = 4) + * BG15 -> 00000000 + */ + *cs++ = XY_FAST_COLOR_BLT_CMD | (16 - 2); + *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | 0x3f; + *cs++ = 0; + *cs++ = 4 << 16 | 1; + *cs++ = lower_32_bits(i915_vma_offset(ce->vm->rsvd.vma)); + *cs++ = upper_32_bits(i915_vma_offset(ce->vm->rsvd.vma)); + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0x20004004; + *cs++ = 0x10; + *cs++ = 0; + + return cs; +} + +static u32 * +xehp_emit_per_ctx_bb(const struct intel_context *ce, u32 *cs) +{ + /* Wa_16018031267, Wa_16018063123 */ + if (NEEDS_FASTCOLOR_BLT_WABB(ce->engine)) + cs = xehp_emit_fastcolor_blt_wabb(ce, cs); + + return cs; +} + +static void +setup_per_ctx_bb(const struct intel_context *ce, + const struct intel_engine_cs *engine, + u32 *(*emit)(const struct intel_context *, u32 *)) +{ + /* Place PER_CTX_BB on next page after INDIRECT_CTX */ + u32 * const start = context_wabb(ce, true); + u32 *cs; + + cs = emit(ce, start); + + /* PER_CTX_BB must manually terminate */ + *cs++ = MI_BATCH_BUFFER_END; + + GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs)); + lrc_setup_bb_per_ctx(ce->lrc_reg_state, engine, + lrc_indirect_bb(ce) + PAGE_SIZE); +} + static void setup_indirect_ctx_bb(const struct intel_context *ce, const struct intel_engine_cs *engine, u32 *(*emit)(const struct intel_context *, u32 *)) { - u32 * const start = context_indirect_bb(ce); + u32 * const start = context_wabb(ce, false); u32 *cs; cs = emit(ce, start); @@ -1511,6 +1604,7 @@ u32 lrc_update_regs(const struct intel_context *ce, /* Mutually exclusive wrt to global indirect bb */ GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size); setup_indirect_ctx_bb(ce, engine, fn); + setup_per_ctx_bb(ce, engine, xehp_emit_per_ctx_bb); } return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE; diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index f602895f6d..6a3246240e 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -849,13 +849,12 @@ void intel_sseu_print_topology(struct drm_i915_private *i915, const struct sseu_dev_info *sseu, struct drm_printer *p) { - if (sseu->max_slices == 0) { + if (sseu->max_slices == 0) drm_printf(p, "Unavailable\n"); - }
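The two-page arrangement selected by context_wabb() above can be restated as a standalone sketch: the INDIRECT_CTX batch sits on the first page reserved after the register state, and the PER_CTX_BB batch on the page after it (names and offsets as used in the patch, passed in as parameters here):

/*
 * Context image layout implied by __lrc_alloc_state() and context_wabb():
 *
 *   [ LRC register state ... ]
 *   [ wa_bb page + 0 ]  INDIRECT_CTX batch
 *   [ wa_bb page + 1 ]  PER_CTX_BB batch, must end in MI_BATCH_BUFFER_END
 */
static u32 *select_wabb_page(void *lrc_reg_state, unsigned long state_offset,
			     unsigned long wa_bb_offset, unsigned long page_size,
			     bool per_ctx)
{
	/* Back to the start of the context image, then into the WA pages. */
	char *image = (char *)lrc_reg_state - state_offset;

	return (u32 *)(image + wa_bb_offset + (per_ctx ? page_size : 0));
}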
else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { + else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) sseu_print_xehp_topology(sseu, p); - } else { + else sseu_print_hsw_topology(sseu, p); - } } void intel_sseu_print_ss_info(const char *type, diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 192ac0e59a..59816dd6fb 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -10,6 +10,7 @@ #include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt.h" +#include "intel_gt_ccs_mode.h" #include "intel_gt_mcr.h" #include "intel_gt_print.h" #include "intel_gt_regs.h" @@ -51,7 +52,8 @@ * registers belonging to BCS, VCS or VECS should be implemented in * xcs_engine_wa_init(). Workarounds for registers not belonging to a specific * engine's MMIO range but that are part of the common RCS/CCS reset domain - * should be implemented in general_render_compute_wa_init(). + * should be implemented in general_render_compute_wa_init(). The settings + * for CCS load balancing should be added in ccs_engine_wa_mode(). * * - GT workarounds: the list of these WAs is applied whenever these registers * revert to their default values: on GPU reset, suspend/resume [1]_, etc. @@ -777,6 +779,9 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine, /* Wa_18019271663:dg2 */ wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE); + + /* Wa_14019877138:dg2 */ + wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT); } static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine, @@ -786,8 +791,13 @@ static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine, dg2_ctx_gt_tuning_init(engine, wal); - if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) || - IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER)) + /* + * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in + * gen12_emit_indirect_ctx_rcs() rather than here on some early + * steppings. 
+ */ + if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) || + IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))) wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false); } @@ -905,7 +915,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, if (engine->class != RENDER_CLASS) goto done; - if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71))) + if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74))) xelpg_ctx_workarounds_init(engine, wal); else if (IS_PONTEVECCHIO(i915)) ; /* noop; none at this time */ @@ -1640,7 +1650,8 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) static void xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - /* Wa_14018778641 / Wa_18018781329 */ + /* Wa_14018575942 / Wa_18018781329 */ + wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB); wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB); /* Wa_22016670082 */ @@ -1662,9 +1673,23 @@ xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) debug_dump_steering(gt); } +static void +wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal) +{ + struct intel_engine_cs *engine; + int id; + + for_each_engine(engine, gt, id) + if (engine->class == VIDEO_DECODE_CLASS) + wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base), + MFXPIPE_CLKGATE_DIS); +} + static void xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { + wa_16021867713(gt, wal); + /* * Wa_14018778641 * Wa_18018781329 @@ -1674,6 +1699,9 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) */ wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB); + /* Wa_22016670082 */ + wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE); + debug_dump_steering(gt); } @@ -1690,7 +1718,7 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) */ static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal) { - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) { + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) { wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS); wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS); } @@ -1723,7 +1751,7 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal) return; } - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) xelpg_gt_workarounds_init(gt, wal); else if (IS_PONTEVECCHIO(i915)) pvc_gt_workarounds_init(gt, wal); @@ -2196,7 +2224,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) if (engine->gt->type == GT_MEDIA) ; /* none yet */ - else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71))) + else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74))) xelpg_whitelist_build(engine); else if (IS_PONTEVECCHIO(i915)) pvc_whitelist_build(engine); @@ -2340,14 +2368,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) 0, true); } - if (IS_DG2_G11(i915) || IS_DG2_G10(i915)) { - /* Wa_22014600077:dg2 */ - wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0, - _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH), - 0 /* Wa_14012342262 write-only reg, so skip verification */, - true); - } - if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* @@ -2782,6 +2802,11 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) RING_SEMA_WAIT_POLL(engine->mmio_base), 1); } + /* 
Wa_16018031267, Wa_16018063123 */ + if (NEEDS_FASTCOLOR_BLT_WABB(engine)) + wa_masked_field_set(wal, ECOSKPD(engine->mmio_base), + XEHP_BLITTER_SCHEDULING_MODE_MASK, + XEHP_BLITTER_ROUND_ROBIN_MODE); } static void @@ -2811,7 +2836,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt, { struct drm_i915_private *i915 = gt->i915; - if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915)) + if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915)) wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512); /* @@ -2827,6 +2852,28 @@ add_render_compute_tuning_settings(struct intel_gt *gt, wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC); } +static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal) +{ + struct intel_gt *gt = engine->gt; + + if (!IS_DG2(gt->i915)) + return; + + /* + * Wa_14019159160: This workaround, along with others, leads to + * significant challenges in utilizing load balancing among the + * CCS slices. Consequently, an architectural decision has been + * made to completely disable automatic CCS load balancing. + */ + wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE); + + /* + * After having disabled automatic load balancing we need to + * assign all slices to a single CCS. We will call it CCS mode 1. + */ + intel_gt_apply_ccs_mode(gt); +} + /* * The workarounds in this function apply to shared registers in * the general render reset domain that aren't tied to a @@ -2864,7 +2911,8 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li } if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) || - IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) + IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) || + IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) /* Wa_14017856879 */ wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH); @@ -2915,6 +2963,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li * Wa_22015475538:dg2 */ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8); + + /* Wa_18028616096 */ + wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3); } if (IS_DG2_G11(i915)) { @@ -2943,11 +2994,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li true); } - if (IS_DG2_G10(i915) || IS_DG2_G12(i915)) { - /* Wa_18028616096 */ - wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3); - } - if (IS_XEHPSDV(i915)) { /* Wa_1409954639 */ wa_mcr_masked_en(wal, @@ -2978,8 +3024,10 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal * to a single RCS/CCS engine's workaround list since * they're reset as part of the general render domain reset. 
*/ - if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) + if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) { general_render_compute_wa_init(engine, wal); + ccs_engine_wa_mode(engine, wal); + } if (engine->class == COMPUTE_CLASS) ccs_engine_wa_init(engine, wal); diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c index 86cecf7a11..5ffa5e30f4 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c @@ -21,20 +21,22 @@ static int cmp_u32(const void *A, const void *B) return *a - *b; } -static void perf_begin(struct intel_gt *gt) +static intel_wakeref_t perf_begin(struct intel_gt *gt) { - intel_gt_pm_get(gt); + intel_wakeref_t wakeref = intel_gt_pm_get(gt); /* Boost gpufreq to max [waitboost] and keep it fixed */ atomic_inc(&gt->rps.num_waiters); queue_work(gt->i915->unordered_wq, &gt->rps.work); flush_work(&gt->rps.work); + + return wakeref; } -static int perf_end(struct intel_gt *gt) +static int perf_end(struct intel_gt *gt, intel_wakeref_t wakeref) { atomic_dec(&gt->rps.num_waiters); - intel_gt_pm_put(gt); + intel_gt_pm_put(gt, wakeref); return igt_flush_test(gt->i915); } @@ -133,12 +135,13 @@ static int perf_mi_bb_start(void *arg) struct intel_gt *gt = arg; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = 0; if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */ return 0; - perf_begin(gt); + wakeref = perf_begin(gt); for_each_engine(engine, gt, id) { struct intel_context *ce = engine->kernel_context; struct i915_vma *batch; @@ -207,7 +210,7 @@ out: pr_info("%s: MI_BB_START cycles: %u\n", engine->name, trifilter(cycles)); } - if (perf_end(gt)) + if (perf_end(gt, wakeref)) err = -EIO; return err; @@ -260,12 +263,13 @@ static int perf_mi_noop(void *arg) struct intel_gt *gt = arg; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = 0; if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */ return 0; - perf_begin(gt); + wakeref = perf_begin(gt); for_each_engine(engine, gt, id) { struct intel_context *ce = engine->kernel_context; struct i915_vma *base, *nop; @@ -364,7 +368,7 @@ out: pr_info("%s: 16K MI_NOOP cycles: %u\n", engine->name, trifilter(cycles)); } - if (perf_end(gt)) + if (perf_end(gt, wakeref)) err = -EIO; return err; diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c index 273d440a53..bc441ce7b3 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -84,7 +84,7 @@ static struct pulse *pulse_create(void) static void pulse_unlock_wait(struct pulse *p) { - i915_active_unlock_wait(&p->active); + wait_var_event_timeout(&p->active, i915_active_is_idle(&p->active), HZ); } static int __live_idle_pulse(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index 0971241707..33351deeea 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -81,6 +81,7 @@ static int live_gt_clocks(void *arg) struct intel_gt *gt = arg; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = 0; if (!gt->clock_frequency) { /* unknown */ @@ -91,7 +92,7 @@ static int live_gt_clocks(void *arg) if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? 
*/ return 0; - intel_gt_pm_get(gt); + wakeref = intel_gt_pm_get(gt); intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); for_each_engine(engine, gt, id) { @@ -128,7 +129,7 @@ static int live_gt_clocks(void *arg) } intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); - intel_gt_pm_put(gt); + intel_gt_pm_put(gt, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 5f826b6dcf..e17b8777d2 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -1555,7 +1555,7 @@ static int live_lrc_isolation(void *arg) return err; } -static int indirect_ctx_submit_req(struct intel_context *ce) +static int wabb_ctx_submit_req(struct intel_context *ce) { struct i915_request *rq; int err = 0; @@ -1579,7 +1579,8 @@ static int indirect_ctx_submit_req(struct intel_context *ce) #define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32)) static u32 * -emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs) +emit_wabb_ctx_canary(const struct intel_context *ce, + u32 *cs, bool per_ctx) { *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT | @@ -1587,26 +1588,43 @@ emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs) *cs++ = i915_mmio_reg_offset(RING_START(0)); *cs++ = i915_ggtt_offset(ce->state) + context_wa_bb_offset(ce) + - CTX_BB_CANARY_OFFSET; + CTX_BB_CANARY_OFFSET + + (per_ctx ? PAGE_SIZE : 0); *cs++ = 0; return cs; } +static u32 * +emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs) +{ + return emit_wabb_ctx_canary(ce, cs, false); +} + +static u32 * +emit_per_ctx_bb_canary(const struct intel_context *ce, u32 *cs) +{ + return emit_wabb_ctx_canary(ce, cs, true); +} + static void -indirect_ctx_bb_setup(struct intel_context *ce) +wabb_ctx_setup(struct intel_context *ce, bool per_ctx) { - u32 *cs = context_indirect_bb(ce); + u32 *cs = context_wabb(ce, per_ctx); cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d; - setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary); + if (per_ctx) + setup_per_ctx_bb(ce, ce->engine, emit_per_ctx_bb_canary); + else + setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary); } -static bool check_ring_start(struct intel_context *ce) +static bool check_ring_start(struct intel_context *ce, bool per_ctx) { const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) - - LRC_STATE_OFFSET + context_wa_bb_offset(ce); + LRC_STATE_OFFSET + context_wa_bb_offset(ce) + + (per_ctx ? PAGE_SIZE : 0); if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START]) return true; @@ -1618,21 +1636,21 @@ static bool check_ring_start(struct intel_context *ce) return false; } -static int indirect_ctx_bb_check(struct intel_context *ce) +static int wabb_ctx_check(struct intel_context *ce, bool per_ctx) { int err; - err = indirect_ctx_submit_req(ce); + err = wabb_ctx_submit_req(ce); if (err) return err; - if (!check_ring_start(ce)) + if (!check_ring_start(ce, per_ctx)) return -EINVAL; return 0; } -static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine) +static int __lrc_wabb_ctx(struct intel_engine_cs *engine, bool per_ctx) { struct intel_context *a, *b; int err; @@ -1667,14 +1685,14 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine) * As ring start is restored prior to starting the indirect ctx bb and * as it will be different for each context, it fits this purpose. 
*/ - indirect_ctx_bb_setup(a); - indirect_ctx_bb_setup(b); + wabb_ctx_setup(a, per_ctx); + wabb_ctx_setup(b, per_ctx); - err = indirect_ctx_bb_check(a); + err = wabb_ctx_check(a, per_ctx); if (err) goto unpin_b; - err = indirect_ctx_bb_check(b); + err = wabb_ctx_check(b, per_ctx); unpin_b: intel_context_unpin(b); @@ -1688,7 +1706,7 @@ put_a: return err; } -static int live_lrc_indirect_ctx_bb(void *arg) +static int lrc_wabb_ctx(void *arg, bool per_ctx) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; @@ -1697,7 +1715,7 @@ for_each_engine(engine, gt, id) { intel_engine_pm_get(engine); - err = __live_lrc_indirect_ctx_bb(engine); + err = __lrc_wabb_ctx(engine, per_ctx); intel_engine_pm_put(engine); if (igt_flush_test(gt->i915)) @@ -1710,6 +1728,16 @@ return err; } +static int live_lrc_indirect_ctx_bb(void *arg) +{ + return lrc_wabb_ctx(arg, false); +} + +static int live_lrc_per_ctx_bb(void *arg) +{ + return lrc_wabb_ctx(arg, true); +} + static void garbage_reset(struct intel_engine_cs *engine, struct i915_request *rq) { @@ -1947,6 +1975,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915) SUBTEST(live_lrc_garbage), SUBTEST(live_pphwsp_runtime), SUBTEST(live_lrc_indirect_ctx_bb), + SUBTEST(live_lrc_per_ctx_bb), }; if (!HAS_LOGICAL_RING_CONTEXTS(i915)) diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 79aa6ac66a..f40de408cd 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -261,11 +261,12 @@ static int igt_atomic_reset(void *arg) { struct intel_gt *gt = arg; const typeof(*igt_atomic_phases) *p; + intel_wakeref_t wakeref; int err = 0; /* Check that the resets are usable from atomic context */ - intel_gt_pm_get(gt); + wakeref = intel_gt_pm_get(gt); igt_global_reset_lock(gt); /* Flush any requests before we get started and check basics */ @@ -296,7 +297,7 @@ static int igt_atomic_reset(void *arg) unlock: igt_global_reset_unlock(gt); - intel_gt_pm_put(gt); + intel_gt_pm_put(gt, wakeref); return err; } @@ -307,6 +308,7 @@ static int igt_atomic_engine_reset(void *arg) const typeof(*igt_atomic_phases) *p; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; int err = 0; /* Check that the resets are usable from atomic context */ @@ -317,7 +319,7 @@ if (intel_uc_uses_guc_submission(&gt->uc)) return 0; - intel_gt_pm_get(gt); + wakeref = intel_gt_pm_get(gt); igt_global_reset_lock(gt); /* Flush any requests before we get started and check basics */ @@ -365,7 +367,7 @@ out_unlock: igt_global_reset_unlock(gt); - intel_gt_pm_put(gt); + intel_gt_pm_put(gt, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index fb30f733b0..dcef8d4989 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -224,6 +224,7 @@ int live_rps_clock_interval(void *arg) struct intel_engine_cs *engine; enum intel_engine_id id; struct igt_spinner spin; + intel_wakeref_t wakeref; int err = 0; if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) @@ -236,7 +237,7 @@ saved_work = rps->work.func; rps->work.func = dummy_rps_work; - intel_gt_pm_get(gt); + wakeref = intel_gt_pm_get(gt); intel_rps_disable(&gt->rps); intel_gt_check_clock_frequency(gt); @@ 
-355,7 +356,7 @@ int live_rps_clock_interval(void *arg) } intel_rps_enable(&gt->rps); - intel_gt_pm_put(gt); + intel_gt_pm_put(gt, wakeref); igt_spinner_fini(&spin); @@ -376,6 +377,7 @@ int live_rps_control(void *arg) struct intel_engine_cs *engine; enum intel_engine_id id; struct igt_spinner spin; + intel_wakeref_t wakeref; int err = 0; /* @@ -398,7 +400,7 @@ int live_rps_control(void *arg) saved_work = rps->work.func; rps->work.func = dummy_rps_work; - intel_gt_pm_get(gt); + wakeref = intel_gt_pm_get(gt); for_each_engine(engine, gt, id) { struct i915_request *rq; ktime_t min_dt, max_dt; @@ -488,7 +490,7 @@ int live_rps_control(void *arg) break; } } - intel_gt_pm_put(gt); + intel_gt_pm_put(gt, wakeref); igt_spinner_fini(&spin); @@ -1023,6 +1025,7 @@ int live_rps_interrupt(void *arg) struct intel_engine_cs *engine; enum intel_engine_id id; struct igt_spinner spin; + intel_wakeref_t wakeref; u32 pm_events; int err = 0; @@ -1033,9 +1036,9 @@ int live_rps_interrupt(void *arg) if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6) return 0; - intel_gt_pm_get(gt); - pm_events = rps->pm_events; - intel_gt_pm_put(gt); + pm_events = 0; + with_intel_gt_pm(gt, wakeref) + pm_events = rps->pm_events; if (!pm_events) { pr_err("No RPS PM events registered, but RPS is enabled?\n"); return -ENODEV; } diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c index 952c8d52d6..302d054029 100644 --- a/drivers/gpu/drm/i915/gt/selftest_slpc.c +++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c @@ -266,6 +266,7 @@ static int run_test(struct intel_gt *gt, int test_type) struct intel_rps *rps = &gt->rps; struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; struct igt_spinner spin; u32 slpc_min_freq, slpc_max_freq; int err = 0; @@ -311,7 +312,7 @@ static int run_test(struct intel_gt *gt, int test_type) } intel_gt_pm_wait_for_idle(gt); - intel_gt_pm_get(gt); + wakeref = intel_gt_pm_get(gt); for_each_engine(engine, gt, id) { struct i915_request *rq; u32 max_act_freq; @@ -397,7 +398,7 @@ static int run_test(struct intel_gt *gt, int test_type) if (igt_flush_test(gt->i915)) err = -EIO; - intel_gt_pm_put(gt); + intel_gt_pm_put(gt, wakeref); igt_spinner_fini(&spin); intel_gt_pm_wait_for_idle(gt); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c index 5f138de3c1..40817ebcca 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c @@ -322,6 +322,7 @@ static int i915_gsc_proxy_component_bind(struct device *i915_kdev, gsc->proxy.component = data; gsc->proxy.component->mei_dev = mei_kdev; mutex_unlock(&gsc->proxy.mutex); + gt_dbg(gt, "GSC proxy mei component bound\n"); return 0; } @@ -342,6 +343,7 @@ static void i915_gsc_proxy_component_unbind(struct device *i915_kdev, with_intel_runtime_pm(&i915->runtime_pm, wakeref) intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE), HECI_H_CSR_IE | HECI_H_CSR_RST, 0); + gt_dbg(gt, "GSC proxy mei component unbound\n"); } static const struct component_ops i915_gsc_proxy_component_ops = { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 3f3df1166b..2b450c43bb 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -330,7 +330,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc) static u32 guc_ctl_devid(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct drm_i915_private *i915 = 
guc_to_i915(guc); return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 2b6dfe62c8..813cc888e6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -105,61 +105,67 @@ struct intel_guc { */ struct { /** - * @lock: protects everything in submission_state, - * ce->guc_id.id, and ce->guc_id.ref when transitioning in and - * out of zero + * @submission_state.lock: protects everything in + * submission_state, ce->guc_id.id, and ce->guc_id.ref + * when transitioning in and out of zero */ spinlock_t lock; /** - * @guc_ids: used to allocate new guc_ids, single-lrc + * @submission_state.guc_ids: used to allocate new + * guc_ids, single-lrc */ struct ida guc_ids; /** - * @num_guc_ids: Number of guc_ids, selftest feature to be able - * to reduce this number while testing. + * @submission_state.num_guc_ids: Number of guc_ids, selftest + * feature to be able to reduce this number while testing. */ int num_guc_ids; /** - * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc + * @submission_state.guc_ids_bitmap: used to allocate + * new guc_ids, multi-lrc */ unsigned long *guc_ids_bitmap; /** - * @guc_id_list: list of intel_context with valid guc_ids but no - * refs + * @submission_state.guc_id_list: list of intel_context + * with valid guc_ids but no refs */ struct list_head guc_id_list; /** - * @guc_ids_in_use: Number single-lrc guc_ids in use + * @submission_state.guc_ids_in_use: Number of single-lrc + * guc_ids in use */ unsigned int guc_ids_in_use; /** - * @destroyed_contexts: list of contexts waiting to be destroyed - * (deregistered with the GuC) + * @submission_state.destroyed_contexts: list of contexts + * waiting to be destroyed (deregistered with the GuC) */ struct list_head destroyed_contexts; /** - * @destroyed_worker: worker to deregister contexts, need as we - * need to take a GT PM reference and can't from destroy - * function as it might be in an atomic context (no sleeping) + * @submission_state.destroyed_worker: worker to deregister + * contexts; needed because we must take a GT PM reference + * and can't do so from the destroy function, as it might be + * in an atomic context (no sleeping) */ struct work_struct destroyed_worker; /** - * @reset_fail_worker: worker to trigger a GT reset after an - * engine reset fails + * @submission_state.reset_fail_worker: worker to trigger + * a GT reset after an engine reset fails */ struct work_struct reset_fail_worker; /** - * @reset_fail_mask: mask of engines that failed to reset + * @submission_state.reset_fail_mask: mask of engines that + * failed to reset */ intel_engine_mask_t reset_fail_mask; /** - * @sched_disable_delay_ms: schedule disable delay, in ms, for - * contexts + * @submission_state.sched_disable_delay_ms: schedule + * disable delay, in ms, for contexts */ unsigned int sched_disable_delay_ms; /** - * @sched_disable_gucid_threshold: threshold of min remaining available - * guc_ids before we start bypassing the schedule disable delay + * @submission_state.sched_disable_gucid_threshold: + * threshold of min remaining available guc_ids before + * we start bypassing the schedule disable delay */ unsigned int sched_disable_gucid_threshold; } submission_state; @@ -243,37 +249,40 @@ struct intel_guc { */ struct { /** - * @lock: Lock protecting the below fields and the engine stats. + * @timestamp.lock: Lock protecting the below fields and + * the engine stats. 
*/ spinlock_t lock; /** - * @gt_stamp: 64 bit extended value of the GT timestamp. + * @timestamp.gt_stamp: 64-bit extended value of the GT + * timestamp. */ u64 gt_stamp; /** - * @ping_delay: Period for polling the GT timestamp for - * overflow. + * @timestamp.ping_delay: Period for polling the GT + * timestamp for overflow. */ unsigned long ping_delay; /** - * @work: Periodic work to adjust GT timestamp, engine and - * context usage for overflows. + * @timestamp.work: Periodic work to adjust GT timestamp, + * engine and context usage for overflows. */ struct delayed_work work; /** - * @shift: Right shift value for the gpm timestamp + * @timestamp.shift: Right shift value for the gpm timestamp */ u32 shift; /** - * @last_stat_jiffies: jiffies at last actual stats collection time - * We use this timestamp to ensure we don't oversample the - * stats because runtime power management events can trigger - * stats collection at much higher rates than required. + * @timestamp.last_stat_jiffies: jiffies at last actual + * stats collection time. We use this timestamp to ensure + * we don't oversample the stats because runtime power + * management events can trigger stats collection at much + * higher rates than required. */ unsigned long last_stat_jiffies; } timestamp; @@ -297,6 +306,10 @@ struct intel_guc { * @number_guc_id_stolen: The number of guc_ids that have been stolen */ int number_guc_id_stolen; + /** + * @fast_response_selftest: Backdoor to CT handler for fast response selftest + */ + u32 fast_response_selftest; #endif }; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c index a4da0208c8..a1cd40d805 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c @@ -355,7 +355,7 @@ guc_capture_alloc_steered_lists(struct intel_guc *guc, static const struct __guc_mmio_reg_descr_group * guc_capture_get_device_reglist(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct drm_i915_private *i915 = guc_to_i915(guc); const struct __guc_mmio_reg_descr_group *lists; if (GRAPHICS_VER(i915) >= 12) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index 89e314b375..0d5197c082 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -265,7 +265,7 @@ int intel_guc_ct_init(struct intel_guc_ct *ct) u32 *cmds; int err; - err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO); + err = i915_inject_probe_error(guc_to_i915(guc), -ENXIO); if (err) return err; @@ -1076,6 +1076,15 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r found = true; break; } + +#ifdef CONFIG_DRM_I915_SELFTEST + if (!found && ct_to_guc(ct)->fast_response_selftest) { + CT_DEBUG(ct, "Assuming unsolicited response due to FAST_REQUEST selftest\n"); + ct_to_guc(ct)->fast_response_selftest++; + found = true; + } +#endif + if (!found) { CT_ERROR(ct, "Unsolicited response message: len %u, data %#x (fence %u, last %u)\n", len, hxg[0], fence, ct->requests.last_fence); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index 55bc8b55fb..bf16351c93 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -520,7 +520,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log) static int guc_log_relay_create(struct intel_guc_log *log) { struct intel_guc *guc = 
log_to_guc(log); - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct drm_i915_private *i915 = guc_to_i915(guc); struct rchan *guc_log_relay_chan; size_t n_subbufs, subbuf_size; int ret; @@ -573,7 +573,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log) static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct drm_i915_private *i915 = guc_to_i915(guc); intel_wakeref_t wakeref; _guc_log_copy_debuglogs_for_relay(log); @@ -589,7 +589,7 @@ static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log) static u32 __get_default_log_level(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct drm_i915_private *i915 = guc_to_i915(guc); /* A negative value means "use platform/config default" */ if (i915->params.guc_log_level < 0) { @@ -664,7 +664,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log) int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct drm_i915_private *i915 = guc_to_i915(guc); intel_wakeref_t wakeref; int ret = 0; @@ -796,7 +796,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) static void guc_log_relay_stop(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct drm_i915_private *i915 = guc_to_i915(guc); if (!log->relay.started) return; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c index 1adec6de22..9df7927304 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c @@ -14,7 +14,7 @@ static bool __guc_rc_supported(struct intel_guc *guc) { /* GuC RC is unavailable for pre-Gen12 */ return guc->submission_supported && - GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12; + GRAPHICS_VER(guc_to_i915(guc)) >= 12; } static bool __guc_rc_selected(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 2dfb07cc4b..3e681ab6fb 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -34,7 +34,7 @@ static bool __detect_slpc_supported(struct intel_guc *guc) { /* GuC SLPC is unavailable for pre-Gen12 */ return guc->submission_supported && - GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12; + GRAPHICS_VER(guc_to_i915(guc)) >= 12; } static bool __guc_slpc_selected(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 17df71117c..a259f1118c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1107,7 +1107,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) if (deregister) guc_signal_context_fence(ce); if (destroyed) { - intel_gt_pm_put_async(guc_to_gt(guc)); + intel_gt_pm_put_async_untracked(guc_to_gt(guc)); release_guc_id(guc, ce); __guc_context_destroy(ce); } @@ -1303,6 +1303,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) unsigned long flags; u32 reset_count; bool in_reset; + intel_wakeref_t wakeref; spin_lock_irqsave(&guc->timestamp.lock, flags); @@ -1325,7 +1326,8 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs 
*engine, ktime_t *now) * start_gt_clk is derived from GuC state. To get a consistent * view of activity, we query the GuC state only if gt is awake. */ - if (!in_reset && intel_gt_pm_get_if_awake(gt)) { + wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt); + if (wakeref) { stats_saved = *stats; gt_stamp_saved = guc->timestamp.gt_stamp; /* @@ -1334,7 +1336,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) */ guc_update_engine_gt_clks(engine); guc_update_pm_timestamp(guc, now); - intel_gt_pm_put_async(gt); + intel_gt_pm_put_async(gt, wakeref); if (i915_reset_count(gpu_error) != reset_count) { *stats = stats_saved; guc->timestamp.gt_stamp = gt_stamp_saved; @@ -3385,9 +3387,9 @@ static void destroyed_worker_func(struct work_struct *w) struct intel_guc *guc = container_of(w, struct intel_guc, submission_state.destroyed_worker); struct intel_gt *gt = guc_to_gt(guc); - int tmp; + intel_wakeref_t wakeref; - with_intel_gt_pm(gt, tmp) + with_intel_gt_pm(gt, wakeref) deregister_destroyed_contexts(guc); } @@ -4624,12 +4626,12 @@ static bool __guc_submission_supported(struct intel_guc *guc) { /* GuC submission is unavailable for pre-Gen11 */ return intel_guc_is_supported(guc) && - GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11; + GRAPHICS_VER(guc_to_i915(guc)) >= 11; } static bool __guc_submission_selected(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct drm_i915_private *i915 = guc_to_i915(guc); if (!intel_guc_submission_is_supported(guc)) return false; @@ -4894,7 +4896,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc, intel_context_put(ce); } else if (context_destroyed(ce)) { /* Context has been destroyed */ - intel_gt_pm_put_async(guc_to_gt(guc)); + intel_gt_pm_put_async_untracked(guc_to_gt(guc)); release_guc_id(guc, ce); __guc_context_destroy(ce); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 27f6561dd7..3872d309ed 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -106,11 +106,6 @@ static void __confirm_options(struct intel_uc *uc) gt_info(gt, "Incompatible option enable_guc=%d - %s\n", i915->params.enable_guc, "GuC is not supported!"); - if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC && - !intel_uc_supports_huc(uc)) - gt_info(gt, "Incompatible option enable_guc=%d - %s\n", - i915->params.enable_guc, "HuC is not supported!"); - if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION && !intel_uc_supports_guc_submission(uc)) gt_info(gt, "Incompatible option enable_guc=%d - %s\n", diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 362639162e..756093eaf2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -1343,16 +1343,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len) for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) { u32 len = min_t(u32, size, PAGE_SIZE - offset); - void *vaddr; if (idx > 0) { idx--; continue; } - vaddr = kmap_atomic(page); - memcpy(dst, vaddr + offset, len); - kunmap_atomic(vaddr); + memcpy_from_page(dst, page, offset, len); offset = 0; dst += len; diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c index bfb7214356..c900aac85a 100644 --- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c @@ -286,11 +286,126 @@ err_wakeref: return ret; } +/* + * Send a context 
schedule H2G message with an invalid context id. + * This should generate a GUC_RESULT_INVALID_CONTEXT response. + */ +static int bad_h2g(struct intel_guc *guc) +{ + u32 action[] = { + INTEL_GUC_ACTION_SCHED_CONTEXT, + 0x12345678, + }; + + return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0); +} + +/* + * Set a spinner running to make sure the system is alive and active, + * then send a bad but asynchronous H2G command and wait to see if an + * error response is returned. If no response is received or if the + * spinner dies then the test will fail. + */ +#define FAST_RESPONSE_TIMEOUT_MS 1000 +static int intel_guc_fast_request(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_context *ce; + struct igt_spinner spin; + struct i915_request *rq; + intel_wakeref_t wakeref; + struct intel_engine_cs *engine = intel_selftest_find_any_engine(gt); + bool spinning = false; + int ret = 0; + + if (!engine) + return 0; + + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + ret = PTR_ERR(ce); + gt_err(gt, "Failed to create context: %pe\n", ce); + goto err_pm; + } + + ret = igt_spinner_init(&spin, engine->gt); + if (ret) { + gt_err(gt, "Failed to create spinner: %pe\n", ERR_PTR(ret)); + goto err_pm; + } + spinning = true; + + rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); + intel_context_put(ce); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + gt_err(gt, "Failed to create spinner request: %pe\n", rq); + goto err_spin; + } + + ret = request_add_spin(rq, &spin); + if (ret) { + gt_err(gt, "Failed to add spinner request: %pe\n", ERR_PTR(ret)); + goto err_rq; + } + + gt->uc.guc.fast_response_selftest = 1; + + ret = bad_h2g(&gt->uc.guc); + if (ret) { + gt_err(gt, "Failed to send H2G: %pe\n", ERR_PTR(ret)); + goto err_rq; + } + + ret = wait_for(gt->uc.guc.fast_response_selftest != 1 || i915_request_completed(rq), + FAST_RESPONSE_TIMEOUT_MS); + if (ret) { + gt_err(gt, "Request wait failed: %pe\n", ERR_PTR(ret)); + goto err_rq; + } + + if (i915_request_completed(rq)) { + gt_err(gt, "Spinner died waiting for fast request error!\n"); + ret = -EIO; + goto err_rq; + } + + if (gt->uc.guc.fast_response_selftest != 2) { + gt_err(gt, "Unexpected fast response count: %d\n", + gt->uc.guc.fast_response_selftest); + ret = -EIO; + goto err_rq; + } + + igt_spinner_end(&spin); + spinning = false; + + ret = intel_selftest_wait_for_rq(rq); + if (ret) { + gt_err(gt, "Request failed to complete: %pe\n", ERR_PTR(ret)); + goto err_rq; + } + +err_rq: + i915_request_put(rq); + +err_spin: + if (spinning) + igt_spinner_end(&spin); + igt_spinner_fini(&spin); + +err_pm: + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + return ret; +} + int intel_guc_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(intel_guc_scrub_ctbs), SUBTEST(intel_guc_steal_guc_ids), + SUBTEST(intel_guc_fast_request), }; struct intel_gt *gt = to_gt(i915); diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c index 34b5d952e2..26fdc392fc 100644 --- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c @@ -74,7 +74,7 @@ static int intel_hang_guc(void *arg) goto err; } - rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); intel_context_put(ce); if (IS_ERR(rq)) { ret = PTR_ERR(rq); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c 
b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 05f9348b7a..d4a3f3e093 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -3047,7 +3047,7 @@ put_obj: static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { - u32 per_ctx_start[CACHELINE_DWORDS] = {0}; + u32 per_ctx_start[CACHELINE_DWORDS] = {}; unsigned char *bb_start_sva; if (!wa_ctx->per_ctx.valid) diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 835c3fde8a..313efdabee 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -56,7 +56,7 @@ static const struct pixel_format bdw_pixel_formats[] = { {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"}, /* non-supported format has bpp default to 0 */ - {0, 0, NULL}, + {} }; static const struct pixel_format skl_pixel_formats[] = { @@ -76,7 +76,7 @@ static const struct pixel_format skl_pixel_formats[] = { {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"}, /* non-supported format has bpp default to 0 */ - {0, 0, NULL}, + {} }; static int bdw_format_to_drm(int format) @@ -293,7 +293,7 @@ static const struct cursor_mode_format cursor_pixel_formats[] = { {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"}, /* non-supported format has bpp default to 0 */ - {0, 0, 0, 0, NULL}, + {} }; static int cursor_mode_to_drm(int mode) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index d30f8814d9..efcb00472b 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -538,7 +538,7 @@ static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc; enum dpio_phy phy = DPIO_PHY0; enum dpio_channel ch = DPIO_CH0; - struct dpll clock = {0}; + struct dpll clock = {}; u32 temp; /* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */ @@ -2576,7 +2576,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) static int init_skl_mmio_info(struct intel_gvt *gvt) { - struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index ddf49c2dbb..2905df83e1 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1211,11 +1211,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, for (n = offset >> PAGE_SHIFT; remain; n++) { int len = min(remain, PAGE_SIZE - x); - src = kmap_atomic(i915_gem_object_get_page(src_obj, n)); + src = kmap_local_page(i915_gem_object_get_page(src_obj, n)); if (src_needs_clflush) drm_clflush_virt_range(src + x, len); memcpy(ptr, src + x, len); - kunmap_atomic(src); + kunmap_local(src); ptr += len; remain -= len; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e9b79c2c37..db99c2ef66 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -32,6 +32,8 @@ #include +#include "display/intel_display_params.h" + #include "gem/i915_gem_context.h" #include "gt/intel_gt.h" #include "gt/intel_gt_buffer_pool.h" @@ -49,6 +51,7 @@ #include "i915_debugfs.h" #include "i915_debugfs_params.h" #include "i915_driver.h" +#include "i915_gpu_error.h" #include "i915_irq.h" #include "i915_reg.h" #include "i915_scheduler.h" @@ -67,13 +70,13 @@ static int i915_capabilities(struct seq_file *m, void *data) 
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915)); intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p); - intel_display_device_info_print(DISPLAY_INFO(i915), DISPLAY_RUNTIME_INFO(i915), &p); i915_print_iommu_status(i915, &p); intel_gt_info_print(&to_gt(i915)->info, &p); intel_driver_caps_print(&i915->caps, &p); kernel_param_lock(THIS_MODULE); i915_params_dump(&i915->params, &p); + intel_display_params_dump(i915, &p); kernel_param_unlock(THIS_MODULE); return 0; @@ -297,107 +300,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data) return 0; } -#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) -static ssize_t gpu_state_read(struct file *file, char __user *ubuf, - size_t count, loff_t *pos) -{ - struct i915_gpu_coredump *error; - ssize_t ret; - void *buf; - - error = file->private_data; - if (!error) - return 0; - - /* Bounce buffer required because of kernfs __user API convenience. */ - buf = kmalloc(count, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count); - if (ret <= 0) - goto out; - - if (!copy_to_user(ubuf, buf, ret)) - *pos += ret; - else - ret = -EFAULT; - -out: - kfree(buf); - return ret; -} - -static int gpu_state_release(struct inode *inode, struct file *file) -{ - i915_gpu_coredump_put(file->private_data); - return 0; -} - -static int i915_gpu_info_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *i915 = inode->i_private; - struct i915_gpu_coredump *gpu; - intel_wakeref_t wakeref; - - gpu = NULL; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE); - - if (IS_ERR(gpu)) - return PTR_ERR(gpu); - - file->private_data = gpu; - return 0; -} - -static const struct file_operations i915_gpu_info_fops = { - .owner = THIS_MODULE, - .open = i915_gpu_info_open, - .read = gpu_state_read, - .llseek = default_llseek, - .release = gpu_state_release, -}; - -static ssize_t -i915_error_state_write(struct file *filp, - const char __user *ubuf, - size_t cnt, - loff_t *ppos) -{ - struct i915_gpu_coredump *error = filp->private_data; - - if (!error) - return 0; - - drm_dbg(&error->i915->drm, "Resetting error state\n"); - i915_reset_error_state(error->i915); - - return cnt; -} - -static int i915_error_state_open(struct inode *inode, struct file *file) -{ - struct i915_gpu_coredump *error; - - error = i915_first_error_state(inode->i_private); - if (IS_ERR(error)) - return PTR_ERR(error); - - file->private_data = error; - return 0; -} - -static const struct file_operations i915_error_state_fops = { - .owner = THIS_MODULE, - .open = i915_error_state_open, - .read = gpu_state_read, - .write = i915_error_state_write, - .llseek = default_llseek, - .release = gpu_state_release, -}; -#endif - static int i915_frequency_info(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); @@ -837,10 +739,6 @@ static const struct i915_debugfs_files { {"i915_perf_noa_delay", &i915_perf_noa_delay_fops}, {"i915_wedged", &i915_wedged_fops}, {"i915_gem_drop_caches", &i915_drop_caches_fops}, -#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) - {"i915_error_state", &i915_error_state_fops}, - {"i915_gpu_info", &i915_gpu_info_fops}, -#endif }; void i915_debugfs_register(struct drm_i915_private *dev_priv) @@ -863,4 +761,6 @@ void i915_debugfs_register(struct drm_i915_private *dev_priv) drm_debugfs_create_files(i915_debugfs_list, ARRAY_SIZE(i915_debugfs_list), minor->debugfs_root, minor); + + 
i915_gpu_error_debugfs_register(dev_priv); } diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 802de2c6de..9967148aed 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -231,16 +231,10 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); - mutex_init(&dev_priv->display.backlight.lock); mutex_init(&dev_priv->sb_lock); cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); - mutex_init(&dev_priv->display.audio.mutex); - mutex_init(&dev_priv->display.wm.wm_mutex); - mutex_init(&dev_priv->display.pps.mutex); - mutex_init(&dev_priv->display.hdcp.hdcp_mutex); - i915_memcpy_init_early(dev_priv); intel_runtime_pm_init_early(&dev_priv->runtime_pm); @@ -804,7 +798,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_cleanup_modeset2; - intel_pxp_init(i915); + ret = intel_pxp_init(i915); + if (ret && ret != -ENODEV) + drm_dbg(&i915->drm, "pxp init failed with %d\n", ret); ret = intel_display_driver_probe(i915); if (ret) @@ -907,6 +903,8 @@ static void i915_driver_release(struct drm_device *dev) intel_runtime_pm_driver_release(rpm); i915_driver_late_release(dev_priv); + + intel_display_device_remove(dev_priv); } static int i915_driver_open(struct drm_device *dev, struct drm_file *file) @@ -1037,7 +1035,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_power_domains_driver_remove(i915); enable_rpm_wakeref_asserts(&i915->runtime_pm); - intel_runtime_pm_driver_release(&i915->runtime_pm); + intel_runtime_pm_driver_last_release(&i915->runtime_pm); } static bool suspend_to_idle(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c index 2a44b3876c..fa6852713b 100644 --- a/drivers/gpu/drm/i915/i915_drm_client.c +++ b/drivers/gpu/drm/i915/i915_drm_client.c @@ -28,6 +28,10 @@ struct i915_drm_client *i915_drm_client_alloc(void) kref_init(&client->kref); spin_lock_init(&client->ctx_lock); INIT_LIST_HEAD(&client->ctx_list); +#ifdef CONFIG_PROC_FS + spin_lock_init(&client->objects_lock); + INIT_LIST_HEAD(&client->objects_list); +#endif return client; } @@ -41,6 +45,68 @@ void __i915_drm_client_free(struct kref *kref) } #ifdef CONFIG_PROC_FS +static void +obj_meminfo(struct drm_i915_gem_object *obj, + struct drm_memory_stats stats[INTEL_REGION_UNKNOWN]) +{ + const enum intel_region_id id = obj->mm.region ? + obj->mm.region->id : INTEL_REGION_SMEM; + const u64 sz = obj->base.size; + + if (obj->base.handle_count > 1) + stats[id].shared += sz; + else + stats[id].private += sz; + + if (i915_gem_object_has_pages(obj)) { + stats[id].resident += sz; + + if (!dma_resv_test_signaled(obj->base.resv, + DMA_RESV_USAGE_BOOKKEEP)) + stats[id].active += sz; + else if (i915_gem_object_is_shrinkable(obj) && + obj->mm.madv == I915_MADV_DONTNEED) + stats[id].purgeable += sz; + } +} + +static void show_meminfo(struct drm_printer *p, struct drm_file *file) +{ + struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {}; + struct drm_i915_file_private *fpriv = file->driver_priv; + struct i915_drm_client *client = fpriv->client; + struct drm_i915_private *i915 = fpriv->i915; + struct drm_i915_gem_object *obj; + struct intel_memory_region *mr; + struct list_head __rcu *pos; + unsigned int id; + + /* Public objects. 
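/*
 * Accounting rules in obj_meminfo() above, summarized: sizes are binned
 * per memory region; an object counts as "shared" once more than one
 * handle references it (otherwise "private"), "resident" while it has
 * pages, "active" while its reservation object is still busy, and
 * "purgeable" when shrinkable and marked I915_MADV_DONTNEED. The
 * resulting /proc/<pid>/fdinfo/<fd> lines follow the drm-usage-stats
 * convention; an assumed, illustrative excerpt (region suffixes come
 * from the new mr->uabi_name, e.g. "system0", "local0"):
 *
 *	drm-total-system0:	8192 KiB
 *	drm-shared-system0:	2048 KiB
 *	drm-resident-system0:	8192 KiB
 *	drm-active-system0:	4096 KiB
 *	drm-purgeable-system0:	0
 */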
*/ + spin_lock(&file->table_lock); + idr_for_each_entry(&file->object_idr, obj, id) + obj_meminfo(obj, stats); + spin_unlock(&file->table_lock); + + /* Internal objects. */ + rcu_read_lock(); + list_for_each_rcu(pos, &client->objects_list) { + obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj), + client_link)); + if (!obj) + continue; + obj_meminfo(obj, stats); + i915_gem_object_put(obj); + } + rcu_read_unlock(); + + for_each_memory_region(mr, i915, id) + drm_print_memory_stats(p, + &stats[id], + DRM_GEM_OBJECT_RESIDENT | + DRM_GEM_OBJECT_PURGEABLE, + mr->uabi_name); +} + static const char * const uabi_class_names[] = { [I915_ENGINE_CLASS_RENDER] = "render", [I915_ENGINE_CLASS_COPY] = "copy", @@ -102,10 +168,52 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file) * ****************************************************************** */ + show_meminfo(p, file); + if (GRAPHICS_VER(i915) < 8) return; for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++) show_client_class(p, i915, file_priv->client, i); } + +void i915_drm_client_add_object(struct i915_drm_client *client, + struct drm_i915_gem_object *obj) +{ + unsigned long flags; + + GEM_WARN_ON(obj->client); + GEM_WARN_ON(!list_empty(&obj->client_link)); + + spin_lock_irqsave(&client->objects_lock, flags); + obj->client = i915_drm_client_get(client); + list_add_tail_rcu(&obj->client_link, &client->objects_list); + spin_unlock_irqrestore(&client->objects_lock, flags); +} + +void i915_drm_client_remove_object(struct drm_i915_gem_object *obj) +{ + struct i915_drm_client *client = fetch_and_zero(&obj->client); + unsigned long flags; + + /* Object may not be associated with a client. */ + if (!client) + return; + + spin_lock_irqsave(&client->objects_lock, flags); + list_del_rcu(&obj->client_link); + spin_unlock_irqrestore(&client->objects_lock, flags); + + i915_drm_client_put(client); +} + +void i915_drm_client_add_context_objects(struct i915_drm_client *client, + struct intel_context *ce) +{ + if (ce->state) + i915_drm_client_add_object(client, ce->state->obj); + + if (ce->ring != ce->engine->legacy.ring && ce->ring->vma) + i915_drm_client_add_object(client, ce->ring->vma->obj); +} #endif diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h index 67816c912b..a439dd7899 100644 --- a/drivers/gpu/drm/i915/i915_drm_client.h +++ b/drivers/gpu/drm/i915/i915_drm_client.h @@ -12,6 +12,10 @@ #include +#include "i915_file_private.h" +#include "gem/i915_gem_object_types.h" +#include "gt/intel_context_types.h" + #define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE struct drm_file; @@ -25,6 +29,20 @@ struct i915_drm_client { spinlock_t ctx_lock; /* For add/remove from ctx_list. */ struct list_head ctx_list; /* List of contexts belonging to client. */ +#ifdef CONFIG_PROC_FS + /** + * @objects_lock: lock protecting @objects_list + */ + spinlock_t objects_lock; + + /** + * @objects_list: list of objects created by this client + * + * Protected by @objects_lock. + */ + struct list_head objects_list; +#endif + /** * @past_runtime: Accumulation of pphwsp runtimes from closed contexts. 
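/*
 * i915_drm_client_add_object()/remove_object() above follow the classic
 * writer-side RCU list pattern: writers serialize on a spinlock while
 * readers (show_meminfo()) walk the list under rcu_read_lock() only.
 * A generic sketch with hypothetical types, not part of the patch:
 */
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct item {
	struct list_head link;
};

static LIST_HEAD(items);
static DEFINE_SPINLOCK(items_lock);

static void item_add(struct item *it)
{
	spin_lock(&items_lock);
	list_add_tail_rcu(&it->link, &items);	/* publish to lockless readers */
	spin_unlock(&items_lock);
}

static void item_del(struct item *it)
{
	spin_lock(&items_lock);
	list_del_rcu(&it->link);	/* readers may still observe the entry */
	spin_unlock(&items_lock);
	/* wait a grace period (or use kfree_rcu()) before freeing *it */
}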
*/ @@ -49,4 +67,28 @@ struct i915_drm_client *i915_drm_client_alloc(void); void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file); +#ifdef CONFIG_PROC_FS +void i915_drm_client_add_object(struct i915_drm_client *client, + struct drm_i915_gem_object *obj); +void i915_drm_client_remove_object(struct drm_i915_gem_object *obj); +void i915_drm_client_add_context_objects(struct i915_drm_client *client, + struct intel_context *ce); +#else +static inline void i915_drm_client_add_object(struct i915_drm_client *client, + struct drm_i915_gem_object *obj) +{ +} + +static inline void +i915_drm_client_remove_object(struct drm_i915_gem_object *obj) +{ +} + +static inline void +i915_drm_client_add_context_objects(struct i915_drm_client *client, + struct intel_context *ce) +{ +} +#endif + #endif /* !__I915_DRM_CLIENT_H__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index dd452c220d..861567362a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -396,20 +396,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915) return i915->gt[0]; } -/* Simple iterator over all initialised engines */ -#define for_each_engine(engine__, gt__, id__) \ - for ((id__) = 0; \ - (id__) < I915_NUM_ENGINES; \ - (id__)++) \ - for_each_if ((engine__) = (gt__)->engine[(id__)]) - -/* Iterator over subset of engines selected by mask */ -#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \ - for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \ - (tmp__) ? \ - ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \ - 0;) - #define rb_to_uabi_engine(rb) \ rb_entry_safe(rb, struct intel_engine_cs, uabi_node) @@ -418,11 +404,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915) (engine__); \ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) -#define for_each_uabi_class_engine(engine__, class__, i915__) \ - for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \ - (engine__) && (engine__)->uabi_class == (class__); \ - (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) - #define INTEL_INFO(i915) ((i915)->__info) #define RUNTIME_INFO(i915) (&(i915)->__runtime) #define DRIVER_CAPS(i915) (&(i915)->caps) @@ -575,6 +556,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2) #define IS_PONTEVECCHIO(i915) IS_PLATFORM(i915, INTEL_PONTEVECCHIO) #define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE) +#define IS_LUNARLAKE(i915) 0 #define IS_DG2_G10(i915) \ IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c166ad5e18..92758b6b41 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1306,8 +1306,6 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv) { i915_gem_init__mm(dev_priv); i915_gem_init__contexts(dev_priv); - - spin_lock_init(&dev_priv->display.fb_tracking.lock); } void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index b4e31e59c7..d04660b600 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -57,6 +57,7 @@ #include "i915_memcpy.h" #include "i915_reg.h" #include "i915_scatterlist.h" +#include "i915_sysfs.h" #include "i915_utils.h" #define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) @@ -520,7 
+521,7 @@ __find_vma(struct i915_vma_coredump *vma, const char *name) return NULL; } -struct i915_vma_coredump * +static struct i915_vma_coredump * intel_gpu_error_find_batch(const struct intel_engine_coredump *ee) { return __find_vma(ee->vma, "batch"); @@ -609,9 +610,9 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) va_end(args); } -void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m, - const struct intel_engine_cs *engine, - const struct i915_vma_coredump *vma) +static void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m, + const struct intel_engine_cs *engine, + const struct i915_vma_coredump *vma) { char out[ASCII85_BUFSZ]; struct page *page; @@ -660,6 +661,7 @@ static void err_print_params(struct drm_i915_error_state_buf *m, struct drm_printer p = i915_error_printer(m); i915_params_dump(params, &p); + intel_display_params_dump(m->i915, &p); } static void err_print_pciid(struct drm_i915_error_state_buf *m, @@ -1027,6 +1029,7 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma) static void cleanup_params(struct i915_gpu_coredump *error) { i915_params_free(&error->params); + intel_display_params_free(&error->display_params); } static void cleanup_uc(struct intel_uc_coredump *uc) @@ -1988,6 +1991,7 @@ static void capture_gen(struct i915_gpu_coredump *error) error->suspend_count = i915->suspend_count; i915_params_copy(&error->params, &i915->params); + intel_display_params_copy(&error->display_params); memcpy(&error->device_info, INTEL_INFO(i915), sizeof(error->device_info)); @@ -2137,7 +2141,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du return error; } -struct i915_gpu_coredump * +static struct i915_gpu_coredump * i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags) { static DEFINE_MUTEX(capture_mutex); @@ -2174,7 +2178,7 @@ void i915_error_state_store(struct i915_gpu_coredump *error) ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) { pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n"); - pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n"); + pr_info("Please see https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html for details.\n"); pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n"); pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n", @@ -2208,7 +2212,7 @@ void i915_capture_error_state(struct intel_gt *gt, i915_gpu_coredump_put(error); } -struct i915_gpu_coredump * +static struct i915_gpu_coredump * i915_first_error_state(struct drm_i915_private *i915) { struct i915_gpu_coredump *error; @@ -2375,3 +2379,184 @@ void intel_klog_error_capture(struct intel_gt *gt, drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err); } #endif + +static ssize_t gpu_state_read(struct file *file, char __user *ubuf, + size_t count, loff_t *pos) +{ + struct i915_gpu_coredump *error; + ssize_t ret; + void *buf; + + error = file->private_data; + if (!error) + return 0; + + /* Bounce buffer required because of kernfs __user API convenience. 
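/*
 * One detail of the copy-out below that reads inverted at first glance:
 * copy_to_user() returns the number of bytes it could NOT copy, so zero
 * means complete success:
 *
 *	if (!copy_to_user(ubuf, buf, ret))
 *		*pos += ret;		// full copy, advance file position
 *	else
 *		ret = -EFAULT;		// user buffer faulted
 */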
*/ + buf = kmalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count); + if (ret <= 0) + goto out; + + if (!copy_to_user(ubuf, buf, ret)) + *pos += ret; + else + ret = -EFAULT; + +out: + kfree(buf); + return ret; +} + +static int gpu_state_release(struct inode *inode, struct file *file) +{ + i915_gpu_coredump_put(file->private_data); + return 0; +} + +static int i915_gpu_info_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *i915 = inode->i_private; + struct i915_gpu_coredump *gpu; + intel_wakeref_t wakeref; + + gpu = NULL; + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE); + + if (IS_ERR(gpu)) + return PTR_ERR(gpu); + + file->private_data = gpu; + return 0; +} + +static const struct file_operations i915_gpu_info_fops = { + .owner = THIS_MODULE, + .open = i915_gpu_info_open, + .read = gpu_state_read, + .llseek = default_llseek, + .release = gpu_state_release, +}; + +static ssize_t +i915_error_state_write(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + struct i915_gpu_coredump *error = filp->private_data; + + if (!error) + return 0; + + drm_dbg(&error->i915->drm, "Resetting error state\n"); + i915_reset_error_state(error->i915); + + return cnt; +} + +static int i915_error_state_open(struct inode *inode, struct file *file) +{ + struct i915_gpu_coredump *error; + + error = i915_first_error_state(inode->i_private); + if (IS_ERR(error)) + return PTR_ERR(error); + + file->private_data = error; + return 0; +} + +static const struct file_operations i915_error_state_fops = { + .owner = THIS_MODULE, + .open = i915_error_state_open, + .read = gpu_state_read, + .write = i915_error_state_write, + .llseek = default_llseek, + .release = gpu_state_release, +}; + +void i915_gpu_error_debugfs_register(struct drm_i915_private *i915) +{ + struct drm_minor *minor = i915->drm.primary; + + debugfs_create_file("i915_error_state", 0644, minor->debugfs_root, i915, + &i915_error_state_fops); + debugfs_create_file("i915_gpu_info", 0644, minor->debugfs_root, i915, + &i915_gpu_info_fops); +} + +static ssize_t error_state_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + + struct device *kdev = kobj_to_dev(kobj); + struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); + struct i915_gpu_coredump *gpu; + ssize_t ret = 0; + + /* + * FIXME: Concurrent clients triggering resets and reading + clearing + * dumps can cause inconsistent sysfs reads when a user calls in with a + * non-zero offset to complete a prior partial read but the + * gpu_coredump has been cleared or replaced. 
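/*
 * With this refactor both user-visible error-dump interfaces are created
 * from i915_gpu_error.c: the debugfs pair registered just above
 * (i915_error_state and i915_gpu_info, conventionally under
 * /sys/kernel/debug/dri/<minor>/) and the sysfs "error" node moved in
 * from i915_sysfs.c (/sys/class/drm/card<N>/error, matching the pr_info
 * hint earlier in this file). Reading returns the captured dump, if any;
 * writing to i915_error_state or to the sysfs node resets it.
 */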
+ */ + + gpu = i915_first_error_state(i915); + if (IS_ERR(gpu)) { + ret = PTR_ERR(gpu); + } else if (gpu) { + ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count); + i915_gpu_coredump_put(gpu); + } else { + const char *str = "No error state collected\n"; + size_t len = strlen(str); + + if (off < len) { + ret = min_t(size_t, count, len - off); + memcpy(buf, str + off, ret); + } + } + + return ret; +} + +static ssize_t error_state_write(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *kdev = kobj_to_dev(kobj); + struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + + drm_dbg(&dev_priv->drm, "Resetting error state\n"); + i915_reset_error_state(dev_priv); + + return count; +} + +static const struct bin_attribute error_state_attr = { + .attr.name = "error", + .attr.mode = S_IRUSR | S_IWUSR, + .size = 0, + .read = error_state_read, + .write = error_state_write, +}; + +void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915) +{ + struct device *kdev = i915->drm.primary->kdev; + + if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr)) + drm_err(&i915->drm, "error_state sysfs setup failed\n"); +} + +void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915) +{ + struct device *kdev = i915->drm.primary->kdev; + + sysfs_remove_bin_file(&kdev->kobj, &error_state_attr); +} diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 48f6c00402..7c255bb1c3 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -15,6 +15,7 @@ #include #include "display/intel_display_device.h" +#include "display/intel_display_params.h" #include "gt/intel_engine.h" #include "gt/intel_engine_types.h" #include "gt/intel_gt_types.h" @@ -215,6 +216,7 @@ struct i915_gpu_coredump { struct intel_display_runtime_info display_runtime_info; struct intel_driver_caps driver_caps; struct i915_params params; + struct intel_display_params display_params; struct intel_overlay_error_state *overlay; @@ -283,14 +285,7 @@ static inline void intel_klog_error_capture(struct intel_gt *gt, __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); -void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m, - const struct intel_engine_cs *engine, - const struct i915_vma_coredump *vma); -struct i915_vma_coredump * -intel_gpu_error_find_batch(const struct intel_engine_coredump *ee); - -struct i915_gpu_coredump *i915_gpu_coredump(struct intel_gt *gt, - intel_engine_mask_t engine_mask, u32 dump_flags); + void i915_capture_error_state(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags); @@ -338,10 +333,13 @@ static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu) kref_put(&gpu->ref, __i915_gpu_coredump_free); } -struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915); void i915_reset_error_state(struct drm_i915_private *i915); void i915_disable_error_state(struct drm_i915_private *i915, int err); +void i915_gpu_error_debugfs_register(struct drm_i915_private *i915); +void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915); +void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915); + #else __printf(2, 3) @@ -409,12 +407,6 @@ static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu) { } -static inline struct i915_gpu_coredump * -i915_first_error_state(struct drm_i915_private *i915) -{ - return ERR_PTR(-ENODEV); -} - static inline void 
i915_reset_error_state(struct drm_i915_private *i915) { } @@ -424,6 +416,18 @@ static inline void i915_disable_error_state(struct drm_i915_private *i915, { } +static inline void i915_gpu_error_debugfs_register(struct drm_i915_private *i915) +{ +} + +static inline void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915) +{ +} + +static inline void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915) +{ +} + #endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */ #endif /* _I915_GPU_ERROR_H_ */ diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c index 1b021a4902..ba82277254 100644 --- a/drivers/gpu/drm/i915/i915_memcpy.c +++ b/drivers/gpu/drm/i915/i915_memcpy.c @@ -23,6 +23,8 @@ */ #include +#include +#include #include #include "i915_memcpy.h" diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 036c4c3ed6..de43048543 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -67,33 +67,9 @@ i915_param_named(modeset, int, 0400, "Use kernel modesetting [KMS] (0=disable, " "1=on, -1=force vga console preference [default])"); -i915_param_named_unsafe(enable_dc, int, 0400, - "Enable power-saving display C-states. " - "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; " - "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)"); - -i915_param_named_unsafe(enable_fbc, int, 0400, - "Enable frame buffer compression for power savings " - "(default: -1 (use per-chip default))"); - -i915_param_named_unsafe(lvds_channel_mode, int, 0400, - "Specify LVDS channel mode " - "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); - -i915_param_named_unsafe(panel_use_ssc, int, 0400, - "Use Spread Spectrum Clock with panels [LVDS/eDP] " - "(default: auto from VBT)"); - -i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400, - "Override/Ignore selection of SDVO panel mode in the VBT " - "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); - i915_param_named_unsafe(reset, uint, 0400, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])"); -i915_param_named_unsafe(vbt_firmware, charp, 0400, - "Load VBT from specified file under /lib/firmware"); - #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) i915_param_named(error_capture, bool, 0400, "Record the GPU state following a hang. " @@ -106,55 +82,10 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0400, "WARNING: Disabling this can cause system wide hangs. " "(default: true)"); -i915_param_named_unsafe(enable_psr, int, 0400, - "Enable PSR " - "(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) " - "Default: -1 (use per-chip default)"); - -i915_param_named(psr_safest_params, bool, 0400, - "Replace PSR VBT parameters by the safest and not optimal ones. This " - "is helpful to detect if PSR issues are related to bad values set in " - " VBT. (0=use VBT parameters, 1=use safest parameters)"); - -i915_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400, - "Enable PSR2 selective fetch " - "(0=disabled, 1=enabled) " - "Default: 0"); - -i915_param_named_unsafe(enable_sagv, bool, 0600, - "Enable system agent voltage/frequency scaling (SAGV) (default: true)"); - i915_param_named_unsafe(force_probe, charp, 0400, "Force probe options for specified supported devices. 
" "See CONFIG_DRM_I915_FORCE_PROBE for details."); -i915_param_named_unsafe(disable_power_well, int, 0400, - "Disable display power wells when possible " - "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); - -i915_param_named_unsafe(enable_ips, int, 0400, "Enable IPS (default: true)"); - -i915_param_named_unsafe(enable_dpt, bool, 0400, - "Enable display page table (DPT) (default: true)"); - -i915_param_named_unsafe(load_detect_test, bool, 0400, - "Force-enable the VGA load detect code for testing (default:false). " - "For developers only."); - -i915_param_named_unsafe(force_reset_modeset_test, bool, 0400, - "Force a modeset during gpu reset for testing (default:false). " - "For developers only."); - -i915_param_named_unsafe(invert_brightness, int, 0400, - "Invert backlight brightness " - "(-1 force normal, 0 machine defaults, 1 force inversion), please " - "report PCI device ID, subsystem vendor and subsystem device ID " - "to dri-devel@lists.freedesktop.org, if your machine needs it. " - "It will then be included in an upcoming module version."); - -i915_param_named(disable_display, bool, 0400, - "Disable display (default: false)"); - i915_param_named(memtest, bool, 0400, "Perform a read/write test of all device memory on module load (default: off)"); @@ -162,19 +93,6 @@ i915_param_named(mmio_debug, int, 0400, "Enable the MMIO debug code for the first N failures (default: off). " "This may negatively affect performance."); -/* Special case writable file */ -i915_param_named(verbose_state_checks, bool, 0600, - "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions."); - -i915_param_named_unsafe(nuclear_pageflip, bool, 0400, - "Force enable atomic functionality on platforms that don't have full support yet."); - -/* WA to get away with the default setting in VBT for early platforms.Will be removed */ -i915_param_named_unsafe(edp_vswing, int, 0400, - "Ignore/Override vswing pre-emph table selection from VBT " - "(0=use value from vbt [default], 1=low power swing(200mV)," - "2=default swing(400mV))"); - i915_param_named_unsafe(enable_guc, int, 0400, "Enable GuC load for GuC submission and/or HuC load. " "Required functionality can be selected using bitmask values. " @@ -196,18 +114,11 @@ i915_param_named_unsafe(dmc_firmware_path, charp, 0400, i915_param_named_unsafe(gsc_firmware_path, charp, 0400, "GSC firmware path to use instead of the default one"); -i915_param_named_unsafe(enable_dp_mst, bool, 0400, - "Enable multi-stream transport (MST) for new DisplayPort sinks. 
(default: true)"); - #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) i915_param_named_unsafe(inject_probe_failure, uint, 0400, "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); #endif -i915_param_named(enable_dpcd_backlight, int, 0400, - "Enable support for DPCD backlight control" - "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)"); - #if IS_ENABLED(CONFIG_DRM_I915_GVT) i915_param_named(enable_gvt, bool, 0400, "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index d5194b039a..1315d7fac8 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -46,21 +46,7 @@ struct drm_printer; * debugfs file */ #define I915_PARAMS_FOR_EACH(param) \ - param(char *, vbt_firmware, NULL, 0400) \ param(int, modeset, -1, 0400) \ - param(int, lvds_channel_mode, 0, 0400) \ - param(int, panel_use_ssc, -1, 0600) \ - param(int, vbt_sdvo_panel_type, -1, 0400) \ - param(int, enable_dc, -1, 0400) \ - param(int, enable_fbc, -1, 0600) \ - param(int, enable_psr, -1, 0600) \ - param(bool, enable_dpt, true, 0400) \ - param(bool, psr_safest_params, false, 0400) \ - param(bool, enable_psr2_sel_fetch, true, 0400) \ - param(bool, enable_sagv, true, 0600) \ - param(int, disable_power_well, -1, 0400) \ - param(int, enable_ips, 1, 0600) \ - param(int, invert_brightness, 0, 0600) \ param(int, enable_guc, -1, 0400) \ param(int, guc_log_level, -1, 0400) \ param(char *, guc_firmware_path, NULL, 0400) \ @@ -69,23 +55,15 @@ struct drm_printer; param(char *, gsc_firmware_path, NULL, 0400) \ param(bool, memtest, false, 0400) \ param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \ - param(int, edp_vswing, 0, 0400) \ param(unsigned int, reset, 3, 0600) \ param(unsigned int, inject_probe_failure, 0, 0) \ - param(int, enable_dpcd_backlight, -1, 0600) \ param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \ param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \ param(unsigned int, lmem_size, 0, 0400) \ param(unsigned int, lmem_bar_size, 0, 0400) \ /* leave bools at the end to not create holes */ \ param(bool, enable_hangcheck, true, 0600) \ - param(bool, load_detect_test, false, 0600) \ - param(bool, force_reset_modeset_test, false, 0600) \ param(bool, error_capture, true, IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) ? 0600 : 0) \ - param(bool, disable_display, false, 0400) \ - param(bool, verbose_state_checks, true, 0) \ - param(bool, nuclear_pageflip, false, 0400) \ - param(bool, enable_dp_mst, true, 0600) \ param(bool, enable_gvt, false, IS_ENABLED(CONFIG_DRM_I915_GVT) ? 0400 : 0) #define MEMBER(T, member, ...) 
T member; diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 2d695818f0..bd9d812b1a 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3225,7 +3225,7 @@ u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915) struct intel_gt *gt = to_gt(i915); /* Wa_18013179988 */ - if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) { + if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) { intel_wakeref_t wakeref; u32 reg, shift; diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h index 13b1ae9b96..46445248d1 100644 --- a/drivers/gpu/drm/i915/i915_perf_types.h +++ b/drivers/gpu/drm/i915/i915_perf_types.h @@ -291,7 +291,8 @@ struct i915_perf_stream { int size_exponent; /** - * @ptr_lock: Locks reads and writes to all head/tail state + * @oa_buffer.ptr_lock: Locks reads and writes to all + * head/tail state * * Consider: the head and tail pointer state needs to be read * consistently from a hrtimer callback (atomic context) and @@ -313,7 +314,8 @@ struct i915_perf_stream { spinlock_t ptr_lock; /** - * @head: Although we can always read back the head pointer register, + * @oa_buffer.head: Although we can always read back + * the head pointer register, * we prefer to avoid trusting the HW state, just to avoid any * risk that some hardware condition could * somehow bump the * head pointer unpredictably and cause us to forward the wrong @@ -322,7 +324,8 @@ struct i915_perf_stream { u32 head; /** - * @tail: The last verified tail that can be read by userspace. + * @oa_buffer.tail: The last verified tail that can be + * read by userspace. */ u32 tail; } oa_buffer; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index f861863eb7..21eb0c5b32 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -31,6 +31,16 @@ static cpumask_t i915_pmu_cpumask; static unsigned int i915_pmu_target_cpu = -1; +static struct i915_pmu *event_to_pmu(struct perf_event *event) +{ + return container_of(event->pmu, struct i915_pmu, base); +} + +static struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu) +{ + return container_of(pmu, struct drm_i915_private, pmu); +} + static u8 engine_config_sample(u64 config) { return config & I915_PMU_SAMPLE_MASK; @@ -141,7 +151,7 @@ static u32 frequency_enabled_mask(void) static bool pmu_needs_timer(struct i915_pmu *pmu) { - struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); + struct drm_i915_private *i915 = pmu_to_i915(pmu); u32 enable; /* @@ -213,19 +223,19 @@ static u64 get_rc6(struct intel_gt *gt) struct drm_i915_private *i915 = gt->i915; const unsigned int gt_id = gt->info.id; struct i915_pmu *pmu = &i915->pmu; + intel_wakeref_t wakeref; unsigned long flags; - bool awake = false; u64 val; - if (intel_gt_pm_get_if_awake(gt)) { + wakeref = intel_gt_pm_get_if_awake(gt); + if (wakeref) { val = __get_rc6(gt); - intel_gt_pm_put_async(gt); - awake = true; + intel_gt_pm_put_async(gt, wakeref); } spin_lock_irqsave(&pmu->lock, flags); - if (awake) { + if (wakeref) { store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val); } else { /* @@ -251,7 +261,7 @@ static u64 get_rc6(struct intel_gt *gt) static void init_rc6(struct i915_pmu *pmu) { - struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); + struct drm_i915_private *i915 = pmu_to_i915(pmu); struct intel_gt *gt; unsigned int i; @@ -429,12 +439,14 @@ frequency_sample(struct intel_gt *gt, 
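/*
 * The I915_PARAMS_FOR_EACH table above is an X-macro: a single list of
 * parameters expanded several times with different definitions of
 * param(). All entries removed from it here are display-related and move
 * to display/intel_display_params.c (keeping, as far as this patch shows,
 * equivalent semantics). A self-contained sketch of the technique, with
 * illustrative names that are not the driver's:
 */
#define PARAMS_FOR_EACH(param) \
	param(int,  modeset, -1) \
	param(bool, memtest, false)

#define MEMBER(T, name, dflt) T name;
struct params_sketch {
	PARAMS_FOR_EACH(MEMBER)		/* expands to the struct fields */
};
#undef MEMBER

#define INIT(T, name, dflt) .name = (dflt),
static const struct params_sketch params_defaults = {
	PARAMS_FOR_EACH(INIT)		/* expands to the default values */
};
#undef INIT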
unsigned int period_ns) const unsigned int gt_id = gt->info.id; struct i915_pmu *pmu = &i915->pmu; struct intel_rps *rps = >->rps; + intel_wakeref_t wakeref; if (!frequency_sampling_enabled(pmu, gt_id)) return; /* Report 0/0 (actual/requested) frequency while parked. */ - if (!intel_gt_pm_get_if_awake(gt)) + wakeref = intel_gt_pm_get_if_awake(gt); + if (!wakeref) return; if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) { @@ -463,14 +475,13 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns) period_ns / 1000); } - intel_gt_pm_put_async(gt); + intel_gt_pm_put_async(gt, wakeref); } static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) { - struct drm_i915_private *i915 = - container_of(hrtimer, struct drm_i915_private, pmu.timer); - struct i915_pmu *pmu = &i915->pmu; + struct i915_pmu *pmu = container_of(hrtimer, struct i915_pmu, timer); + struct drm_i915_private *i915 = pmu_to_i915(pmu); unsigned int period_ns; struct intel_gt *gt; unsigned int i; @@ -505,8 +516,8 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) static void i915_pmu_event_destroy(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); + struct i915_pmu *pmu = event_to_pmu(event); + struct drm_i915_private *i915 = pmu_to_i915(pmu); drm_WARN_ON(&i915->drm, event->parent); @@ -572,8 +583,8 @@ config_status(struct drm_i915_private *i915, u64 config) static int engine_event_init(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); + struct i915_pmu *pmu = event_to_pmu(event); + struct drm_i915_private *i915 = pmu_to_i915(pmu); struct intel_engine_cs *engine; engine = intel_engine_lookup_user(i915, engine_event_class(event), @@ -586,9 +597,8 @@ static int engine_event_init(struct perf_event *event) static int i915_pmu_event_init(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); - struct i915_pmu *pmu = &i915->pmu; + struct i915_pmu *pmu = event_to_pmu(event); + struct drm_i915_private *i915 = pmu_to_i915(pmu); int ret; if (pmu->closed) @@ -628,9 +638,8 @@ static int i915_pmu_event_init(struct perf_event *event) static u64 __i915_pmu_event_read(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); - struct i915_pmu *pmu = &i915->pmu; + struct i915_pmu *pmu = event_to_pmu(event); + struct drm_i915_private *i915 = pmu_to_i915(pmu); u64 val = 0; if (is_engine_event(event)) { @@ -686,10 +695,8 @@ static u64 __i915_pmu_event_read(struct perf_event *event) static void i915_pmu_event_read(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); + struct i915_pmu *pmu = event_to_pmu(event); struct hw_perf_event *hwc = &event->hw; - struct i915_pmu *pmu = &i915->pmu; u64 prev, new; if (pmu->closed) { @@ -707,10 +714,9 @@ static void i915_pmu_event_read(struct perf_event *event) static void i915_pmu_enable(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); + struct i915_pmu *pmu = event_to_pmu(event); + struct drm_i915_private *i915 = pmu_to_i915(pmu); const unsigned int bit = event_bit(event); - struct i915_pmu *pmu = &i915->pmu; unsigned long flags; if (bit == -1) @@ -771,10 +777,9 @@ update: static void i915_pmu_disable(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, 
typeof(*i915), pmu.base); + struct i915_pmu *pmu = event_to_pmu(event); + struct drm_i915_private *i915 = pmu_to_i915(pmu); const unsigned int bit = event_bit(event); - struct i915_pmu *pmu = &i915->pmu; unsigned long flags; if (bit == -1) @@ -818,9 +823,7 @@ static void i915_pmu_disable(struct perf_event *event) static void i915_pmu_event_start(struct perf_event *event, int flags) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); - struct i915_pmu *pmu = &i915->pmu; + struct i915_pmu *pmu = event_to_pmu(event); if (pmu->closed) return; @@ -848,9 +851,7 @@ out: static int i915_pmu_event_add(struct perf_event *event, int flags) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); - struct i915_pmu *pmu = &i915->pmu; + struct i915_pmu *pmu = event_to_pmu(event); if (pmu->closed) return -ENODEV; @@ -982,7 +983,7 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name, static struct attribute ** create_event_attributes(struct i915_pmu *pmu) { - struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); + struct drm_i915_private *i915 = pmu_to_i915(pmu); static const struct { unsigned int counter; const char *name; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b89cf2041f..6f1c4e2a99 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -195,8 +195,6 @@ #define DPIO_SFR_BYPASS (1 << 1) #define DPIO_CMNRST (1 << 0) -#define DPIO_PHY(pipe) ((pipe) >> 1) - /* * Per pipe/PLL DPIO regs */ diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index e88bb4f043..613decd477 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -155,81 +155,6 @@ static const struct bin_attribute dpf_attrs_1 = { .private = (void *)1 }; -#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) - -static ssize_t error_state_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t off, size_t count) -{ - - struct device *kdev = kobj_to_dev(kobj); - struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); - struct i915_gpu_coredump *gpu; - ssize_t ret = 0; - - /* - * FIXME: Concurrent clients triggering resets and reading + clearing - * dumps can cause inconsistent sysfs reads when a user calls in with a - * non-zero offset to complete a prior partial read but the - * gpu_coredump has been cleared or replaced. 
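/*
 * The i915_pmu.c hunks above replace repeated open-coded container_of()
 * chains with the small event_to_pmu()/pmu_to_i915() accessors. The
 * idiom in isolation, with hypothetical types:
 */
#include <linux/container_of.h>

struct inner {
	int x;
};

struct outer {
	int data;
	struct inner member;
};

static struct outer *inner_to_outer(struct inner *p)
{
	/* recover the enclosing object from a pointer to one of its fields */
	return container_of(p, struct outer, member);
}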
- */ - - gpu = i915_first_error_state(i915); - if (IS_ERR(gpu)) { - ret = PTR_ERR(gpu); - } else if (gpu) { - ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count); - i915_gpu_coredump_put(gpu); - } else { - const char *str = "No error state collected\n"; - size_t len = strlen(str); - - if (off < len) { - ret = min_t(size_t, count, len - off); - memcpy(buf, str + off, ret); - } - } - - return ret; -} - -static ssize_t error_state_write(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t off, size_t count) -{ - struct device *kdev = kobj_to_dev(kobj); - struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - - drm_dbg(&dev_priv->drm, "Resetting error state\n"); - i915_reset_error_state(dev_priv); - - return count; -} - -static const struct bin_attribute error_state_attr = { - .attr.name = "error", - .attr.mode = S_IRUSR | S_IWUSR, - .size = 0, - .read = error_state_read, - .write = error_state_write, -}; - -static void i915_setup_error_capture(struct device *kdev) -{ - if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr)) - drm_err(&kdev_minor_to_i915(kdev)->drm, - "error_state sysfs setup failed\n"); -} - -static void i915_teardown_error_capture(struct device *kdev) -{ - sysfs_remove_bin_file(&kdev->kobj, &error_state_attr); -} -#else -static void i915_setup_error_capture(struct device *kdev) {} -static void i915_teardown_error_capture(struct device *kdev) {} -#endif - void i915_setup_sysfs(struct drm_i915_private *dev_priv) { struct device *kdev = dev_priv->drm.primary->kdev; @@ -255,7 +180,7 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv) drm_warn(&dev_priv->drm, "failed to register GT sysfs directory\n"); - i915_setup_error_capture(kdev); + i915_gpu_error_sysfs_setup(dev_priv); intel_engines_add_sysfs(dev_priv); } @@ -264,7 +189,7 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv) { struct device *kdev = dev_priv->drm.primary->kdev; - i915_teardown_error_capture(kdev); + i915_gpu_error_sysfs_teardown(dev_priv); device_remove_bin_file(kdev, &dpf_attrs_1); device_remove_bin_file(kdev, &dpf_attrs); diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index c61066498b..f98577967b 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -40,7 +40,7 @@ struct drm_i915_private; struct timer_list; -#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs" +#define FDO_BUG_URL "https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html" #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ __stringify(x), (long)(x)) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index d09aad34ba..b70715b141 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -34,6 +34,7 @@ #include "gt/intel_engine.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_pm.h" #include "gt/intel_gt_requests.h" #include "gt/intel_tlb.h" @@ -103,12 +104,42 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref) static int __i915_vma_active(struct i915_active *ref) { - return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT; + struct i915_vma *vma = active_to_vma(ref); + + if (!i915_vma_tryget(vma)) + return -ENOENT; + + /* + * Exclude global GTT VMA from holding a GT wakeref + * while active, otherwise GPU never goes idle. 
+ */ + if (!i915_vma_is_ggtt(vma)) { + /* + * Since we and our _retire() counterpart can be + * called asynchronously, storing a wakeref tracking + * handle inside struct i915_vma is not safe, and + * there is no other good place for that. Hence, + * use untracked variants of intel_gt_pm_get/put(). + */ + intel_gt_pm_get_untracked(vma->vm->gt); + } + + return 0; } static void __i915_vma_retire(struct i915_active *ref) { - i915_vma_put(active_to_vma(ref)); + struct i915_vma *vma = active_to_vma(ref); + + if (!i915_vma_is_ggtt(vma)) { + /* + * Since we can be called from atomic contexts, + * use an async variant of intel_gt_pm_put(). + */ + intel_gt_pm_put_async_untracked(vma->vm->gt); + } + + i915_vma_put(vma); } static struct i915_vma * @@ -1404,7 +1435,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, struct i915_vma_work *work = NULL; struct dma_fence *moving = NULL; struct i915_vma_resource *vma_res = NULL; - intel_wakeref_t wakeref = 0; + intel_wakeref_t wakeref; unsigned int bound; int err; @@ -1424,8 +1455,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, if (err) return err; - if (flags & PIN_GLOBAL) - wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); + /* + * In case of a global GTT, we must hold a runtime-pm wakeref + * while global PTEs are updated. In other cases, we hold + * the rpm reference while the VMA is active. Since runtime + * resume may require allocations, which are forbidden inside + * vm->mutex, get the first rpm wakeref outside of the mutex. + */ + wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); if (flags & vma->vm->bind_async_flags) { /* lock VM */ @@ -1561,8 +1598,7 @@ err_fence: if (work) dma_fence_work_commit_imm(&work->base); err_rpm: - if (wakeref) - intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); + intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); if (moving) dma_fence_put(moving); diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index e98b6d69a9..9b6d87c8b5 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -41,7 +41,7 @@ * To virtualize GPU resources GVT-g driver depends on hypervisor technology * e.g KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability * and be virtualized within GVT-g device module. More architectural design - * doc is available on https://01.org/group/2230/documentation-list. + * doc is available on https://github.com/intel/gvt-linux/wiki. 
*/ static LIST_HEAD(intel_gvt_devices); diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index 3d1fdea981..60a03340bb 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -216,6 +216,22 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem, return err; } +static const char *region_type_str(u16 type) +{ + switch (type) { + case INTEL_MEMORY_SYSTEM: + return "system"; + case INTEL_MEMORY_LOCAL: + return "local"; + case INTEL_MEMORY_STOLEN_LOCAL: + return "stolen-local"; + case INTEL_MEMORY_STOLEN_SYSTEM: + return "stolen-system"; + default: + return "unknown"; + } +} + struct intel_memory_region * intel_memory_region_create(struct drm_i915_private *i915, resource_size_t start, @@ -244,6 +260,9 @@ intel_memory_region_create(struct drm_i915_private *i915, mem->type = type; mem->instance = instance; + snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u", + region_type_str(type), instance); + mutex_init(&mem->objects.lock); INIT_LIST_HEAD(&mem->objects.list); diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 2953ed5c32..9ba36454e5 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -80,6 +80,7 @@ struct intel_memory_region { u16 instance; enum intel_region_id id; char name[16]; + char uabi_name[16]; bool private; /* not for userspace */ struct { diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 8743153fad..860b51b56a 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -50,184 +50,44 @@ * present for a given platform. 
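/*
 * The intel_runtime_pm.c rewrite that follows drops the hand-rolled
 * stackdepot bookkeeping in favour of the generic ref_tracker library.
 * Minimal usage sketch, limited to the calls this patch relies on:
 */
#include <linux/gfp.h>
#include <linux/ref_tracker.h>

static struct ref_tracker_dir dir;	/* one directory per tracked object */
static struct ref_tracker *trk;

static void ref_tracker_example(void)
{
	ref_tracker_dir_init(&dir, 16, "example");	/* 16 = quarantine depth */
	ref_tracker_alloc(&dir, &trk, GFP_NOWAIT);	/* record acquirer's stack */
	ref_tracker_free(&dir, &trk);			/* match the acquisition */
	ref_tracker_dir_exit(&dir);			/* reports any leaks */
}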
*/ -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) - -#include - -#define STACKDEPTH 8 - -static noinline depot_stack_handle_t __save_depot_stack(void) +static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm) { - unsigned long entries[STACKDEPTH]; - unsigned int n; - - n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); - return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN); + return container_of(rpm, struct drm_i915_private, runtime_pm); } +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { - spin_lock_init(&rpm->debug.lock); - stack_depot_init(); + ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev)); } -static noinline depot_stack_handle_t +static intel_wakeref_t track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { - depot_stack_handle_t stack, *stacks; - unsigned long flags; - - if (rpm->no_wakeref_tracking) + if (!rpm->available || rpm->no_wakeref_tracking) return -1; - stack = __save_depot_stack(); - if (!stack) - return -1; - - spin_lock_irqsave(&rpm->debug.lock, flags); - - if (!rpm->debug.count) - rpm->debug.last_acquire = stack; - - stacks = krealloc(rpm->debug.owners, - (rpm->debug.count + 1) * sizeof(*stacks), - GFP_NOWAIT | __GFP_NOWARN); - if (stacks) { - stacks[rpm->debug.count++] = stack; - rpm->debug.owners = stacks; - } else { - stack = -1; - } - - spin_unlock_irqrestore(&rpm->debug.lock, flags); - - return stack; + return intel_ref_tracker_alloc(&rpm->debug); } static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, - depot_stack_handle_t stack) + intel_wakeref_t wakeref) { - struct drm_i915_private *i915 = container_of(rpm, - struct drm_i915_private, - runtime_pm); - unsigned long flags, n; - bool found = false; - - if (unlikely(stack == -1)) + if (!rpm->available || rpm->no_wakeref_tracking) return; - spin_lock_irqsave(&rpm->debug.lock, flags); - for (n = rpm->debug.count; n--; ) { - if (rpm->debug.owners[n] == stack) { - memmove(rpm->debug.owners + n, - rpm->debug.owners + n + 1, - (--rpm->debug.count - n) * sizeof(stack)); - found = true; - break; - } - } - spin_unlock_irqrestore(&rpm->debug.lock, flags); - - if (drm_WARN(&i915->drm, !found, - "Unmatched wakeref (tracking %lu), count %u\n", - rpm->debug.count, atomic_read(&rpm->wakeref_count))) { - char *buf; - - buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN); - if (!buf) - return; - - stack_depot_snprint(stack, buf, PAGE_SIZE, 2); - DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf); - - stack = READ_ONCE(rpm->debug.last_release); - if (stack) { - stack_depot_snprint(stack, buf, PAGE_SIZE, 2); - DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf); - } - - kfree(buf); - } + intel_ref_tracker_free(&rpm->debug, wakeref); } -static int cmphandle(const void *_a, const void *_b) +static void untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm) { - const depot_stack_handle_t * const a = _a, * const b = _b; - - if (*a < *b) - return -1; - else if (*a > *b) - return 1; - else - return 0; -} - -static void -__print_intel_runtime_pm_wakeref(struct drm_printer *p, - const struct intel_runtime_pm_debug *dbg) -{ - unsigned long i; - char *buf; - - buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN); - if (!buf) - return; - - if (dbg->last_acquire) { - stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2); - drm_printf(p, "Wakeref last acquired:\n%s", buf); - } - - if (dbg->last_release) { - stack_depot_snprint(dbg->last_release, buf, 
PAGE_SIZE, 2); - drm_printf(p, "Wakeref last released:\n%s", buf); - } - - drm_printf(p, "Wakeref count: %lu\n", dbg->count); - - sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL); - - for (i = 0; i < dbg->count; i++) { - depot_stack_handle_t stack = dbg->owners[i]; - unsigned long rep; - - rep = 1; - while (i + 1 < dbg->count && dbg->owners[i + 1] == stack) - rep++, i++; - stack_depot_snprint(stack, buf, PAGE_SIZE, 2); - drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf); - } - - kfree(buf); -} - -static noinline void -__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug, - struct intel_runtime_pm_debug *saved) -{ - *saved = *debug; - - debug->owners = NULL; - debug->count = 0; - debug->last_release = __save_depot_stack(); -} - -static void -dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug) -{ - if (debug->count) { - struct drm_printer p = drm_debug_printer("i915"); - - __print_intel_runtime_pm_wakeref(&p, debug); - } - - kfree(debug->owners); + ref_tracker_dir_exit(&rpm->debug); } static noinline void __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm) { - struct intel_runtime_pm_debug dbg = {}; unsigned long flags; if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count, @@ -235,60 +95,14 @@ __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm) flags)) return; - __untrack_all_wakerefs(&rpm->debug, &dbg); + ref_tracker_dir_print_locked(&rpm->debug, INTEL_REFTRACK_PRINT_LIMIT); spin_unlock_irqrestore(&rpm->debug.lock, flags); - - dump_and_free_wakeref_tracking(&dbg); -} - -static noinline void -untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm) -{ - struct intel_runtime_pm_debug dbg = {}; - unsigned long flags; - - spin_lock_irqsave(&rpm->debug.lock, flags); - __untrack_all_wakerefs(&rpm->debug, &dbg); - spin_unlock_irqrestore(&rpm->debug.lock, flags); - - dump_and_free_wakeref_tracking(&dbg); } void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, struct drm_printer *p) { - struct intel_runtime_pm_debug dbg = {}; - - do { - unsigned long alloc = dbg.count; - depot_stack_handle_t *s; - - spin_lock_irq(&rpm->debug.lock); - dbg.count = rpm->debug.count; - if (dbg.count <= alloc) { - memcpy(dbg.owners, - rpm->debug.owners, - dbg.count * sizeof(*s)); - } - dbg.last_acquire = rpm->debug.last_acquire; - dbg.last_release = rpm->debug.last_release; - spin_unlock_irq(&rpm->debug.lock); - if (dbg.count <= alloc) - break; - - s = krealloc(dbg.owners, - dbg.count * sizeof(*s), - GFP_NOWAIT | __GFP_NOWARN); - if (!s) - goto out; - - dbg.owners = s; - } while (1); - - __print_intel_runtime_pm_wakeref(p, &dbg); - -out: - kfree(dbg.owners); + intel_ref_tracker_show(&rpm->debug, p); } #else @@ -297,14 +111,14 @@ static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { } -static depot_stack_handle_t +static intel_wakeref_t track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { return -1; } static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, - intel_wakeref_t wref) + intel_wakeref_t wakeref) { } @@ -349,9 +163,7 @@ intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock) static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm, bool wakelock) { - struct drm_i915_private *i915 = container_of(rpm, - struct drm_i915_private, - runtime_pm); + struct drm_i915_private *i915 = rpm_to_i915(rpm); int ret; ret = pm_runtime_get_sync(rpm->kdev); @@ -556,9 +368,7 @@ void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t 
wref) */ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm) { - struct drm_i915_private *i915 = container_of(rpm, - struct drm_i915_private, - runtime_pm); + struct drm_i915_private *i915 = rpm_to_i915(rpm); struct device *kdev = rpm->kdev; /* @@ -611,9 +421,7 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm) void intel_runtime_pm_disable(struct intel_runtime_pm *rpm) { - struct drm_i915_private *i915 = container_of(rpm, - struct drm_i915_private, - runtime_pm); + struct drm_i915_private *i915 = rpm_to_i915(rpm); struct device *kdev = rpm->kdev; /* Transfer rpm ownership back to core */ @@ -628,9 +436,7 @@ void intel_runtime_pm_disable(struct intel_runtime_pm *rpm) void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm) { - struct drm_i915_private *i915 = container_of(rpm, - struct drm_i915_private, - runtime_pm); + struct drm_i915_private *i915 = rpm_to_i915(rpm); int count = atomic_read(&rpm->wakeref_count); intel_wakeref_auto_fini(&rpm->userfault_wakeref); @@ -639,14 +445,17 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm) "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n", intel_rpm_raw_wakeref_count(count), intel_rpm_wakelock_count(count)); +} +void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm) +{ + intel_runtime_pm_driver_release(rpm); untrack_all_intel_runtime_pm_wakerefs(rpm); } void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm) { - struct drm_i915_private *i915 = - container_of(rpm, struct drm_i915_private, runtime_pm); + struct drm_i915_private *i915 = rpm_to_i915(rpm); struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct device *kdev = &pdev->dev; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index f79cda7a25..de3579d399 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -11,8 +11,6 @@ #include "intel_wakeref.h" -#include "i915_utils.h" - struct device; struct drm_i915_private; struct drm_printer; @@ -77,15 +75,7 @@ struct intel_runtime_pm { * paired rpm_put) we can remove corresponding pairs of and keep * the array trimmed to active wakerefs. 
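/*
 * Two consequences of the conversion surface around this point. First,
 * the one-shot leak dump moves out of intel_runtime_pm_driver_release()
 * into the new intel_runtime_pm_driver_last_release(), presumably because
 * the plain release path can be reached more than once (the i915_driver.c
 * hunk earlier switches i915_driver_shutdown() to the _last_ variant).
 * Second, intel_wakeref_t becomes the struct ref_tracker pointer cast to
 * unsigned long, with -1 (INTEL_WAKEREF_DEF) as the "tracking
 * unavailable" sentinel:
 *
 *	intel_wakeref_t handle = intel_ref_tracker_alloc(&dir);
 *	...
 *	intel_ref_tracker_free(&dir, handle);	// maps -1 back to NULL
 */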
*/ - struct intel_runtime_pm_debug { - spinlock_t lock; - - depot_stack_handle_t last_acquire; - depot_stack_handle_t last_release; - - depot_stack_handle_t *owners; - unsigned long count; - } debug; + struct ref_tracker_dir debug; #endif }; @@ -189,6 +179,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm); void intel_runtime_pm_enable(struct intel_runtime_pm *rpm); void intel_runtime_pm_disable(struct intel_runtime_pm *rpm); void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm); +void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm); diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 623a690893..dea2f63184 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -99,7 +99,8 @@ static void __intel_wakeref_put_work(struct work_struct *wrk) void __intel_wakeref_init(struct intel_wakeref *wf, struct drm_i915_private *i915, const struct intel_wakeref_ops *ops, - struct intel_wakeref_lockclass *key) + struct intel_wakeref_lockclass *key, + const char *name) { wf->i915 = i915; wf->ops = ops; @@ -111,6 +112,10 @@ void __intel_wakeref_init(struct intel_wakeref *wf, INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work); lockdep_init_map(&wf->work.work.lockdep_map, "wakeref.work", &key->work, 0); + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF) + ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, name); +#endif } int intel_wakeref_wait_for_idle(struct intel_wakeref *wf) @@ -191,3 +196,31 @@ void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf) intel_wakeref_auto(wf, 0); INTEL_WAKEREF_BUG_ON(wf->wakeref); } + +void intel_ref_tracker_show(struct ref_tracker_dir *dir, + struct drm_printer *p) +{ + const size_t buf_size = PAGE_SIZE; + char *buf, *sb, *se; + size_t count; + + buf = kmalloc(buf_size, GFP_NOWAIT); + if (!buf) + return; + + count = ref_tracker_dir_snprint(dir, buf, buf_size); + if (!count) + goto free; + /* printk does not like big buffers, so we split it */ + for (sb = buf; *sb; sb = se + 1) { + se = strchrnul(sb, '\n'); + drm_printf(p, "%.*s", (int)(se - sb + 1), sb); + if (!*se) + break; + } + if (count >= buf_size) + drm_printf(p, "\n...dropped %zd extra bytes of leak report.\n", + count + 1 - buf_size); +free: + kfree(buf); +} diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index ec881b0973..68aa3be482 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -7,16 +7,25 @@ #ifndef INTEL_WAKEREF_H #define INTEL_WAKEREF_H +#include + #include #include #include #include #include #include +#include +#include #include #include #include +typedef unsigned long intel_wakeref_t; + +#define INTEL_REFTRACK_DEAD_COUNT 16 +#define INTEL_REFTRACK_PRINT_LIMIT 16 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) #define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr) #else @@ -26,8 +35,6 @@ struct intel_runtime_pm; struct intel_wakeref; -typedef depot_stack_handle_t intel_wakeref_t; - struct intel_wakeref_ops { int (*get)(struct intel_wakeref *wf); int (*put)(struct intel_wakeref *wf); @@ -43,6 +50,10 @@ struct intel_wakeref { const struct intel_wakeref_ops *ops; struct delayed_work work; + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF) + struct ref_tracker_dir debug; +#endif }; struct intel_wakeref_lockclass { @@ -53,11 +64,12 @@ struct 
intel_wakeref_lockclass { void __intel_wakeref_init(struct intel_wakeref *wf, struct drm_i915_private *i915, const struct intel_wakeref_ops *ops, - struct intel_wakeref_lockclass *key); -#define intel_wakeref_init(wf, i915, ops) do { \ + struct intel_wakeref_lockclass *key, + const char *name); +#define intel_wakeref_init(wf, i915, ops, name) do { \ static struct intel_wakeref_lockclass __key; \ \ - __intel_wakeref_init((wf), (i915), (ops), &__key); \ + __intel_wakeref_init((wf), (i915), (ops), &__key, name); \ } while (0) int __intel_wakeref_get_first(struct intel_wakeref *wf); @@ -261,6 +273,57 @@ __intel_wakeref_defer_park(struct intel_wakeref *wf) */ int intel_wakeref_wait_for_idle(struct intel_wakeref *wf); +#define INTEL_WAKEREF_DEF ((intel_wakeref_t)(-1)) + +static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir) +{ + struct ref_tracker *user = NULL; + + ref_tracker_alloc(dir, &user, GFP_NOWAIT); + + return (intel_wakeref_t)user ?: INTEL_WAKEREF_DEF; +} + +static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir, + intel_wakeref_t handle) +{ + struct ref_tracker *user; + + user = (handle == INTEL_WAKEREF_DEF) ? NULL : (void *)handle; + + ref_tracker_free(dir, &user); +} + +void intel_ref_tracker_show(struct ref_tracker_dir *dir, + struct drm_printer *p); + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF) + +static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf) +{ + return intel_ref_tracker_alloc(&wf->debug); +} + +static inline void intel_wakeref_untrack(struct intel_wakeref *wf, + intel_wakeref_t handle) +{ + intel_ref_tracker_free(&wf->debug, handle); +} + +#else + +static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf) +{ + return -1; +} + +static inline void intel_wakeref_untrack(struct intel_wakeref *wf, + intel_wakeref_t handle) +{ +} + +#endif + struct intel_wakeref_auto { struct drm_i915_private *i915; struct timer_list timer; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index dc327cf40b..75278e78ca 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -199,6 +199,9 @@ int intel_pxp_init(struct drm_i915_private *i915) struct intel_gt *gt; bool is_full_feature = false; + if (intel_gt_is_wedged(to_gt(i915))) + return -ENOTCONN; + /* * NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since * we still need it if PXP's backend tee transport is needed. @@ -303,6 +306,8 @@ static int __pxp_global_teardown_final(struct intel_pxp *pxp) if (!pxp->arb_is_valid) return 0; + + drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for suspend/fini"); /* * To ensure synchronous and coherent session teardown completion * in response to suspend or shutdown triggers, don't use a worker. @@ -324,6 +329,8 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp) if (pxp->arb_is_valid) return 0; + + drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for restart"); /* * The arb-session is currently inactive and we are doing a reset and restart * due to a runtime event. Use the worker that was designed for this. 
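Stepping back to the i915 hunks above: the repeated container_of() replacement implies a trivial helper, and the ref_tracker conversion means every wakeref acquire must carry the handle returned by intel_wakeref_track() through to intel_wakeref_untrack(). A minimal sketch of both, assuming the runtime_pm field name visible in the container_of() calls being replaced; rpm_to_i915() is introduced elsewhere in this patch, and example_acquire()/example_release() are hypothetical call sites, not code from the patch:

static inline struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
{
	return container_of(rpm, struct drm_i915_private, runtime_pm);
}

static intel_wakeref_t example_acquire(struct intel_wakeref *wf)
{
	/* ...take the underlying wakeref first... */
	return intel_wakeref_track(wf); /* ref_tracker handle; -1 (INTEL_WAKEREF_DEF) when tracking is compiled out or allocation fails */
}

static void example_release(struct intel_wakeref *wf, intel_wakeref_t wakeref)
{
	intel_wakeref_untrack(wf, wakeref); /* retire the tracker entry */
	/* ...then drop the underlying wakeref... */
}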
@@ -332,8 +339,11 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp) timeout = intel_pxp_get_backend_timeout_ms(pxp); - if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) + if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) { + drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: restart backend timed out (%d ms)", + timeout); return -ETIMEDOUT; + } return 0; } @@ -414,10 +424,12 @@ int intel_pxp_start(struct intel_pxp *pxp) int ret = 0; ret = intel_pxp_get_readiness_status(pxp, PXP_READINESS_TIMEOUT); - if (ret < 0) + if (ret < 0) { + drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: tried but not-avail (%d)", ret); return ret; - else if (ret > 1) + } else if (ret > 1) { return -EIO; /* per UAPI spec, user may retry later */ + } mutex_lock(&pxp->arb_mutex); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c index 91e9622c07..d81750b9bd 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c @@ -40,11 +40,12 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir) GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) { /* immediately mark PXP as inactive on termination */ intel_pxp_mark_termination_in_progress(pxp); - pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED; + pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED | + PXP_EVENT_TYPE_IRQ; } if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT) - pxp->session_events |= PXP_TERMINATION_COMPLETE; + pxp->session_events |= PXP_TERMINATION_COMPLETE | PXP_EVENT_TYPE_IRQ; if (pxp->session_events) queue_work(system_unbound_wq, &pxp->session_work); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c index 0a3e66b026..091c86e03d 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c @@ -137,8 +137,10 @@ void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_res static void pxp_terminate_complete(struct intel_pxp *pxp) { /* Re-create the arb session after teardown handle complete */ - if (fetch_and_zero(&pxp->hw_state_invalidated)) + if (fetch_and_zero(&pxp->hw_state_invalidated)) { + drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: creating arb_session after invalidation"); pxp_create_arb_session(pxp); + } complete_all(&pxp->termination); } @@ -157,6 +159,8 @@ static void pxp_session_work(struct work_struct *work) if (!events) return; + drm_dbg(>->i915->drm, "PXP: processing event-flags 0x%08x", events); + if (events & PXP_INVAL_REQUIRED) intel_pxp_invalidate(pxp); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h index 7e11fa8034..07864b584c 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h @@ -124,6 +124,7 @@ struct intel_pxp { #define PXP_TERMINATION_REQUEST BIT(0) #define PXP_TERMINATION_COMPLETE BIT(1) #define PXP_INVAL_REQUIRED BIT(2) +#define PXP_EVENT_TYPE_IRQ BIT(3) }; #endif /* __INTEL_PXP_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/i915_syncmap.c b/drivers/gpu/drm/i915/selftests/i915_syncmap.c index 47f4ae18a1..88fa845e9f 100644 --- a/drivers/gpu/drm/i915/selftests/i915_syncmap.c +++ b/drivers/gpu/drm/i915/selftests/i915_syncmap.c @@ -77,7 +77,7 @@ __sync_print(struct i915_syncmap *p, for_each_set_bit(i, (unsigned long *)&p->bitmap, KSYNCMAP) { buf = __sync_print(__sync_child(p)[i], buf, sz, depth + 1, - last << 1 | 
!!(p->bitmap >> (i + 1)), + last << 1 | ((p->bitmap >> (i + 1)) ? 1 : 0), i); } } diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c index 2990dd4d4a..e14ac0ab13 100644 --- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c +++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c @@ -3,6 +3,8 @@ * Copyright © 2021 Intel Corporation */ +#include + //#include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "i915_drv.h" @@ -12,7 +14,7 @@ #define REDUCED_TIMESLICE 5 #define REDUCED_PREEMPT 10 -#define WAIT_FOR_RESET_TIME 10000 +#define WAIT_FOR_RESET_TIME_MS 10000 struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt) { @@ -91,7 +93,7 @@ int intel_selftest_wait_for_rq(struct i915_request *rq) { long ret; - ret = i915_request_wait(rq, 0, WAIT_FOR_RESET_TIME); + ret = i915_request_wait(rq, 0, msecs_to_jiffies(WAIT_FOR_RESET_TIME_MS)); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 03ea75cd84..4f98aa8a86 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -24,6 +24,8 @@ #include "../i915_selftest.h" +#include "gt/intel_gt.h" + static int intel_fw_table_check(const struct intel_forcewake_range *ranges, unsigned int num_ranges, bool is_watertight) diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c index f32e9f7877..40874ebfb6 100644 --- a/drivers/gpu/drm/i915/soc/intel_gmch.c +++ b/drivers/gpu/drm/i915/soc/intel_gmch.c @@ -33,18 +33,22 @@ int intel_gmch_bridge_setup(struct drm_i915_private *i915) i915->gmch.pdev); } +static int mchbar_reg(struct drm_i915_private *i915) +{ + return GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915; +} + /* Allocate space for the MCH regs if needed, return nonzero on error */ static int intel_alloc_mchbar_resource(struct drm_i915_private *i915) { - int reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp_lo, temp_hi = 0; u64 mchbar_addr; int ret; if (GRAPHICS_VER(i915) >= 4) - pci_read_config_dword(i915->gmch.pdev, reg + 4, &temp_hi); - pci_read_config_dword(i915->gmch.pdev, reg, &temp_lo); + pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915) + 4, &temp_hi); + pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp_lo); mchbar_addr = ((u64)temp_hi << 32) | temp_lo; /* If ACPI doesn't have it, assume we need to allocate it ourselves */ @@ -68,10 +72,10 @@ intel_alloc_mchbar_resource(struct drm_i915_private *i915) } if (GRAPHICS_VER(i915) >= 4) - pci_write_config_dword(i915->gmch.pdev, reg + 4, + pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915) + 4, upper_32_bits(i915->gmch.mch_res.start)); - pci_write_config_dword(i915->gmch.pdev, reg, + pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915), lower_32_bits(i915->gmch.mch_res.start)); return 0; } @@ -79,7 +83,6 @@ intel_alloc_mchbar_resource(struct drm_i915_private *i915) /* Setup MCHBAR if possible, return true if we should disable it again */ void intel_gmch_bar_setup(struct drm_i915_private *i915) { - int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; u32 temp; bool enabled; @@ -92,7 +95,7 @@ void intel_gmch_bar_setup(struct drm_i915_private *i915) pci_read_config_dword(i915->gmch.pdev, DEVEN, &temp); enabled = !!(temp & DEVEN_MCHBAR_EN); } else { - pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp); + pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp); enabled = temp & 1; } @@ -110,15 +113,13 @@ void intel_gmch_bar_setup(struct drm_i915_private *i915) pci_write_config_dword(i915->gmch.pdev, DEVEN, temp | DEVEN_MCHBAR_EN); } else { - pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp); - pci_write_config_dword(i915->gmch.pdev, mchbar_reg, temp | 1); + pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp); + pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915), temp | 1); } } void intel_gmch_bar_teardown(struct drm_i915_private *i915) { - int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915; - if (i915->gmch.mchbar_need_disable) { if (IS_I915G(i915) || IS_I915GM(i915)) { u32 deven_val; @@ -131,10 +132,10 @@ void intel_gmch_bar_teardown(struct drm_i915_private *i915) } else { u32 mchbar_val; - pci_read_config_dword(i915->gmch.pdev, mchbar_reg, + pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &mchbar_val); mchbar_val &= ~1; - pci_write_config_dword(i915->gmch.pdev, mchbar_reg, + pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915), mchbar_val); } } diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c index b98dec3ad8..ffa195560d 100644 --- a/drivers/gpu/drm/i915/vlv_sideband.c +++ b/drivers/gpu/drm/i915/vlv_sideband.c @@ -166,23 +166,6 @@ u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr) return val; } -u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg) -{ - u32 val = 0; - - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port, - SB_CRRDDA_NP, reg, &val); - - return val; -} - -void vlv_iosf_sb_write(struct drm_i915_private *i915, - u8 port, u32 reg, u32 val) -{ - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port, - SB_CRWRDA_NP, reg, &val); -} - u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg) { u32 val = 0; @@ -227,9 +210,9 @@ static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy p return IOSF_PORT_DPIO; } -u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg) +u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg) { - u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); + u32 port = vlv_dpio_phy_iosf_port(i915, phy); u32 val = 0; vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val); @@ -239,16 +222,16 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg) * so ideally we should check the register offset instead... 
*/ drm_WARN(&i915->drm, val == 0xffffffff, - "DPIO read pipe %c reg 0x%x == 0x%x\n", - pipe_name(pipe), reg, val); + "DPIO PHY%d read reg 0x%x == 0x%x\n", + phy, reg, val); return val; } void vlv_dpio_write(struct drm_i915_private *i915, - enum pipe pipe, int reg, u32 val) + enum dpio_phy phy, int reg, u32 val) { - u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); + u32 port = vlv_dpio_phy_iosf_port(i915, phy); vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val); } diff --git a/drivers/gpu/drm/i915/vlv_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h index 9ce283d96b..c20cf41b2d 100644 --- a/drivers/gpu/drm/i915/vlv_sideband.h +++ b/drivers/gpu/drm/i915/vlv_sideband.h @@ -11,7 +11,7 @@ #include "vlv_sideband_reg.h" -enum pipe; +enum dpio_phy; struct drm_i915_private; enum { @@ -26,9 +26,6 @@ enum { }; void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports); -u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg); -void vlv_iosf_sb_write(struct drm_i915_private *i915, - u8 port, u32 reg, u32 val); void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports); static inline void vlv_bunit_get(struct drm_i915_private *i915) @@ -75,9 +72,9 @@ static inline void vlv_dpio_get(struct drm_i915_private *i915) vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_DPIO)); } -u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg); +u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg); void vlv_dpio_write(struct drm_i915_private *i915, - enum pipe pipe, int reg, u32 val); + enum dpio_phy phy, int reg, u32 val); static inline void vlv_dpio_put(struct drm_i915_private *i915) { diff --git a/drivers/gpu/drm/imagination/Kconfig b/drivers/gpu/drm/imagination/Kconfig new file mode 100644 index 0000000000..3bfa2ac212 --- /dev/null +++ b/drivers/gpu/drm/imagination/Kconfig @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only OR MIT +# Copyright (c) 2023 Imagination Technologies Ltd. + +config DRM_POWERVR + tristate "Imagination Technologies PowerVR (Series 6 and later) & IMG Graphics" + depends on ARM64 + depends on DRM + depends on PM + select DRM_EXEC + select DRM_GEM_SHMEM_HELPER + select DRM_SCHED + select DRM_GPUVM + select FW_LOADER + help + Choose this option if you have a system that has an Imagination + Technologies PowerVR (Series 6 or later) or IMG GPU. + + If "M" is selected, the module will be called powervr. diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile new file mode 100644 index 0000000000..ec6db8e9b4 --- /dev/null +++ b/drivers/gpu/drm/imagination/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: GPL-2.0-only OR MIT +# Copyright (c) 2023 Imagination Technologies Ltd. 
+ +subdir-ccflags-y := -I$(srctree)/$(src) + +powervr-y := \ + pvr_ccb.o \ + pvr_cccb.o \ + pvr_context.o \ + pvr_device.o \ + pvr_device_info.o \ + pvr_drv.o \ + pvr_free_list.o \ + pvr_fw.o \ + pvr_fw_meta.o \ + pvr_fw_mips.o \ + pvr_fw_startstop.o \ + pvr_fw_trace.o \ + pvr_gem.o \ + pvr_hwrt.o \ + pvr_job.o \ + pvr_mmu.o \ + pvr_params.o \ + pvr_power.o \ + pvr_queue.o \ + pvr_stream.o \ + pvr_stream_defs.o \ + pvr_sync.o \ + pvr_vm.o \ + pvr_vm_mips.o + +powervr-$(CONFIG_DEBUG_FS) += \ + pvr_debugfs.o + +obj-$(CONFIG_DRM_POWERVR) += powervr.o diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c new file mode 100644 index 0000000000..4deeac7ed4 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_ccb.c @@ -0,0 +1,645 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#include "pvr_ccb.h" +#include "pvr_device.h" +#include "pvr_drv.h" +#include "pvr_free_list.h" +#include "pvr_fw.h" +#include "pvr_gem.h" +#include "pvr_power.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#define RESERVE_SLOT_TIMEOUT (1 * HZ) /* 1s */ +#define RESERVE_SLOT_MIN_RETRIES 10 + +static void +ccb_ctrl_init(void *cpu_ptr, void *priv) +{ + struct rogue_fwif_ccb_ctl *ctrl = cpu_ptr; + struct pvr_ccb *pvr_ccb = priv; + + ctrl->write_offset = 0; + ctrl->read_offset = 0; + ctrl->wrap_mask = pvr_ccb->num_cmds - 1; + ctrl->cmd_size = pvr_ccb->cmd_size; +} + +/** + * pvr_ccb_init() - Initialise a CCB + * @pvr_dev: Device pointer. + * @pvr_ccb: Pointer to CCB structure to initialise. + * @num_cmds_log2: Log2 of number of commands in this CCB. + * @cmd_size: Command size for this CCB. + * + * Return: + * * Zero on success, or + * * Any error code returned by pvr_fw_object_create_and_map(). + */ +static int +pvr_ccb_init(struct pvr_device *pvr_dev, struct pvr_ccb *pvr_ccb, + u32 num_cmds_log2, size_t cmd_size) +{ + u32 num_cmds = 1 << num_cmds_log2; + u32 ccb_size = num_cmds * cmd_size; + int err; + + pvr_ccb->num_cmds = num_cmds; + pvr_ccb->cmd_size = cmd_size; + + err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_ccb->lock); + if (err) + return err; + + /* + * Map CCB and control structure as uncached, so we don't have to flush + * CPU cache repeatedly when polling for space. + */ + pvr_ccb->ctrl = pvr_fw_object_create_and_map(pvr_dev, sizeof(*pvr_ccb->ctrl), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + ccb_ctrl_init, pvr_ccb, &pvr_ccb->ctrl_obj); + if (IS_ERR(pvr_ccb->ctrl)) + return PTR_ERR(pvr_ccb->ctrl); + + pvr_ccb->ccb = pvr_fw_object_create_and_map(pvr_dev, ccb_size, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &pvr_ccb->ccb_obj); + if (IS_ERR(pvr_ccb->ccb)) { + err = PTR_ERR(pvr_ccb->ccb); + goto err_free_ctrl; + } + + pvr_fw_object_get_fw_addr(pvr_ccb->ctrl_obj, &pvr_ccb->ctrl_fw_addr); + pvr_fw_object_get_fw_addr(pvr_ccb->ccb_obj, &pvr_ccb->ccb_fw_addr); + + WRITE_ONCE(pvr_ccb->ctrl->write_offset, 0); + WRITE_ONCE(pvr_ccb->ctrl->read_offset, 0); + WRITE_ONCE(pvr_ccb->ctrl->wrap_mask, num_cmds - 1); + WRITE_ONCE(pvr_ccb->ctrl->cmd_size, cmd_size); + + return 0; + +err_free_ctrl: + pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj); + + return err; +} + +/** + * pvr_ccb_fini() - Release CCB structure + * @pvr_ccb: CCB to release. 
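+ * (Worked example, not part of the patch: pvr_ccb_init() above forces
+ * num_cmds to a power of two (1 << num_cmds_log2) so that wrap_mask can be
+ * num_cmds - 1 and offsets advance with a single AND instead of a modulo.
+ * With num_cmds_log2 == 4: 16 slots, wrap_mask == 0xf, and the last slot
+ * wraps as (15 + 1) & 0xf == 0.)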
+ */ +void +pvr_ccb_fini(struct pvr_ccb *pvr_ccb) +{ + pvr_fw_object_unmap_and_destroy(pvr_ccb->ccb_obj); + pvr_fw_object_unmap_and_destroy(pvr_ccb->ctrl_obj); +} + +/** + * pvr_ccb_slot_available_locked() - Test whether any slots are available in CCB + * @pvr_ccb: CCB to test. + * @write_offset: Address to store number of next available slot. May be %NULL. + * + * Caller must hold @pvr_ccb->lock. + * + * Return: + * * %true if a slot is available, or + * * %false if no slot is available. + */ +static __always_inline bool +pvr_ccb_slot_available_locked(struct pvr_ccb *pvr_ccb, u32 *write_offset) +{ + struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl; + u32 next_write_offset = (READ_ONCE(ctrl->write_offset) + 1) & READ_ONCE(ctrl->wrap_mask); + + lockdep_assert_held(&pvr_ccb->lock); + + if (READ_ONCE(ctrl->read_offset) != next_write_offset) { + if (write_offset) + *write_offset = next_write_offset; + return true; + } + + return false; +} + +static void +process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *cmd) +{ + switch (cmd->cmd_type) { + case ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART: + pvr_power_reset(pvr_dev, false); + break; + + case ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION: + pvr_free_list_process_reconstruct_req(pvr_dev, + &cmd->cmd_data.cmd_freelists_reconstruction); + break; + + case ROGUE_FWIF_FWCCB_CMD_FREELIST_GROW: + pvr_free_list_process_grow_req(pvr_dev, &cmd->cmd_data.cmd_free_list_gs); + break; + + default: + drm_info(from_pvr_device(pvr_dev), "Received unknown FWCCB command %x\n", + cmd->cmd_type); + break; + } +} + +/** + * pvr_fwccb_process() - Process any pending FWCCB commands + * @pvr_dev: Target PowerVR device + */ +void pvr_fwccb_process(struct pvr_device *pvr_dev) +{ + struct rogue_fwif_fwccb_cmd *fwccb = pvr_dev->fwccb.ccb; + struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->fwccb.ctrl; + u32 read_offset; + + mutex_lock(&pvr_dev->fwccb.lock); + + while ((read_offset = READ_ONCE(ctrl->read_offset)) != READ_ONCE(ctrl->write_offset)) { + struct rogue_fwif_fwccb_cmd cmd = fwccb[read_offset]; + + WRITE_ONCE(ctrl->read_offset, (read_offset + 1) & READ_ONCE(ctrl->wrap_mask)); + + /* Drop FWCCB lock while we process command. */ + mutex_unlock(&pvr_dev->fwccb.lock); + + process_fwccb_command(pvr_dev, &cmd); + + mutex_lock(&pvr_dev->fwccb.lock); + } + + mutex_unlock(&pvr_dev->fwccb.lock); +} + +/** + * pvr_kccb_capacity() - Returns the maximum number of usable KCCB slots. + * @pvr_dev: Target PowerVR device + * + * Return: + * * The maximum number of active slots. + */ +static u32 pvr_kccb_capacity(struct pvr_device *pvr_dev) +{ + /* Capacity is the number of slots minus one to cope with the wrapping + * mechanisms. If we were to use all slots, we might end up with + * read_offset == write_offset, which the FW considers as a KCCB-is-empty + * condition. + */ + return pvr_dev->kccb.slot_count - 1; +} + +/** + * pvr_kccb_used_slot_count_locked() - Get the number of used slots + * @pvr_dev: Device pointer. + * + * KCCB lock must be held. + * + * Return: + * * The number of slots currently used.
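+ * (Worked example, not part of the patch: with slot_count == 16, rd == 14
+ * and wr == 2 the buffer has wrapped, so the function below reports
+ * 2 + 16 - 14 == 4 used slots; together with pvr_kccb_capacity() ==
+ * slot_count - 1 == 15 this keeps the FW's read_offset == write_offset
+ * "empty" encoding unambiguous.)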
+ */ +static u32 +pvr_kccb_used_slot_count_locked(struct pvr_device *pvr_dev) +{ + struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb; + struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl; + u32 wr_offset = READ_ONCE(ctrl->write_offset); + u32 rd_offset = READ_ONCE(ctrl->read_offset); + u32 used_count; + + lockdep_assert_held(&pvr_ccb->lock); + + if (wr_offset >= rd_offset) + used_count = wr_offset - rd_offset; + else + used_count = wr_offset + pvr_dev->kccb.slot_count - rd_offset; + + return used_count; +} + +/** + * pvr_kccb_send_cmd_reserved_powered() - Send command to the KCCB, with the PM ref + * held and a slot pre-reserved + * @pvr_dev: Device pointer. + * @cmd: Command to send. + * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL. + */ +void +pvr_kccb_send_cmd_reserved_powered(struct pvr_device *pvr_dev, + struct rogue_fwif_kccb_cmd *cmd, + u32 *kccb_slot) +{ + struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb; + struct rogue_fwif_kccb_cmd *kccb = pvr_ccb->ccb; + struct rogue_fwif_ccb_ctl *ctrl = pvr_ccb->ctrl; + u32 old_write_offset; + u32 new_write_offset; + + WARN_ON(pvr_dev->lost); + + mutex_lock(&pvr_ccb->lock); + + if (WARN_ON(!pvr_dev->kccb.reserved_count)) + goto out_unlock; + + old_write_offset = READ_ONCE(ctrl->write_offset); + + /* We reserved the slot, we should have one available. */ + if (WARN_ON(!pvr_ccb_slot_available_locked(pvr_ccb, &new_write_offset))) + goto out_unlock; + + memcpy(&kccb[old_write_offset], cmd, + sizeof(struct rogue_fwif_kccb_cmd)); + if (kccb_slot) { + *kccb_slot = old_write_offset; + /* Clear return status for this slot. */ + WRITE_ONCE(pvr_dev->kccb.rtn[old_write_offset], + ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE); + } + mb(); /* memory barrier */ + WRITE_ONCE(ctrl->write_offset, new_write_offset); + pvr_dev->kccb.reserved_count--; + + /* Kick MTS */ + pvr_fw_mts_schedule(pvr_dev, + PVR_FWIF_DM_GP & ~ROGUE_CR_MTS_SCHEDULE_DM_CLRMSK); + +out_unlock: + mutex_unlock(&pvr_ccb->lock); +} + +/** + * pvr_kccb_try_reserve_slot() - Try to reserve a KCCB slot + * @pvr_dev: Device pointer. + * + * Return: + * * true if a KCCB slot was reserved, or + * * false otherwise. + */ +static bool pvr_kccb_try_reserve_slot(struct pvr_device *pvr_dev) +{ + bool reserved = false; + u32 used_count; + + mutex_lock(&pvr_dev->kccb.ccb.lock); + + used_count = pvr_kccb_used_slot_count_locked(pvr_dev); + if (pvr_dev->kccb.reserved_count < pvr_kccb_capacity(pvr_dev) - used_count) { + pvr_dev->kccb.reserved_count++; + reserved = true; + } + + mutex_unlock(&pvr_dev->kccb.ccb.lock); + + return reserved; +} + +/** + * pvr_kccb_reserve_slot_sync() - Try to reserve a slot synchronously + * @pvr_dev: Device pointer. + * + * Return: + * * 0 on success, or + * * -EBUSY if no slots were reserved after %RESERVE_SLOT_TIMEOUT, with a minimum of + * %RESERVE_SLOT_MIN_RETRIES retries. + */ +static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev) +{ + unsigned long start_timestamp = jiffies; + bool reserved = false; + u32 retries = 0; + + while ((jiffies - start_timestamp) < (u32)RESERVE_SLOT_TIMEOUT || + retries < RESERVE_SLOT_MIN_RETRIES) { + reserved = pvr_kccb_try_reserve_slot(pvr_dev); + if (reserved) + break; + + usleep_range(1, 50); + + if (retries < U32_MAX) + retries++; + } + + return reserved ? 0 : -EBUSY; +} + +/** + * pvr_kccb_send_cmd_powered() - Send command to the KCCB, with a PM ref held + * @pvr_dev: Device pointer. + * @cmd: Command to send. + * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL.
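+ * (Aside, not part of the patch: the reserved-powered send above follows a
+ * publish pattern --
+ *   kccb[woff] = *cmd;                      // 1: fill the slot
+ *   mb();                                   // 2: order payload before index
+ *   WRITE_ONCE(ctrl->write_offset, next);   // 3: hand the slot to the FW
+ * the barrier guarantees the FW can never observe the new write_offset
+ * before the fully written command.)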
+ * + * Returns: + * * Zero on success, or + * * -EBUSY if timeout while waiting for a free KCCB slot. + */ +int +pvr_kccb_send_cmd_powered(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd, + u32 *kccb_slot) +{ + int err; + + err = pvr_kccb_reserve_slot_sync(pvr_dev); + if (err) + return err; + + pvr_kccb_send_cmd_reserved_powered(pvr_dev, cmd, kccb_slot); + return 0; +} + +/** + * pvr_kccb_send_cmd() - Send command to the KCCB + * @pvr_dev: Device pointer. + * @cmd: Command to send. + * @kccb_slot: Address to store the KCCB slot for this command. May be %NULL. + * + * Returns: + * * Zero on success, or + * * -EBUSY if timeout while waiting for a free KCCB slot. + */ +int +pvr_kccb_send_cmd(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd, + u32 *kccb_slot) +{ + int err; + + err = pvr_power_get(pvr_dev); + if (err) + return err; + + err = pvr_kccb_send_cmd_powered(pvr_dev, cmd, kccb_slot); + + pvr_power_put(pvr_dev); + + return err; +} + +/** + * pvr_kccb_wait_for_completion() - Wait for a KCCB command to complete + * @pvr_dev: Device pointer. + * @slot_nr: KCCB slot to wait on. + * @timeout: Timeout length (in jiffies). + * @rtn_out: Location to store KCCB command result. May be %NULL. + * + * Returns: + * * Zero on success, or + * * -ETIMEDOUT on timeout. + */ +int +pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr, + u32 timeout, u32 *rtn_out) +{ + int ret = wait_event_timeout(pvr_dev->kccb.rtn_q, READ_ONCE(pvr_dev->kccb.rtn[slot_nr]) & + ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED, timeout); + + if (ret && rtn_out) + *rtn_out = READ_ONCE(pvr_dev->kccb.rtn[slot_nr]); + + return ret ? 0 : -ETIMEDOUT; +} + +/** + * pvr_kccb_is_idle() - Returns whether the device's KCCB is idle + * @pvr_dev: Device pointer + * + * Returns: + * * %true if the KCCB is idle (contains no commands), or + * * %false if the KCCB contains pending commands. + */ +bool +pvr_kccb_is_idle(struct pvr_device *pvr_dev) +{ + struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->kccb.ccb.ctrl; + bool idle; + + mutex_lock(&pvr_dev->kccb.ccb.lock); + + idle = (READ_ONCE(ctrl->write_offset) == READ_ONCE(ctrl->read_offset)); + + mutex_unlock(&pvr_dev->kccb.ccb.lock); + + return idle; +} + +static const char * +pvr_kccb_fence_get_driver_name(struct dma_fence *f) +{ + return PVR_DRIVER_NAME; +} + +static const char * +pvr_kccb_fence_get_timeline_name(struct dma_fence *f) +{ + return "kccb"; +} + +static const struct dma_fence_ops pvr_kccb_fence_ops = { + .get_driver_name = pvr_kccb_fence_get_driver_name, + .get_timeline_name = pvr_kccb_fence_get_timeline_name, +}; + +/** + * struct pvr_kccb_fence - Fence object used to wait for a KCCB slot + */ +struct pvr_kccb_fence { + /** @base: Base dma_fence object. */ + struct dma_fence base; + + /** @node: Node used to insert the fence in the pvr_device::kccb::waiters list. */ + struct list_head node; +}; + +/** + * pvr_kccb_wake_up_waiters() - Check the KCCB waiters + * @pvr_dev: Target PowerVR device + * + * Signal as many KCCB fences as we have slots available. + */ +void pvr_kccb_wake_up_waiters(struct pvr_device *pvr_dev) +{ + struct pvr_kccb_fence *fence, *tmp_fence; + u32 used_count, available_count; + + /* Wake up those waiting for KCCB slot execution. */ + wake_up_all(&pvr_dev->kccb.rtn_q); + + /* Then iterate over all KCCB fences and signal as many as we can.
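+ * (Usage sketch, not part of the patch: a typical caller pairs the two
+ * public helpers above as
+ *   err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot);
+ *   if (!err)
+ *           err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, &rtn);
+ * with HZ standing in for an illustrative one-second timeout in jiffies.)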
*/ + mutex_lock(&pvr_dev->kccb.ccb.lock); + used_count = pvr_kccb_used_slot_count_locked(pvr_dev); + + if (WARN_ON(used_count + pvr_dev->kccb.reserved_count > pvr_kccb_capacity(pvr_dev))) + goto out_unlock; + + available_count = pvr_kccb_capacity(pvr_dev) - used_count - pvr_dev->kccb.reserved_count; + list_for_each_entry_safe(fence, tmp_fence, &pvr_dev->kccb.waiters, node) { + if (!available_count) + break; + + list_del(&fence->node); + pvr_dev->kccb.reserved_count++; + available_count--; + dma_fence_signal(&fence->base); + dma_fence_put(&fence->base); + } + +out_unlock: + mutex_unlock(&pvr_dev->kccb.ccb.lock); +} + +/** + * pvr_kccb_fini() - Cleanup device KCCB + * @pvr_dev: Target PowerVR device + */ +void pvr_kccb_fini(struct pvr_device *pvr_dev) +{ + pvr_ccb_fini(&pvr_dev->kccb.ccb); + WARN_ON(!list_empty(&pvr_dev->kccb.waiters)); + WARN_ON(pvr_dev->kccb.reserved_count); +} + +/** + * pvr_kccb_init() - Initialise device KCCB + * @pvr_dev: Target PowerVR device + * + * Returns: + * * 0 on success, or + * * Any error returned by pvr_ccb_init(). + */ +int +pvr_kccb_init(struct pvr_device *pvr_dev) +{ + pvr_dev->kccb.slot_count = 1 << ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT; + INIT_LIST_HEAD(&pvr_dev->kccb.waiters); + pvr_dev->kccb.fence_ctx.id = dma_fence_context_alloc(1); + spin_lock_init(&pvr_dev->kccb.fence_ctx.lock); + + return pvr_ccb_init(pvr_dev, &pvr_dev->kccb.ccb, + ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT, + sizeof(struct rogue_fwif_kccb_cmd)); +} + +/** + * pvr_kccb_fence_alloc() - Allocate a pvr_kccb_fence object + * + * Return: + * * NULL if the allocation fails, or + * * A valid dma_fence pointer otherwise. + */ +struct dma_fence *pvr_kccb_fence_alloc(void) +{ + struct pvr_kccb_fence *kccb_fence; + + kccb_fence = kzalloc(sizeof(*kccb_fence), GFP_KERNEL); + if (!kccb_fence) + return NULL; + + return &kccb_fence->base; +} + +/** + * pvr_kccb_fence_put() - Drop a KCCB fence reference + * @fence: The fence to drop the reference on. + * + * If the fence hasn't been initialized yet, dma_fence_free() is called. This + * way we have a single function taking care of both cases. + */ +void pvr_kccb_fence_put(struct dma_fence *fence) +{ + if (!fence) + return; + + if (!fence->ops) { + dma_fence_free(fence); + } else { + WARN_ON(fence->ops != &pvr_kccb_fence_ops); + dma_fence_put(fence); + } +} + +/** + * pvr_kccb_reserve_slot() - Reserve a KCCB slot for later use + * @pvr_dev: Target PowerVR device + * @f: KCCB fence object previously allocated with pvr_kccb_fence_alloc() + * + * Try to reserve a KCCB slot, and if there's no slot available, + * initializes the fence object and queues it to the waiters list. + * + * If NULL is returned, that means the slot is reserved. In that case, + * @f is freed and shouldn't be accessed after that point. + * + * Return: + * * NULL if a slot was available directly, or + * * A valid dma_fence object to wait on if no slot was available.
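+ * (Usage sketch, not part of the patch, reference handling simplified:
+ *   f = pvr_kccb_fence_alloc();
+ *   waiter = pvr_kccb_reserve_slot(pvr_dev, f);
+ *   if (waiter) {
+ *           dma_fence_wait(waiter, false); // signalled once a slot frees up
+ *           dma_fence_put(waiter);
+ *   }
+ * after which the reservation is held and the caller either sends via
+ * pvr_kccb_send_cmd_reserved_powered() or backs out with
+ * pvr_kccb_release_slot().)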
+ */ +struct dma_fence * +pvr_kccb_reserve_slot(struct pvr_device *pvr_dev, struct dma_fence *f) +{ + struct pvr_kccb_fence *fence = container_of(f, struct pvr_kccb_fence, base); + struct dma_fence *out_fence = NULL; + u32 used_count; + + mutex_lock(&pvr_dev->kccb.ccb.lock); + + used_count = pvr_kccb_used_slot_count_locked(pvr_dev); + if (pvr_dev->kccb.reserved_count >= pvr_kccb_capacity(pvr_dev) - used_count) { + dma_fence_init(&fence->base, &pvr_kccb_fence_ops, + &pvr_dev->kccb.fence_ctx.lock, + pvr_dev->kccb.fence_ctx.id, + atomic_inc_return(&pvr_dev->kccb.fence_ctx.seqno)); + out_fence = dma_fence_get(&fence->base); + list_add_tail(&fence->node, &pvr_dev->kccb.waiters); + } else { + pvr_kccb_fence_put(f); + pvr_dev->kccb.reserved_count++; + } + + mutex_unlock(&pvr_dev->kccb.ccb.lock); + + return out_fence; +} + +/** + * pvr_kccb_release_slot() - Release a KCCB slot reserved with + * pvr_kccb_reserve_slot() + * @pvr_dev: Target PowerVR device + * + * Should only be called if something failed after the + * pvr_kccb_reserve_slot() call and you know you won't call + * pvr_kccb_send_cmd_reserved_powered(). + */ +void pvr_kccb_release_slot(struct pvr_device *pvr_dev) +{ + mutex_lock(&pvr_dev->kccb.ccb.lock); + if (!WARN_ON(!pvr_dev->kccb.reserved_count)) + pvr_dev->kccb.reserved_count--; + mutex_unlock(&pvr_dev->kccb.ccb.lock); +} + +/** + * pvr_fwccb_init() - Initialise device FWCCB + * @pvr_dev: Target PowerVR device + * + * Returns: + * * 0 on success, or + * * Any error returned by pvr_ccb_init(). + */ +int +pvr_fwccb_init(struct pvr_device *pvr_dev) +{ + return pvr_ccb_init(pvr_dev, &pvr_dev->fwccb, + ROGUE_FWIF_FWCCB_NUMCMDS_LOG2, + sizeof(struct rogue_fwif_fwccb_cmd)); +} diff --git a/drivers/gpu/drm/imagination/pvr_ccb.h b/drivers/gpu/drm/imagination/pvr_ccb.h new file mode 100644 index 0000000000..4c8aef31ee --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_ccb.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_CCB_H +#define PVR_CCB_H + +#include "pvr_rogue_fwif.h" + +#include +#include + +/* Forward declaration from pvr_device.h. */ +struct pvr_device; + +/* Forward declaration from pvr_gem.h. */ +struct pvr_fw_object; + +struct pvr_ccb { + /** @ctrl_obj: FW object representing CCB control structure. */ + struct pvr_fw_object *ctrl_obj; + /** @ccb_obj: FW object representing CCB. */ + struct pvr_fw_object *ccb_obj; + + /** @ctrl_fw_addr: FW virtual address of CCB control structure. */ + u32 ctrl_fw_addr; + /** @ccb_fw_addr: FW virtual address of CCB. */ + u32 ccb_fw_addr; + + /** @num_cmds: Number of commands in this CCB. */ + u32 num_cmds; + + /** @cmd_size: Size of each command in this CCB, in bytes. */ + u32 cmd_size; + + /** @lock: Mutex protecting @ctrl and @ccb. */ + struct mutex lock; + /** + * @ctrl: Kernel mapping of CCB control structure. @lock must be held + * when accessing. + */ + struct rogue_fwif_ccb_ctl *ctrl; + /** @ccb: Kernel mapping of CCB. @lock must be held when accessing.
*/ + void *ccb; +}; + +int pvr_kccb_init(struct pvr_device *pvr_dev); +void pvr_kccb_fini(struct pvr_device *pvr_dev); +int pvr_fwccb_init(struct pvr_device *pvr_dev); +void pvr_ccb_fini(struct pvr_ccb *ccb); + +void pvr_fwccb_process(struct pvr_device *pvr_dev); + +struct dma_fence *pvr_kccb_fence_alloc(void); +void pvr_kccb_fence_put(struct dma_fence *fence); +struct dma_fence * +pvr_kccb_reserve_slot(struct pvr_device *pvr_dev, struct dma_fence *f); +void pvr_kccb_release_slot(struct pvr_device *pvr_dev); +int pvr_kccb_send_cmd(struct pvr_device *pvr_dev, + struct rogue_fwif_kccb_cmd *cmd, u32 *kccb_slot); +int pvr_kccb_send_cmd_powered(struct pvr_device *pvr_dev, + struct rogue_fwif_kccb_cmd *cmd, + u32 *kccb_slot); +void pvr_kccb_send_cmd_reserved_powered(struct pvr_device *pvr_dev, + struct rogue_fwif_kccb_cmd *cmd, + u32 *kccb_slot); +int pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr, u32 timeout, + u32 *rtn_out); +bool pvr_kccb_is_idle(struct pvr_device *pvr_dev); +void pvr_kccb_wake_up_waiters(struct pvr_device *pvr_dev); + +#endif /* PVR_CCB_H */ diff --git a/drivers/gpu/drm/imagination/pvr_cccb.c b/drivers/gpu/drm/imagination/pvr_cccb.c new file mode 100644 index 0000000000..4fabab41be --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_cccb.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#include "pvr_ccb.h" +#include "pvr_cccb.h" +#include "pvr_device.h" +#include "pvr_gem.h" +#include "pvr_hwrt.h" + +#include +#include +#include +#include +#include + +static __always_inline u32 +get_ccb_space(u32 w_off, u32 r_off, u32 ccb_size) +{ + return (((r_off) - (w_off)) + ((ccb_size) - 1)) & ((ccb_size) - 1); +} + +static void +cccb_ctrl_init(void *cpu_ptr, void *priv) +{ + struct rogue_fwif_cccb_ctl *ctrl = cpu_ptr; + struct pvr_cccb *pvr_cccb = priv; + + WRITE_ONCE(ctrl->write_offset, 0); + WRITE_ONCE(ctrl->read_offset, 0); + WRITE_ONCE(ctrl->dep_offset, 0); + WRITE_ONCE(ctrl->wrap_mask, pvr_cccb->wrap_mask); +} + +/** + * pvr_cccb_init() - Initialise a Client CCB + * @pvr_dev: Device pointer. + * @pvr_cccb: Pointer to Client CCB structure to initialise. + * @size_log2: Log2 size of Client CCB in bytes. + * @name: Name of owner of Client CCB. Used for fence context. + * + * Return: + * * Zero on success, or + * * Any error code returned by pvr_fw_object_create_and_map(). + */ +int +pvr_cccb_init(struct pvr_device *pvr_dev, struct pvr_cccb *pvr_cccb, + u32 size_log2, const char *name) +{ + size_t size = 1 << size_log2; + int err; + + pvr_cccb->size = size; + pvr_cccb->write_offset = 0; + pvr_cccb->wrap_mask = size - 1; + + /* + * Map CCCB and control structure as uncached, so we don't have to flush + * CPU cache repeatedly when polling for space. 
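+ * (Worked example, not part of the patch, for get_ccb_space() above: with
+ * ccb_size == 16, w_off == 10 and r_off == 4 it returns
+ * ((4 - 10) + 15) & 15 == 9, i.e. ten bytes separate write from read but
+ * one byte is always held back so a full buffer never aliases the empty
+ * state.)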
+ */ + pvr_cccb->ctrl = pvr_fw_object_create_and_map(pvr_dev, sizeof(*pvr_cccb->ctrl), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + cccb_ctrl_init, pvr_cccb, + &pvr_cccb->ctrl_obj); + if (IS_ERR(pvr_cccb->ctrl)) + return PTR_ERR(pvr_cccb->ctrl); + + pvr_cccb->cccb = pvr_fw_object_create_and_map(pvr_dev, size, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &pvr_cccb->cccb_obj); + if (IS_ERR(pvr_cccb->cccb)) { + err = PTR_ERR(pvr_cccb->cccb); + goto err_free_ctrl; + } + + pvr_fw_object_get_fw_addr(pvr_cccb->ctrl_obj, &pvr_cccb->ctrl_fw_addr); + pvr_fw_object_get_fw_addr(pvr_cccb->cccb_obj, &pvr_cccb->cccb_fw_addr); + + return 0; + +err_free_ctrl: + pvr_fw_object_unmap_and_destroy(pvr_cccb->ctrl_obj); + + return err; +} + +/** + * pvr_cccb_fini() - Release Client CCB structure + * @pvr_cccb: Client CCB to release. + */ +void +pvr_cccb_fini(struct pvr_cccb *pvr_cccb) +{ + pvr_fw_object_unmap_and_destroy(pvr_cccb->cccb_obj); + pvr_fw_object_unmap_and_destroy(pvr_cccb->ctrl_obj); +} + +/** + * pvr_cccb_cmdseq_fits() - Check if a command sequence fits in the CCCB + * @pvr_cccb: Target Client CCB. + * @size: Size of the command sequence. + * + * Check if a command sequence fits in the CCCB we have at hand. + * + * Return: + * * true if the command sequence fits in the CCCB, or + * * false otherwise. + */ +bool pvr_cccb_cmdseq_fits(struct pvr_cccb *pvr_cccb, size_t size) +{ + struct rogue_fwif_cccb_ctl *ctrl = pvr_cccb->ctrl; + u32 read_offset, remaining; + bool fits = false; + + read_offset = READ_ONCE(ctrl->read_offset); + remaining = pvr_cccb->size - pvr_cccb->write_offset; + + /* Always ensure we have enough room for a padding command at the end of the CCCB. + * If our command sequence does not fit, reserve the remaining space for a padding + * command. + */ + if (size + PADDING_COMMAND_SIZE > remaining) + size += remaining; + + if (get_ccb_space(pvr_cccb->write_offset, read_offset, pvr_cccb->size) >= size) + fits = true; + + return fits; +} + +/** + * pvr_cccb_write_command_with_header() - Write a command + command header to a + * Client CCB + * @pvr_cccb: Target Client CCB. + * @cmd_type: Client CCB command type. Must be one of %ROGUE_FWIF_CCB_CMD_TYPE_*. + * @cmd_size: Size of command in bytes. + * @cmd_data: Pointer to command to write. + * @ext_job_ref: External job reference. + * @int_job_ref: Internal job reference. + * + * Caller must make sure there's enough space in CCCB to queue this command. This + * can be done by calling pvr_cccb_cmdseq_fits(). + * + * This function is not protected by any lock. The caller must ensure there's + * no concurrent caller, which should be guaranteed by the drm_sched model (job + * submission is serialized in drm_sched_main()). + */ +void +pvr_cccb_write_command_with_header(struct pvr_cccb *pvr_cccb, u32 cmd_type, u32 cmd_size, + void *cmd_data, u32 ext_job_ref, u32 int_job_ref) +{ + u32 sz_with_hdr = pvr_cccb_get_size_of_cmd_with_hdr(cmd_size); + struct rogue_fwif_ccb_cmd_header cmd_header = { + .cmd_type = cmd_type, + .cmd_size = ALIGN(cmd_size, 8), + .ext_job_ref = ext_job_ref, + .int_job_ref = int_job_ref, + }; + struct rogue_fwif_cccb_ctl *ctrl = pvr_cccb->ctrl; + u32 remaining = pvr_cccb->size - pvr_cccb->write_offset; + u32 required_size, cccb_space, read_offset; + + /* + * Always ensure we have enough room for a padding command at the end of + * the CCCB. + */ + if (remaining < sz_with_hdr + PADDING_COMMAND_SIZE) { + /* + * Command would need to wrap, so we need to pad the remainder + * of the CCCB. 
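+ * (Worked numbers, not part of the patch: with size == 256,
+ * write_offset == 200, sz_with_hdr == 80 and, for illustration only,
+ * PADDING_COMMAND_SIZE == 16, remaining == 56 is too small, so
+ * required_size becomes 80 + 56 == 136 and a padding command consumes the
+ * 56 trailing bytes before the real command restarts at offset 0.)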
+ */ + required_size = sz_with_hdr + remaining; + } else { + required_size = sz_with_hdr; + } + + read_offset = READ_ONCE(ctrl->read_offset); + cccb_space = get_ccb_space(pvr_cccb->write_offset, read_offset, pvr_cccb->size); + if (WARN_ON(cccb_space < required_size)) + return; + + if (required_size != sz_with_hdr) { + /* Add padding command */ + struct rogue_fwif_ccb_cmd_header pad_cmd = { + .cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_PADDING, + .cmd_size = remaining - sizeof(pad_cmd), + }; + + memcpy(&pvr_cccb->cccb[pvr_cccb->write_offset], &pad_cmd, sizeof(pad_cmd)); + pvr_cccb->write_offset = 0; + } + + memcpy(&pvr_cccb->cccb[pvr_cccb->write_offset], &cmd_header, sizeof(cmd_header)); + memcpy(&pvr_cccb->cccb[pvr_cccb->write_offset + sizeof(cmd_header)], cmd_data, cmd_size); + pvr_cccb->write_offset += sz_with_hdr; +} + +static void fill_cmd_kick_data(struct pvr_cccb *cccb, u32 ctx_fw_addr, + struct pvr_hwrt_data *hwrt, + struct rogue_fwif_kccb_cmd_kick_data *k) +{ + k->context_fw_addr = ctx_fw_addr; + k->client_woff_update = cccb->write_offset; + k->client_wrap_mask_update = cccb->wrap_mask; + + if (hwrt) { + u32 cleanup_state_offset = offsetof(struct rogue_fwif_hwrtdata, cleanup_state); + + pvr_fw_object_get_fw_addr_offset(hwrt->fw_obj, cleanup_state_offset, + &k->cleanup_ctl_fw_addr[k->num_cleanup_ctl++]); + } +} + +/** + * pvr_cccb_send_kccb_kick() - Send KCCB kick to trigger command processing + * @pvr_dev: Device pointer. + * @pvr_cccb: Pointer to CCCB to process. + * @cctx_fw_addr: FW virtual address for context owning this Client CCB. + * @hwrt: HWRT data set associated with this kick. May be %NULL. + * + * You must call pvr_kccb_reserve_slot() and, if it returned a fence, wait for + * that fence to signal before calling pvr_cccb_send_kccb_kick(). + */ +void +pvr_cccb_send_kccb_kick(struct pvr_device *pvr_dev, + struct pvr_cccb *pvr_cccb, u32 cctx_fw_addr, + struct pvr_hwrt_data *hwrt) +{ + struct rogue_fwif_kccb_cmd cmd_kick = { + .cmd_type = ROGUE_FWIF_KCCB_CMD_KICK, + }; + + fill_cmd_kick_data(pvr_cccb, cctx_fw_addr, hwrt, &cmd_kick.cmd_data.cmd_kick_data); + + /* Make sure the writes to the CCCB are flushed before sending the KICK. */ + wmb(); + + pvr_kccb_send_cmd_reserved_powered(pvr_dev, &cmd_kick, NULL); +} + +void +pvr_cccb_send_kccb_combined_kick(struct pvr_device *pvr_dev, + struct pvr_cccb *geom_cccb, + struct pvr_cccb *frag_cccb, + u32 geom_ctx_fw_addr, + u32 frag_ctx_fw_addr, + struct pvr_hwrt_data *hwrt, + bool frag_is_pr) +{ + struct rogue_fwif_kccb_cmd cmd_kick = { + .cmd_type = ROGUE_FWIF_KCCB_CMD_COMBINED_GEOM_FRAG_KICK, + }; + + fill_cmd_kick_data(geom_cccb, geom_ctx_fw_addr, hwrt, + &cmd_kick.cmd_data.combined_geom_frag_cmd_kick_data.geom_cmd_kick_data); + + /* If this is a partial-render job, we don't attach resources to cleanup-ctl array, + * because the resources are already retained by the geometry job. + */ + fill_cmd_kick_data(frag_cccb, frag_ctx_fw_addr, frag_is_pr ? NULL : hwrt, + &cmd_kick.cmd_data.combined_geom_frag_cmd_kick_data.frag_cmd_kick_data); + + /* Make sure the writes to the CCCB are flushed before sending the KICK. */ + wmb(); + + pvr_kccb_send_cmd_reserved_powered(pvr_dev, &cmd_kick, NULL); +} diff --git a/drivers/gpu/drm/imagination/pvr_cccb.h b/drivers/gpu/drm/imagination/pvr_cccb.h new file mode 100644 index 0000000000..943fe8f2c9 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_cccb.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd.
*/ + +#ifndef PVR_CCCB_H +#define PVR_CCCB_H + +#include "pvr_rogue_fwif.h" +#include "pvr_rogue_fwif_shared.h" + +#include +#include + +#define PADDING_COMMAND_SIZE sizeof(struct rogue_fwif_ccb_cmd_header) + +/* Forward declaration from pvr_device.h. */ +struct pvr_device; + +/* Forward declaration from pvr_gem.h. */ +struct pvr_fw_object; + +/* Forward declaration from pvr_hwrt.h. */ +struct pvr_hwrt_data; + +struct pvr_cccb { + /** @ctrl_obj: FW object representing CCCB control structure. */ + struct pvr_fw_object *ctrl_obj; + + /** @cccb_obj: FW object representing CCCB. */ + struct pvr_fw_object *cccb_obj; + + /** + * @ctrl: Kernel mapping of CCCB control structure. @lock must be held + * when accessing. + */ + struct rogue_fwif_cccb_ctl *ctrl; + + /** @cccb: Kernel mapping of CCCB. @lock must be held when accessing. */ + u8 *cccb; + + /** @ctrl_fw_addr: FW virtual address of CCCB control structure. */ + u32 ctrl_fw_addr; + /** @cccb_fw_addr: FW virtual address of CCCB. */ + u32 cccb_fw_addr; + + /** @size: Size of CCCB in bytes. */ + size_t size; + + /** @write_offset: CCCB write offset. */ + u32 write_offset; + + /** @wrap_mask: CCCB wrap mask. */ + u32 wrap_mask; +}; + +int pvr_cccb_init(struct pvr_device *pvr_dev, struct pvr_cccb *cccb, + u32 size_log2, const char *name); +void pvr_cccb_fini(struct pvr_cccb *cccb); + +void pvr_cccb_write_command_with_header(struct pvr_cccb *pvr_cccb, + u32 cmd_type, u32 cmd_size, void *cmd_data, + u32 ext_job_ref, u32 int_job_ref); +void pvr_cccb_send_kccb_kick(struct pvr_device *pvr_dev, + struct pvr_cccb *pvr_cccb, u32 cctx_fw_addr, + struct pvr_hwrt_data *hwrt); +void pvr_cccb_send_kccb_combined_kick(struct pvr_device *pvr_dev, + struct pvr_cccb *geom_cccb, + struct pvr_cccb *frag_cccb, + u32 geom_ctx_fw_addr, + u32 frag_ctx_fw_addr, + struct pvr_hwrt_data *hwrt, + bool frag_is_pr); +bool pvr_cccb_cmdseq_fits(struct pvr_cccb *pvr_cccb, size_t size); + +/** + * pvr_cccb_get_size_of_cmd_with_hdr() - Get the size of a command and its header. + * @cmd_size: Command size. + * + * Returns the size of the command and its header. + */ +static __always_inline u32 +pvr_cccb_get_size_of_cmd_with_hdr(u32 cmd_size) +{ + WARN_ON(!IS_ALIGNED(cmd_size, 8)); + return sizeof(struct rogue_fwif_ccb_cmd_header) + ALIGN(cmd_size, 8); +} + +/** + * pvr_cccb_cmdseq_can_fit() - Check if a command sequence can fit in the CCCB. + * @pvr_cccb: Target Client CCB. + * @size: Command sequence size. + * + * Returns: + * * true if the CCCB is big enough to contain a command sequence, or + * * false otherwise. + */ +static __always_inline bool +pvr_cccb_cmdseq_can_fit(struct pvr_cccb *pvr_cccb, size_t size) +{ + /* We divide the capacity by two to simplify our CCCB fencing logic: + * we want to be sure that, no matter what we had queued before, we + * are able to either queue our command sequence at the end or add a + * padding command and queue the command sequence at the beginning + * of the CCCB. If the command sequence size is bigger than half the + * CCCB capacity, we'd have to queue the padding command and make sure + * the FW is done processing it before queueing our command sequence.
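+ * (Concretely, not part of the patch: with a 1 KiB CCCB the check below
+ * caps any command sequence at 512 bytes including its padding command,
+ * so one maximal sequence already queued plus a wrap padding still leaves
+ * room to queue the next sequence without waiting on read_offset.)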
+ */ + return size + PADDING_COMMAND_SIZE <= pvr_cccb->size / 2; +} + +#endif /* PVR_CCCB_H */ diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c new file mode 100644 index 0000000000..eded5e955c --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_context.c @@ -0,0 +1,464 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#include "pvr_cccb.h" +#include "pvr_context.h" +#include "pvr_device.h" +#include "pvr_drv.h" +#include "pvr_gem.h" +#include "pvr_job.h" +#include "pvr_power.h" +#include "pvr_rogue_fwif.h" +#include "pvr_rogue_fwif_common.h" +#include "pvr_rogue_fwif_resetframework.h" +#include "pvr_stream.h" +#include "pvr_stream_defs.h" +#include "pvr_vm.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int +remap_priority(struct pvr_file *pvr_file, s32 uapi_priority, + enum pvr_context_priority *priority_out) +{ + switch (uapi_priority) { + case DRM_PVR_CTX_PRIORITY_LOW: + *priority_out = PVR_CTX_PRIORITY_LOW; + break; + case DRM_PVR_CTX_PRIORITY_NORMAL: + *priority_out = PVR_CTX_PRIORITY_MEDIUM; + break; + case DRM_PVR_CTX_PRIORITY_HIGH: + if (!capable(CAP_SYS_NICE) && !drm_is_current_master(from_pvr_file(pvr_file))) + return -EACCES; + *priority_out = PVR_CTX_PRIORITY_HIGH; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int get_fw_obj_size(enum drm_pvr_ctx_type type) +{ + switch (type) { + case DRM_PVR_CTX_TYPE_RENDER: + return sizeof(struct rogue_fwif_fwrendercontext); + case DRM_PVR_CTX_TYPE_COMPUTE: + return sizeof(struct rogue_fwif_fwcomputecontext); + case DRM_PVR_CTX_TYPE_TRANSFER_FRAG: + return sizeof(struct rogue_fwif_fwtransfercontext); + } + + return -EINVAL; +} + +static int +process_static_context_state(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs, + u64 stream_user_ptr, u32 stream_size, void *dest) +{ + void *stream; + int err; + + stream = kzalloc(stream_size, GFP_KERNEL); + if (!stream) + return -ENOMEM; + + if (copy_from_user(stream, u64_to_user_ptr(stream_user_ptr), stream_size)) { + err = -EFAULT; + goto err_free; + } + + err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest); + if (err) + goto err_free; + + kfree(stream); + + return 0; + +err_free: + kfree(stream); + + return err; +} + +static int init_render_fw_objs(struct pvr_context *ctx, + struct drm_pvr_ioctl_create_context_args *args, + void *fw_ctx_map) +{ + struct rogue_fwif_static_rendercontext_state *static_rendercontext_state; + struct rogue_fwif_fwrendercontext *fw_render_context = fw_ctx_map; + + if (!args->static_context_state_len) + return -EINVAL; + + static_rendercontext_state = &fw_render_context->static_render_context_state; + + /* Copy static render context state from userspace. */ + return process_static_context_state(ctx->pvr_dev, + &pvr_static_render_context_state_stream, + args->static_context_state, + args->static_context_state_len, + &static_rendercontext_state->ctxswitch_regs[0]); +} + +static int init_compute_fw_objs(struct pvr_context *ctx, + struct drm_pvr_ioctl_create_context_args *args, + void *fw_ctx_map) +{ + struct rogue_fwif_fwcomputecontext *fw_compute_context = fw_ctx_map; + struct rogue_fwif_cdm_registers_cswitch *ctxswitch_regs; + + if (!args->static_context_state_len) + return -EINVAL; + + ctxswitch_regs = &fw_compute_context->static_compute_context_state.ctxswitch_regs; + + /* Copy static compute context state from userspace.
*/ + return process_static_context_state(ctx->pvr_dev, + &pvr_static_compute_context_state_stream, + args->static_context_state, + args->static_context_state_len, + ctxswitch_regs); +} + +static int init_transfer_fw_objs(struct pvr_context *ctx, + struct drm_pvr_ioctl_create_context_args *args, + void *fw_ctx_map) +{ + if (args->static_context_state_len) + return -EINVAL; + + return 0; +} + +static int init_fw_objs(struct pvr_context *ctx, + struct drm_pvr_ioctl_create_context_args *args, + void *fw_ctx_map) +{ + switch (ctx->type) { + case DRM_PVR_CTX_TYPE_RENDER: + return init_render_fw_objs(ctx, args, fw_ctx_map); + case DRM_PVR_CTX_TYPE_COMPUTE: + return init_compute_fw_objs(ctx, args, fw_ctx_map); + case DRM_PVR_CTX_TYPE_TRANSFER_FRAG: + return init_transfer_fw_objs(ctx, args, fw_ctx_map); + } + + return -EINVAL; +} + +static void +ctx_fw_data_init(void *cpu_ptr, void *priv) +{ + struct pvr_context *ctx = priv; + + memcpy(cpu_ptr, ctx->data, ctx->data_size); +} + +/** + * pvr_context_destroy_queues() - Destroy all queues attached to a context. + * @ctx: Context to destroy queues on. + * + * Should be called when the last reference to a context object is dropped. + * It releases all resources attached to the queues bound to this context. + */ +static void pvr_context_destroy_queues(struct pvr_context *ctx) +{ + switch (ctx->type) { + case DRM_PVR_CTX_TYPE_RENDER: + pvr_queue_destroy(ctx->queues.fragment); + pvr_queue_destroy(ctx->queues.geometry); + break; + case DRM_PVR_CTX_TYPE_COMPUTE: + pvr_queue_destroy(ctx->queues.compute); + break; + case DRM_PVR_CTX_TYPE_TRANSFER_FRAG: + pvr_queue_destroy(ctx->queues.transfer); + break; + } +} + +/** + * pvr_context_create_queues() - Create all queues attached to a context. + * @ctx: Context to create queues on. + * @args: Context creation arguments passed by userspace. + * @fw_ctx_map: CPU mapping of the FW context object. + * + * Return: + * * 0 on success, or + * * A negative error code otherwise. + */ +static int pvr_context_create_queues(struct pvr_context *ctx, + struct drm_pvr_ioctl_create_context_args *args, + void *fw_ctx_map) +{ + int err; + + switch (ctx->type) { + case DRM_PVR_CTX_TYPE_RENDER: + ctx->queues.geometry = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_GEOMETRY, + args, fw_ctx_map); + if (IS_ERR(ctx->queues.geometry)) { + err = PTR_ERR(ctx->queues.geometry); + ctx->queues.geometry = NULL; + goto err_destroy_queues; + } + + ctx->queues.fragment = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_FRAGMENT, + args, fw_ctx_map); + if (IS_ERR(ctx->queues.fragment)) { + err = PTR_ERR(ctx->queues.fragment); + ctx->queues.fragment = NULL; + goto err_destroy_queues; + } + return 0; + + case DRM_PVR_CTX_TYPE_COMPUTE: + ctx->queues.compute = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_COMPUTE, + args, fw_ctx_map); + if (IS_ERR(ctx->queues.compute)) { + err = PTR_ERR(ctx->queues.compute); + ctx->queues.compute = NULL; + goto err_destroy_queues; + } + return 0; + + case DRM_PVR_CTX_TYPE_TRANSFER_FRAG: + ctx->queues.transfer = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_TRANSFER_FRAG, + args, fw_ctx_map); + if (IS_ERR(ctx->queues.transfer)) { + err = PTR_ERR(ctx->queues.transfer); + ctx->queues.transfer = NULL; + goto err_destroy_queues; + } + return 0; + } + + return -EINVAL; + +err_destroy_queues: + pvr_context_destroy_queues(ctx); + return err; +} + +/** + * pvr_context_kill_queues() - Kill queues attached to context. + * @ctx: Context to kill queues on. 
+ * + * Killing the queues implies making them unusable for future jobs, while still + * giving the currently submitted jobs a chance to finish. Queue resources will + * stay around until pvr_context_destroy_queues() is called. + */ +static void pvr_context_kill_queues(struct pvr_context *ctx) +{ + switch (ctx->type) { + case DRM_PVR_CTX_TYPE_RENDER: + pvr_queue_kill(ctx->queues.fragment); + pvr_queue_kill(ctx->queues.geometry); + break; + case DRM_PVR_CTX_TYPE_COMPUTE: + pvr_queue_kill(ctx->queues.compute); + break; + case DRM_PVR_CTX_TYPE_TRANSFER_FRAG: + pvr_queue_kill(ctx->queues.transfer); + break; + } +} + +/** + * pvr_context_create() - Create a context. + * @pvr_file: File to attach the created context to. + * @args: Context creation arguments. + * + * Return: + * * 0 on success, or + * * A negative error code on failure. + */ +int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args) +{ + struct pvr_device *pvr_dev = pvr_file->pvr_dev; + struct pvr_context *ctx; + int ctx_size; + int err; + + /* Context creation flags are currently unused and must be zero. */ + if (args->flags) + return -EINVAL; + + ctx_size = get_fw_obj_size(args->type); + if (ctx_size < 0) + return ctx_size; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->data_size = ctx_size; + ctx->type = args->type; + ctx->flags = args->flags; + ctx->pvr_dev = pvr_dev; + kref_init(&ctx->ref_count); + + err = remap_priority(pvr_file, args->priority, &ctx->priority); + if (err) + goto err_free_ctx; + + ctx->vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle); + if (IS_ERR(ctx->vm_ctx)) { + err = PTR_ERR(ctx->vm_ctx); + goto err_free_ctx; + } + + ctx->data = kzalloc(ctx_size, GFP_KERNEL); + if (!ctx->data) { + err = -ENOMEM; + goto err_put_vm; + } + + err = pvr_context_create_queues(ctx, args, ctx->data); + if (err) + goto err_free_ctx_data; + + err = init_fw_objs(ctx, args, ctx->data); + if (err) + goto err_destroy_queues; + + err = pvr_fw_object_create(pvr_dev, ctx_size, PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + ctx_fw_data_init, ctx, &ctx->fw_obj); + if (err) + goto err_free_ctx_data; + + err = xa_alloc(&pvr_dev->ctx_ids, &ctx->ctx_id, ctx, xa_limit_32b, GFP_KERNEL); + if (err) + goto err_destroy_fw_obj; + + err = xa_alloc(&pvr_file->ctx_handles, &args->handle, ctx, xa_limit_32b, GFP_KERNEL); + if (err) { + /* + * It's possible that another thread could have taken a reference on the context at + * this point as it is in the ctx_ids xarray. Therefore instead of directly + * destroying the context, drop a reference instead. + */ + pvr_context_put(ctx); + return err; + } + + return 0; + +err_destroy_fw_obj: + pvr_fw_object_destroy(ctx->fw_obj); + +err_destroy_queues: + pvr_context_destroy_queues(ctx); + +err_free_ctx_data: + kfree(ctx->data); + +err_put_vm: + pvr_vm_context_put(ctx->vm_ctx); + +err_free_ctx: + kfree(ctx); + return err; +} + +static void +pvr_context_release(struct kref *ref_count) +{ + struct pvr_context *ctx = + container_of(ref_count, struct pvr_context, ref_count); + struct pvr_device *pvr_dev = ctx->pvr_dev; + + xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id); + pvr_context_destroy_queues(ctx); + pvr_fw_object_destroy(ctx->fw_obj); + kfree(ctx->data); + pvr_vm_context_put(ctx->vm_ctx); + kfree(ctx); +} + +/** + * pvr_context_put() - Release reference on context + * @ctx: Target context.
+ */
+void
+pvr_context_put(struct pvr_context *ctx)
+{
+	if (ctx)
+		kref_put(&ctx->ref_count, pvr_context_release);
+}
+
+/**
+ * pvr_context_destroy() - Destroy context.
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Userspace context handle.
+ *
+ * Removes context from context list and drops initial reference. Context will
+ * then be destroyed once all outstanding references are dropped.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if context not in context list.
+ */
+int
+pvr_context_destroy(struct pvr_file *pvr_file, u32 handle)
+{
+	struct pvr_context *ctx = xa_erase(&pvr_file->ctx_handles, handle);
+
+	if (!ctx)
+		return -EINVAL;
+
+	/* Make sure nothing can be queued to the queues after that point. */
+	pvr_context_kill_queues(ctx);
+
+	/* Release the reference held by the handle set. */
+	pvr_context_put(ctx);
+
+	return 0;
+}
+
+/**
+ * pvr_destroy_contexts_for_file() - Destroy any contexts associated with the given file
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all contexts associated with @pvr_file from the device context list and drops initial
+ * references. Contexts will then be destroyed once all outstanding references are dropped.
+ */
+void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file)
+{
+	struct pvr_context *ctx;
+	unsigned long handle;
+
+	xa_for_each(&pvr_file->ctx_handles, handle, ctx)
+		pvr_context_destroy(pvr_file, handle);
+}
+
+/**
+ * pvr_context_device_init() - Device-level initialization of context-related resources.
+ * @pvr_dev: The device to initialize.
+ */
+void pvr_context_device_init(struct pvr_device *pvr_dev)
+{
+	xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1);
+}
+
+/**
+ * pvr_context_device_fini() - Device-level cleanup of context-related resources.
+ * @pvr_dev: The device to clean up.
+ */
+void pvr_context_device_fini(struct pvr_device *pvr_dev)
+{
+	WARN_ON(!xa_empty(&pvr_dev->ctx_ids));
+	xa_destroy(&pvr_dev->ctx_ids);
+}
diff --git a/drivers/gpu/drm/imagination/pvr_context.h b/drivers/gpu/drm/imagination/pvr_context.h
new file mode 100644
index 0000000000..0c7b97dfa6
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_context.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_CONTEXT_H
+#define PVR_CONTEXT_H
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "pvr_cccb.h"
+#include "pvr_device.h"
+#include "pvr_queue.h"
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_fw_object;
+
+enum pvr_context_priority {
+	PVR_CTX_PRIORITY_LOW = 0,
+	PVR_CTX_PRIORITY_MEDIUM,
+	PVR_CTX_PRIORITY_HIGH,
+};
+
+/**
+ * struct pvr_context - Context data
+ */
+struct pvr_context {
+	/** @ref_count: Refcount for context. */
+	struct kref ref_count;
+
+	/** @pvr_dev: Pointer to owning device. */
+	struct pvr_device *pvr_dev;
+
+	/** @vm_ctx: Pointer to associated VM context. */
+	struct pvr_vm_context *vm_ctx;
+
+	/** @type: Type of context. */
+	enum drm_pvr_ctx_type type;
+
+	/** @flags: Context flags. */
+	u32 flags;
+
+	/** @priority: Context priority. */
+	enum pvr_context_priority priority;
+
+	/** @fw_obj: FW object representing FW-side context data. */
+	struct pvr_fw_object *fw_obj;
+
+	/** @data: Pointer to local copy of FW context data. */
+	void *data;
+
+	/** @data_size: Size of FW context data, in bytes. */
+	u32 data_size;
+
+	/** @ctx_id: FW context ID. */
+	u32 ctx_id;
+
+	/**
+	 * @faulty: Set to 1 when the context queues had unfinished jobs when
+	 * a GPU reset happened.
+	 *
+	 * In that case, the context is in an inconsistent state and can't be
+	 * used anymore.
+	 */
+	atomic_t faulty;
+
+	/** @queues: Union containing all kinds of queues. */
+	union {
+		struct {
+			/** @geometry: Geometry queue. */
+			struct pvr_queue *geometry;
+
+			/** @fragment: Fragment queue. */
+			struct pvr_queue *fragment;
+		};
+
+		/** @compute: Compute queue. */
+		struct pvr_queue *compute;
+
+		/** @transfer: Transfer queue. */
+		struct pvr_queue *transfer;
+	} queues;
+};
+
+static __always_inline struct pvr_queue *
+pvr_context_get_queue_for_job(struct pvr_context *ctx, enum drm_pvr_job_type type)
+{
+	switch (type) {
+	case DRM_PVR_JOB_TYPE_GEOMETRY:
+		return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.geometry : NULL;
+	case DRM_PVR_JOB_TYPE_FRAGMENT:
+		return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.fragment : NULL;
+	case DRM_PVR_JOB_TYPE_COMPUTE:
+		return ctx->type == DRM_PVR_CTX_TYPE_COMPUTE ? ctx->queues.compute : NULL;
+	case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
+		return ctx->type == DRM_PVR_CTX_TYPE_TRANSFER_FRAG ? ctx->queues.transfer : NULL;
+	}
+
+	return NULL;
+}
+
+/**
+ * pvr_context_get() - Take additional reference on context.
+ * @ctx: Context pointer.
+ *
+ * Call pvr_context_put() to release.
+ *
+ * Returns:
+ * * The requested context on success, or
+ * * %NULL if no context pointer passed.
+ */
+static __always_inline struct pvr_context *
+pvr_context_get(struct pvr_context *ctx)
+{
+	if (ctx)
+		kref_get(&ctx->ref_count);
+
+	return ctx;
+}
+
+/**
+ * pvr_context_lookup() - Lookup context pointer from handle and file.
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Context handle.
+ *
+ * Takes reference on context. Call pvr_context_put() to release.
+ *
+ * Return:
+ * * The requested context on success, or
+ * * %NULL on failure (context does not exist, or does not belong to @pvr_file).
+ */
+static __always_inline struct pvr_context *
+pvr_context_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+	struct pvr_context *ctx;
+
+	/* Take the array lock to protect against context removal. */
+	xa_lock(&pvr_file->ctx_handles);
+	ctx = pvr_context_get(xa_load(&pvr_file->ctx_handles, handle));
+	xa_unlock(&pvr_file->ctx_handles);
+
+	return ctx;
+}
+
+/**
+ * pvr_context_lookup_id() - Lookup context pointer from ID.
+ * @pvr_dev: Device pointer.
+ * @id: FW context ID.
+ *
+ * Takes reference on context. Call pvr_context_put() to release.
+ *
+ * Return:
+ * * The requested context on success, or
+ * * %NULL on failure (context does not exist).
+ */
+static __always_inline struct pvr_context *
+pvr_context_lookup_id(struct pvr_device *pvr_dev, u32 id)
+{
+	struct pvr_context *ctx;
+
+	/* Take the array lock to protect against context removal. */
+	xa_lock(&pvr_dev->ctx_ids);
+
+	/* Contexts are removed from the ctx_ids set in the context release path,
+	 * meaning the ref_count reached zero before they get removed. We need
+	 * to make sure we're not trying to acquire a context that's being
+	 * destroyed.
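+	 * kref_get_unless_zero() only succeeds while the refcount is still
+	 * non-zero, so a context that is mid-release is treated as if it had
+	 * already been removed from the array.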
+	 */
+	ctx = xa_load(&pvr_dev->ctx_ids, id);
+	if (ctx && !kref_get_unless_zero(&ctx->ref_count))
+		ctx = NULL;
+
+	xa_unlock(&pvr_dev->ctx_ids);
+
+	return ctx;
+}
+
+static __always_inline u32
+pvr_context_get_fw_addr(struct pvr_context *ctx)
+{
+	u32 ctx_fw_addr = 0;
+
+	pvr_fw_object_get_fw_addr(ctx->fw_obj, &ctx_fw_addr);
+
+	return ctx_fw_addr;
+}
+
+void pvr_context_put(struct pvr_context *ctx);
+
+int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_context_args *args);
+
+int pvr_context_destroy(struct pvr_file *pvr_file, u32 handle);
+
+void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file);
+
+void pvr_context_device_init(struct pvr_device *pvr_dev);
+
+void pvr_context_device_fini(struct pvr_device *pvr_dev);
+
+#endif /* PVR_CONTEXT_H */
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.c b/drivers/gpu/drm/imagination/pvr_debugfs.c
new file mode 100644
index 0000000000..6b77c9b4bd
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_debugfs.h"
+
+#include "pvr_device.h"
+#include "pvr_fw_trace.h"
+#include "pvr_params.h"
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+static const struct pvr_debugfs_entry pvr_debugfs_entries[] = {
+	{"pvr_params", pvr_params_debugfs_init},
+	{"pvr_fw", pvr_fw_trace_debugfs_init},
+};
+
+void
+pvr_debugfs_init(struct drm_minor *minor)
+{
+	struct drm_device *drm_dev = minor->dev;
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	struct dentry *root = minor->debugfs_root;
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(pvr_debugfs_entries); ++i) {
+		const struct pvr_debugfs_entry *entry = &pvr_debugfs_entries[i];
+		struct dentry *dir;
+
+		dir = debugfs_create_dir(entry->name, root);
+		if (IS_ERR(dir)) {
+			drm_warn(drm_dev,
+				 "failed to create debugfs dir '%s' (err=%d)\n",
+				 entry->name, (int)PTR_ERR(dir));
+			continue;
+		}
+
+		entry->init(pvr_dev, dir);
+	}
+}
+
+/*
+ * Since all entries are created under &drm_minor->debugfs_root, there's no
+ * need for a pvr_debugfs_fini() as DRM will clean up everything under its root
+ * automatically.
+ */
diff --git a/drivers/gpu/drm/imagination/pvr_debugfs.h b/drivers/gpu/drm/imagination/pvr_debugfs.h
new file mode 100644
index 0000000000..ebacbd13b8
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_debugfs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DEBUGFS_H
+#define PVR_DEBUGFS_H
+
+/* Forward declaration from . */
+struct drm_minor;
+
+#if defined(CONFIG_DEBUG_FS)
+/* Forward declaration from "pvr_device.h". */
+struct pvr_device;
+
+/* Forward declaration from . */
+struct dentry;
+
+struct pvr_debugfs_entry {
+	const char *name;
+	void (*init)(struct pvr_device *pvr_dev, struct dentry *dir);
+};
+
+void pvr_debugfs_init(struct drm_minor *minor);
+#else /* defined(CONFIG_DEBUG_FS) */
+#include
+
+static __always_inline void pvr_debugfs_init(struct drm_minor *minor) {}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* PVR_DEBUGFS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
new file mode 100644
index 0000000000..1704c02685
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd.
*/ + +#include "pvr_device.h" +#include "pvr_device_info.h" + +#include "pvr_fw.h" +#include "pvr_params.h" +#include "pvr_power.h" +#include "pvr_queue.h" +#include "pvr_rogue_cr_defs.h" +#include "pvr_stream.h" +#include "pvr_vm.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Major number for the supported version of the firmware. */ +#define PVR_FW_VERSION_MAJOR 1 + +/** + * pvr_device_reg_init() - Initialize kernel access to a PowerVR device's + * control registers. + * @pvr_dev: Target PowerVR device. + * + * Sets struct pvr_device->regs. + * + * This method of mapping the device control registers into memory ensures that + * they are unmapped when the driver is detached (i.e. no explicit cleanup is + * required). + * + * Return: + * * 0 on success, or + * * Any error returned by devm_platform_ioremap_resource(). + */ +static int +pvr_device_reg_init(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + struct platform_device *plat_dev = to_platform_device(drm_dev->dev); + struct resource *regs_resource; + void __iomem *regs; + + pvr_dev->regs_resource = NULL; + pvr_dev->regs = NULL; + + regs = devm_platform_get_and_ioremap_resource(plat_dev, 0, ®s_resource); + if (IS_ERR(regs)) + return dev_err_probe(drm_dev->dev, PTR_ERR(regs), + "failed to ioremap gpu registers\n"); + + pvr_dev->regs = regs; + pvr_dev->regs_resource = regs_resource; + + return 0; +} + +/** + * pvr_device_clk_init() - Initialize clocks required by a PowerVR device + * @pvr_dev: Target PowerVR device. + * + * Sets struct pvr_device->core_clk, struct pvr_device->sys_clk and + * struct pvr_device->mem_clk. + * + * Three clocks are required by the PowerVR device: core, sys and mem. On + * return, this function guarantees that the clocks are in one of the following + * states: + * + * * All successfully initialized, + * * Core errored, sys and mem uninitialized, + * * Core deinitialized, sys errored, mem uninitialized, or + * * Core and sys deinitialized, mem errored. + * + * Return: + * * 0 on success, + * * Any error returned by devm_clk_get(), or + * * Any error returned by devm_clk_get_optional(). + */ +static int pvr_device_clk_init(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + struct clk *core_clk; + struct clk *sys_clk; + struct clk *mem_clk; + + core_clk = devm_clk_get(drm_dev->dev, "core"); + if (IS_ERR(core_clk)) + return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk), + "failed to get core clock\n"); + + sys_clk = devm_clk_get_optional(drm_dev->dev, "sys"); + if (IS_ERR(sys_clk)) + return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk), + "failed to get sys clock\n"); + + mem_clk = devm_clk_get_optional(drm_dev->dev, "mem"); + if (IS_ERR(mem_clk)) + return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk), + "failed to get mem clock\n"); + + pvr_dev->core_clk = core_clk; + pvr_dev->sys_clk = sys_clk; + pvr_dev->mem_clk = mem_clk; + + return 0; +} + +/** + * pvr_device_process_active_queues() - Process all queue related events. + * @pvr_dev: PowerVR device to check + * + * This is called any time we receive a FW event. It iterates over all + * active queues and calls pvr_queue_process() on them. 
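+ *
+ * It is called from the threaded IRQ handler, and only once the firmware
+ * has booted.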
+ */
+static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
+{
+	struct pvr_queue *queue, *tmp_queue;
+	LIST_HEAD(active_queues);
+
+	mutex_lock(&pvr_dev->queues.lock);
+
+	/* Move all active queues to a temporary list. Queues that remain
+	 * active after we're done processing them are re-inserted to
+	 * the queues.active list by pvr_queue_process().
+	 */
+	list_splice_init(&pvr_dev->queues.active, &active_queues);
+
+	list_for_each_entry_safe(queue, tmp_queue, &active_queues, node)
+		pvr_queue_process(queue);
+
+	mutex_unlock(&pvr_dev->queues.lock);
+}
+
+static irqreturn_t pvr_device_irq_thread_handler(int irq, void *data)
+{
+	struct pvr_device *pvr_dev = data;
+	irqreturn_t ret = IRQ_NONE;
+
+	/* We are in the threaded handler, we can keep dequeuing events until we
+	 * don't see any. This should allow us to reduce the number of interrupts
+	 * when the GPU is receiving a massive amount of short jobs.
+	 */
+	while (pvr_fw_irq_pending(pvr_dev)) {
+		pvr_fw_irq_clear(pvr_dev);
+
+		if (pvr_dev->fw_dev.booted) {
+			pvr_fwccb_process(pvr_dev);
+			pvr_kccb_wake_up_waiters(pvr_dev);
+			pvr_device_process_active_queues(pvr_dev);
+		}
+
+		pm_runtime_mark_last_busy(from_pvr_device(pvr_dev)->dev);
+
+		ret = IRQ_HANDLED;
+	}
+
+	/* Unmask FW irqs before returning, so new interrupts can be received. */
+	pvr_fw_irq_enable(pvr_dev);
+	return ret;
+}
+
+static irqreturn_t pvr_device_irq_handler(int irq, void *data)
+{
+	struct pvr_device *pvr_dev = data;
+
+	if (!pvr_fw_irq_pending(pvr_dev))
+		return IRQ_NONE; /* Spurious IRQ - ignore. */
+
+	/* Mask the FW interrupts before waking up the thread. Will be unmasked
+	 * when the thread handler is done processing events.
+	 */
+	pvr_fw_irq_disable(pvr_dev);
+	return IRQ_WAKE_THREAD;
+}
+
+/**
+ * pvr_device_irq_init() - Initialise IRQ required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ * * 0 on success,
+ * * Any error returned by platform_get_irq(), or
+ * * Any error returned by request_threaded_irq().
+ */
+static int
+pvr_device_irq_init(struct pvr_device *pvr_dev)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct platform_device *plat_dev = to_platform_device(drm_dev->dev);
+
+	init_waitqueue_head(&pvr_dev->kccb.rtn_q);
+
+	pvr_dev->irq = platform_get_irq(plat_dev, 0);
+	if (pvr_dev->irq < 0)
+		return pvr_dev->irq;
+
+	/* Clear any pending events before requesting the IRQ line. */
+	pvr_fw_irq_clear(pvr_dev);
+	pvr_fw_irq_enable(pvr_dev);
+
+	return request_threaded_irq(pvr_dev->irq, pvr_device_irq_handler,
+				    pvr_device_irq_thread_handler,
+				    IRQF_SHARED, "gpu", pvr_dev);
+}
+
+/**
+ * pvr_device_irq_fini() - Deinitialise IRQ required by a PowerVR device
+ * @pvr_dev: Target PowerVR device.
+ */
+static void
+pvr_device_irq_fini(struct pvr_device *pvr_dev)
+{
+	free_irq(pvr_dev->irq, pvr_dev);
+}
+
+/**
+ * pvr_build_firmware_filename() - Construct a PowerVR firmware filename
+ * @pvr_dev: Target PowerVR device.
+ * @base: First part of the filename.
+ * @major: Major version number.
+ *
+ * A PowerVR firmware filename consists of three parts separated by underscores
+ * (``'_'``) along with a '.fw' file suffix. The first part is the exact value
+ * of @base, the second part is the hardware version string derived from @pvr_dev
+ * and the final part is the firmware version number constructed from @major with
+ * a 'v' prefix, e.g. powervr/rogue_4.40.2.51_v1.fw.
+ *
+ * The returned string will have been slab allocated and must be freed with
+ * kfree().
+ * + * Return: + * * The constructed filename on success, or + * * Any error returned by kasprintf(). + */ +static char * +pvr_build_firmware_filename(struct pvr_device *pvr_dev, const char *base, + u8 major) +{ + struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id; + + return kasprintf(GFP_KERNEL, "%s_%d.%d.%d.%d_v%d.fw", base, gpu_id->b, + gpu_id->v, gpu_id->n, gpu_id->c, major); +} + +static void +pvr_release_firmware(void *data) +{ + struct pvr_device *pvr_dev = data; + + release_firmware(pvr_dev->fw_dev.firmware); +} + +/** + * pvr_request_firmware() - Load firmware for a PowerVR device + * @pvr_dev: Target PowerVR device. + * + * See pvr_build_firmware_filename() for details on firmware file naming. + * + * Return: + * * 0 on success, + * * Any error returned by pvr_build_firmware_filename(), or + * * Any error returned by request_firmware(). + */ +static int +pvr_request_firmware(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = &pvr_dev->base; + char *filename; + const struct firmware *fw; + int err; + + filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue", + PVR_FW_VERSION_MAJOR); + if (!filename) + return -ENOMEM; + + /* + * This function takes a copy of &filename, meaning we can free our + * instance before returning. + */ + err = request_firmware(&fw, filename, pvr_dev->base.dev); + if (err) { + drm_err(drm_dev, "failed to load firmware %s (err=%d)\n", + filename, err); + goto err_free_filename; + } + + drm_info(drm_dev, "loaded firmware %s\n", filename); + kfree(filename); + + pvr_dev->fw_dev.firmware = fw; + + return devm_add_action_or_reset(drm_dev->dev, pvr_release_firmware, pvr_dev); + +err_free_filename: + kfree(filename); + + return err; +} + +/** + * pvr_load_gpu_id() - Load a PowerVR device's GPU ID (BVNC) from control registers. + * + * Sets struct pvr_dev.gpu_id. + * + * @pvr_dev: Target PowerVR device. + */ +static void +pvr_load_gpu_id(struct pvr_device *pvr_dev) +{ + struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id; + u64 bvnc; + + /* + * Try reading the BVNC using the newer (cleaner) method first. If the + * B value is zero, fall back to the older method. + */ + bvnc = pvr_cr_read64(pvr_dev, ROGUE_CR_CORE_ID__PBVNC); + + gpu_id->b = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__BRANCH_ID); + if (gpu_id->b != 0) { + gpu_id->v = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__VERSION_ID); + gpu_id->n = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS); + gpu_id->c = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__CONFIG_ID); + } else { + u32 core_rev = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_REVISION); + u32 core_id = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_ID); + u16 core_id_config = PVR_CR_FIELD_GET(core_id, CORE_ID_CONFIG); + + gpu_id->b = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MAJOR); + gpu_id->v = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MINOR); + gpu_id->n = FIELD_GET(0xFF00, core_id_config); + gpu_id->c = FIELD_GET(0x00FF, core_id_config); + } +} + +/** + * pvr_set_dma_info() - Set PowerVR device DMA information + * @pvr_dev: Target PowerVR device. + * + * Sets the DMA mask and max segment size for the PowerVR device. + * + * Return: + * * 0 on success, + * * Any error returned by PVR_FEATURE_VALUE(), or + * * Any error returned by dma_set_mask(). 
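+ *
+ * For example (illustrative width), a device reporting a 40-bit physical bus
+ * would get a streaming DMA mask of DMA_BIT_MASK(40) == 0xffffffffff; the
+ * real width comes from the firmware-supplied feature data.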
+ */ + +static int +pvr_set_dma_info(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + u16 phys_bus_width; + int err; + + err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width); + if (err) { + drm_err(drm_dev, "Failed to get device physical bus width\n"); + return err; + } + + err = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(phys_bus_width)); + if (err) { + drm_err(drm_dev, "Failed to set DMA mask (err=%d)\n", err); + return err; + } + + dma_set_max_seg_size(drm_dev->dev, UINT_MAX); + + return 0; +} + +/** + * pvr_device_gpu_init() - GPU-specific initialization for a PowerVR device + * @pvr_dev: Target PowerVR device. + * + * The following steps are taken to ensure the device is ready: + * + * 1. Read the hardware version information from control registers, + * 2. Initialise the hardware feature information, + * 3. Setup the device DMA information, + * 4. Setup the device-scoped memory context, and + * 5. Load firmware into the device. + * + * Return: + * * 0 on success, + * * -%ENODEV if the GPU is not supported, + * * Any error returned by pvr_set_dma_info(), + * * Any error returned by pvr_memory_context_init(), or + * * Any error returned by pvr_request_firmware(). + */ +static int +pvr_device_gpu_init(struct pvr_device *pvr_dev) +{ + int err; + + pvr_load_gpu_id(pvr_dev); + + err = pvr_request_firmware(pvr_dev); + if (err) + return err; + + err = pvr_fw_validate_init_device_info(pvr_dev); + if (err) + return err; + + if (PVR_HAS_FEATURE(pvr_dev, meta)) + pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_META; + else if (PVR_HAS_FEATURE(pvr_dev, mips)) + pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_MIPS; + else if (PVR_HAS_FEATURE(pvr_dev, riscv_fw_processor)) + pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_RISCV; + else + return -EINVAL; + + pvr_stream_create_musthave_masks(pvr_dev); + + err = pvr_set_dma_info(pvr_dev); + if (err) + return err; + + if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) { + pvr_dev->kernel_vm_ctx = pvr_vm_create_context(pvr_dev, false); + if (IS_ERR(pvr_dev->kernel_vm_ctx)) + return PTR_ERR(pvr_dev->kernel_vm_ctx); + } + + err = pvr_fw_init(pvr_dev); + if (err) + goto err_vm_ctx_put; + + return 0; + +err_vm_ctx_put: + if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) { + pvr_vm_context_put(pvr_dev->kernel_vm_ctx); + pvr_dev->kernel_vm_ctx = NULL; + } + + return err; +} + +/** + * pvr_device_gpu_fini() - GPU-specific deinitialization for a PowerVR device + * @pvr_dev: Target PowerVR device. + */ +static void +pvr_device_gpu_fini(struct pvr_device *pvr_dev) +{ + pvr_fw_fini(pvr_dev); + + if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) { + WARN_ON(!pvr_vm_context_put(pvr_dev->kernel_vm_ctx)); + pvr_dev->kernel_vm_ctx = NULL; + } +} + +/** + * pvr_device_init() - Initialize a PowerVR device + * @pvr_dev: Target PowerVR device. + * + * If this function returns successfully, the device will have been fully + * initialized. Otherwise, any parts of the device initialized before an error + * occurs will be de-initialized before returning. + * + * NOTE: The initialization steps currently taken are the bare minimum required + * to read from the control registers. The device is unlikely to function + * until further initialization steps are added. [This note should be + * removed when that happens.] 
+ * + * Return: + * * 0 on success, + * * Any error returned by pvr_device_reg_init(), + * * Any error returned by pvr_device_clk_init(), or + * * Any error returned by pvr_device_gpu_init(). + */ +int +pvr_device_init(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + struct device *dev = drm_dev->dev; + int err; + + /* + * Setup device parameters. We do this first in case other steps + * depend on them. + */ + err = pvr_device_params_init(&pvr_dev->params); + if (err) + return err; + + /* Enable and initialize clocks required for the device to operate. */ + err = pvr_device_clk_init(pvr_dev); + if (err) + return err; + + /* Explicitly power the GPU so we can access control registers before the FW is booted. */ + err = pm_runtime_resume_and_get(dev); + if (err) + return err; + + /* Map the control registers into memory. */ + err = pvr_device_reg_init(pvr_dev); + if (err) + goto err_pm_runtime_put; + + /* Perform GPU-specific initialization steps. */ + err = pvr_device_gpu_init(pvr_dev); + if (err) + goto err_pm_runtime_put; + + err = pvr_device_irq_init(pvr_dev); + if (err) + goto err_device_gpu_fini; + + pm_runtime_put(dev); + + return 0; + +err_device_gpu_fini: + pvr_device_gpu_fini(pvr_dev); + +err_pm_runtime_put: + pm_runtime_put_sync_suspend(dev); + + return err; +} + +/** + * pvr_device_fini() - Deinitialize a PowerVR device + * @pvr_dev: Target PowerVR device. + */ +void +pvr_device_fini(struct pvr_device *pvr_dev) +{ + /* + * Deinitialization stages are performed in reverse order compared to + * the initialization stages in pvr_device_init(). + */ + pvr_device_irq_fini(pvr_dev); + pvr_device_gpu_fini(pvr_dev); +} + +bool +pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk) +{ + switch (quirk) { + case 47217: + return PVR_HAS_QUIRK(pvr_dev, 47217); + case 48545: + return PVR_HAS_QUIRK(pvr_dev, 48545); + case 49927: + return PVR_HAS_QUIRK(pvr_dev, 49927); + case 51764: + return PVR_HAS_QUIRK(pvr_dev, 51764); + case 62269: + return PVR_HAS_QUIRK(pvr_dev, 62269); + default: + return false; + }; +} + +bool +pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement) +{ + switch (enhancement) { + case 35421: + return PVR_HAS_ENHANCEMENT(pvr_dev, 35421); + case 42064: + return PVR_HAS_ENHANCEMENT(pvr_dev, 42064); + default: + return false; + }; +} + +/** + * pvr_device_has_feature() - Look up device feature based on feature definition + * @pvr_dev: Device pointer. + * @feature: Feature to look up. Should be one of %PVR_FEATURE_*. + * + * Returns: + * * %true if feature is present on device, or + * * %false if feature is not present on device. 
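+ *
+ * Usage sketch, with an arbitrary feature picked for illustration:
+ *
+ *	bool can_tessellate =
+ *		pvr_device_has_feature(pvr_dev, PVR_FEATURE_TESSELLATION);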
+ */ +bool +pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature) +{ + switch (feature) { + case PVR_FEATURE_CLUSTER_GROUPING: + return PVR_HAS_FEATURE(pvr_dev, cluster_grouping); + + case PVR_FEATURE_COMPUTE_MORTON_CAPABLE: + return PVR_HAS_FEATURE(pvr_dev, compute_morton_capable); + + case PVR_FEATURE_FB_CDC_V4: + return PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4); + + case PVR_FEATURE_GPU_MULTICORE_SUPPORT: + return PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support); + + case PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE: + return PVR_HAS_FEATURE(pvr_dev, isp_zls_d24_s8_packing_ogl_mode); + + case PVR_FEATURE_S7_TOP_INFRASTRUCTURE: + return PVR_HAS_FEATURE(pvr_dev, s7_top_infrastructure); + + case PVR_FEATURE_TESSELLATION: + return PVR_HAS_FEATURE(pvr_dev, tessellation); + + case PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS: + return PVR_HAS_FEATURE(pvr_dev, tpu_dm_global_registers); + + case PVR_FEATURE_VDM_DRAWINDIRECT: + return PVR_HAS_FEATURE(pvr_dev, vdm_drawindirect); + + case PVR_FEATURE_VDM_OBJECT_LEVEL_LLS: + return PVR_HAS_FEATURE(pvr_dev, vdm_object_level_lls); + + case PVR_FEATURE_ZLS_SUBTILE: + return PVR_HAS_FEATURE(pvr_dev, zls_subtile); + + /* Derived features. */ + case PVR_FEATURE_CDM_USER_MODE_QUEUE: { + u8 cdm_control_stream_format = 0; + + PVR_FEATURE_VALUE(pvr_dev, cdm_control_stream_format, &cdm_control_stream_format); + return (cdm_control_stream_format >= 2 && cdm_control_stream_format <= 4); + } + + case PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP: + if (PVR_HAS_FEATURE(pvr_dev, fbcdc_algorithm)) { + u8 fbcdc_algorithm = 0; + + PVR_FEATURE_VALUE(pvr_dev, fbcdc_algorithm, &fbcdc_algorithm); + return (fbcdc_algorithm < 3 || PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4)); + } + return false; + + default: + WARN(true, "Looking up undefined feature %u\n", feature); + return false; + } +} diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h new file mode 100644 index 0000000000..ecdd5767d8 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_device.h @@ -0,0 +1,725 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_DEVICE_H +#define PVR_DEVICE_H + +#include "pvr_ccb.h" +#include "pvr_device_info.h" +#include "pvr_fw.h" +#include "pvr_params.h" +#include "pvr_rogue_fwif_stream.h" +#include "pvr_stream.h" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Forward declaration from . */ +struct clk; + +/* Forward declaration from . */ +struct firmware; + +/** + * struct pvr_gpu_id - Hardware GPU ID information for a PowerVR device + * @b: Branch ID. + * @v: Version ID. + * @n: Number of scalable units. + * @c: Config ID. + */ +struct pvr_gpu_id { + u16 b, v, n, c; +}; + +/** + * struct pvr_fw_version - Firmware version information + * @major: Major version number. + * @minor: Minor version number. + */ +struct pvr_fw_version { + u16 major, minor; +}; + +/** + * struct pvr_device - powervr-specific wrapper for &struct drm_device + */ +struct pvr_device { + /** + * @base: The underlying &struct drm_device. + * + * Do not access this member directly, instead call + * from_pvr_device(). + */ + struct drm_device base; + + /** @gpu_id: GPU ID detected at runtime. */ + struct pvr_gpu_id gpu_id; + + /** + * @features: Hardware feature information. + * + * Do not access this member directly, instead use PVR_HAS_FEATURE() + * or PVR_FEATURE_VALUE() macros. 
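+	 *
+	 * A typical value query looks like this (configure_bus() is a
+	 * hypothetical consumer; the types match pvr_set_dma_info()):
+	 *
+	 *	u16 width = 0;
+	 *
+	 *	if (!PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &width))
+	 *		configure_bus(width);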
+ */ + struct pvr_device_features features; + + /** + * @quirks: Hardware quirk information. + * + * Do not access this member directly, instead use PVR_HAS_QUIRK(). + */ + struct pvr_device_quirks quirks; + + /** + * @enhancements: Hardware enhancement information. + * + * Do not access this member directly, instead use + * PVR_HAS_ENHANCEMENT(). + */ + struct pvr_device_enhancements enhancements; + + /** @fw_version: Firmware version detected at runtime. */ + struct pvr_fw_version fw_version; + + /** @regs_resource: Resource representing device control registers. */ + struct resource *regs_resource; + + /** + * @regs: Device control registers. + * + * These are mapped into memory when the device is initialized; that + * location is where this pointer points. + */ + void __iomem *regs; + + /** + * @core_clk: General core clock. + * + * This is the primary clock used by the entire GPU core. + */ + struct clk *core_clk; + + /** + * @sys_clk: Optional system bus clock. + * + * This may be used on some platforms to provide an independent clock to the SoC Interface + * (SOCIF). If present, this needs to be enabled/disabled together with @core_clk. + */ + struct clk *sys_clk; + + /** + * @mem_clk: Optional memory clock. + * + * This may be used on some platforms to provide an independent clock to the Memory + * Interface (MEMIF). If present, this needs to be enabled/disabled together with @core_clk. + */ + struct clk *mem_clk; + + /** @irq: IRQ number. */ + int irq; + + /** @fwccb: Firmware CCB. */ + struct pvr_ccb fwccb; + + /** + * @kernel_vm_ctx: Virtual memory context used for kernel mappings. + * + * This is used for mappings in the firmware address region when a META firmware processor + * is in use. + * + * When a MIPS firmware processor is in use, this will be %NULL. + */ + struct pvr_vm_context *kernel_vm_ctx; + + /** @fw_dev: Firmware related data. */ + struct pvr_fw_device fw_dev; + + /** + * @params: Device-specific parameters. + * + * The values of these parameters are initialized from the + * defaults specified as module parameters. They may be + * modified at runtime via debugfs (if enabled). + */ + struct pvr_device_params params; + + /** @stream_musthave_quirks: Bit array of "must-have" quirks for stream commands. */ + u32 stream_musthave_quirks[PVR_STREAM_TYPE_MAX][PVR_STREAM_EXTHDR_TYPE_MAX]; + + /** + * @mmu_flush_cache_flags: Records which MMU caches require flushing + * before submitting the next job. + */ + atomic_t mmu_flush_cache_flags; + + /** + * @ctx_ids: Array of contexts belonging to this device. Array members + * are of type "struct pvr_context *". + * + * This array is used to allocate IDs used by the firmware. + */ + struct xarray ctx_ids; + + /** + * @free_list_ids: Array of free lists belonging to this device. Array members + * are of type "struct pvr_free_list *". + * + * This array is used to allocate IDs used by the firmware. + */ + struct xarray free_list_ids; + + /** + * @job_ids: Array of jobs belonging to this device. Array members + * are of type "struct pvr_job *". + */ + struct xarray job_ids; + + /** + * @queues: Queue-related fields. + */ + struct { + /** @queues.active: Active queue list. */ + struct list_head active; + + /** @queues.idle: Idle queue list. */ + struct list_head idle; + + /** @queues.lock: Lock protecting access to the active/idle + * lists. */ + struct mutex lock; + } queues; + + /** + * @watchdog: Watchdog for communications with firmware. + */ + struct { + /** @watchdog.work: Work item for watchdog callback. 
*/ + struct delayed_work work; + + /** + * @watchdog.old_kccb_cmds_executed: KCCB command execution + * count at last watchdog poll. + */ + u32 old_kccb_cmds_executed; + + /** + * @watchdog.kccb_stall_count: Number of watchdog polls + * KCCB has been stalled for. + */ + u32 kccb_stall_count; + } watchdog; + + /** + * @kccb: Circular buffer for communications with firmware. + */ + struct { + /** @kccb.ccb: Kernel CCB. */ + struct pvr_ccb ccb; + + /** @kccb.rtn_q: Waitqueue for KCCB command return waiters. */ + wait_queue_head_t rtn_q; + + /** @kccb.rtn_obj: Object representing KCCB return slots. */ + struct pvr_fw_object *rtn_obj; + + /** + * @kccb.rtn: Pointer to CPU mapping of KCCB return slots. + * Must be accessed by READ_ONCE()/WRITE_ONCE(). + */ + u32 *rtn; + + /** @kccb.slot_count: Total number of KCCB slots available. */ + u32 slot_count; + + /** @kccb.reserved_count: Number of KCCB slots reserved for + * future use. */ + u32 reserved_count; + + /** + * @kccb.waiters: List of KCCB slot waiters. + */ + struct list_head waiters; + + /** @kccb.fence_ctx: KCCB fence context. */ + struct { + /** @kccb.fence_ctx.id: KCCB fence context ID + * allocated with dma_fence_context_alloc(). */ + u64 id; + + /** @kccb.fence_ctx.seqno: Sequence number incremented + * each time a fence is created. */ + atomic_t seqno; + + /** + * @kccb.fence_ctx.lock: Lock used to synchronize + * access to fences allocated by this context. + */ + spinlock_t lock; + } fence_ctx; + } kccb; + + /** + * @lost: %true if the device has been lost. + * + * This variable is set if the device has become irretrievably unavailable, e.g. if the + * firmware processor has stopped responding and can not be revived via a hard reset. + */ + bool lost; + + /** + * @reset_sem: Reset semaphore. + * + * GPU reset code will lock this for writing. Any code that submits commands to the firmware + * that isn't in an IRQ handler or on the scheduler workqueue must lock this for reading. + * Once this has been successfully locked, &pvr_dev->lost _must_ be checked, and -%EIO must + * be returned if it is set. + */ + struct rw_semaphore reset_sem; + + /** @sched_wq: Workqueue for schedulers. */ + struct workqueue_struct *sched_wq; +}; + +/** + * struct pvr_file - powervr-specific data to be assigned to &struct + * drm_file.driver_priv + */ +struct pvr_file { + /** + * @file: A reference to the parent &struct drm_file. + * + * Do not access this member directly, instead call from_pvr_file(). + */ + struct drm_file *file; + + /** + * @pvr_dev: A reference to the powervr-specific wrapper for the + * associated device. Saves on repeated calls to to_pvr_device(). + */ + struct pvr_device *pvr_dev; + + /** + * @ctx_handles: Array of contexts belonging to this file. Array members + * are of type "struct pvr_context *". + * + * This array is used to allocate handles returned to userspace. + */ + struct xarray ctx_handles; + + /** + * @free_list_handles: Array of free lists belonging to this file. Array + * members are of type "struct pvr_free_list *". + * + * This array is used to allocate handles returned to userspace. + */ + struct xarray free_list_handles; + + /** + * @hwrt_handles: Array of HWRT datasets belonging to this file. Array + * members are of type "struct pvr_hwrt_dataset *". + * + * This array is used to allocate handles returned to userspace. + */ + struct xarray hwrt_handles; + + /** + * @vm_ctx_handles: Array of VM contexts belonging to this file. Array + * members are of type "struct pvr_vm_context *". 
+ * + * This array is used to allocate handles returned to userspace. + */ + struct xarray vm_ctx_handles; +}; + +/** + * PVR_HAS_FEATURE() - Tests whether a PowerVR device has a given feature + * @pvr_dev: [IN] Target PowerVR device. + * @feature: [IN] Hardware feature name. + * + * Feature names are derived from those found in &struct pvr_device_features by + * dropping the 'has_' prefix, which is applied by this macro. + * + * Return: + * * true if the named feature is present in the hardware + * * false if the named feature is not present in the hardware + */ +#define PVR_HAS_FEATURE(pvr_dev, feature) ((pvr_dev)->features.has_##feature) + +/** + * PVR_FEATURE_VALUE() - Gets a PowerVR device feature value + * @pvr_dev: [IN] Target PowerVR device. + * @feature: [IN] Feature name. + * @value_out: [OUT] Feature value. + * + * This macro will get a feature value for those features that have values. + * If the feature is not present, nothing will be stored to @value_out. + * + * Feature names are derived from those found in &struct pvr_device_features by + * dropping the 'has_' prefix. + * + * Return: + * * 0 on success, or + * * -%EINVAL if the named feature is not present in the hardware + */ +#define PVR_FEATURE_VALUE(pvr_dev, feature, value_out) \ + ({ \ + struct pvr_device *_pvr_dev = pvr_dev; \ + int _ret = -EINVAL; \ + if (_pvr_dev->features.has_##feature) { \ + *(value_out) = _pvr_dev->features.feature; \ + _ret = 0; \ + } \ + _ret; \ + }) + +/** + * PVR_HAS_QUIRK() - Tests whether a physical device has a given quirk + * @pvr_dev: [IN] Target PowerVR device. + * @quirk: [IN] Hardware quirk name. + * + * Quirk numbers are derived from those found in #pvr_device_quirks by + * dropping the 'has_brn' prefix, which is applied by this macro. + * + * Returns + * * true if the quirk is present in the hardware, or + * * false if the quirk is not present in the hardware. + */ +#define PVR_HAS_QUIRK(pvr_dev, quirk) ((pvr_dev)->quirks.has_brn##quirk) + +/** + * PVR_HAS_ENHANCEMENT() - Tests whether a physical device has a given + * enhancement + * @pvr_dev: [IN] Target PowerVR device. + * @enhancement: [IN] Hardware enhancement name. + * + * Enhancement numbers are derived from those found in #pvr_device_enhancements + * by dropping the 'has_ern' prefix, which is applied by this macro. + * + * Returns + * * true if the enhancement is present in the hardware, or + * * false if the enhancement is not present in the hardware. + */ +#define PVR_HAS_ENHANCEMENT(pvr_dev, enhancement) ((pvr_dev)->enhancements.has_ern##enhancement) + +#define from_pvr_device(pvr_dev) (&(pvr_dev)->base) + +#define to_pvr_device(drm_dev) container_of_const(drm_dev, struct pvr_device, base) + +#define from_pvr_file(pvr_file) ((pvr_file)->file) + +#define to_pvr_file(file) ((file)->driver_priv) + +/** + * PVR_PACKED_BVNC() - Packs B, V, N and C values into a 64-bit unsigned integer + * @b: Branch ID. + * @v: Version ID. + * @n: Number of scalable units. + * @c: Config ID. + * + * The packed layout is as follows: + * + * +--------+--------+--------+-------+ + * | 63..48 | 47..32 | 31..16 | 15..0 | + * +========+========+========+=======+ + * | B | V | N | C | + * +--------+--------+--------+-------+ + * + * pvr_gpu_id_to_packed_bvnc() should be used instead of this macro when a + * &struct pvr_gpu_id is available in order to ensure proper type checking. + * + * Return: Packed BVNC. 
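+ *
+ * For illustration, a hypothetical 4.40.2.51 GPU packs as:
+ *
+ *	PVR_PACKED_BVNC(4, 40, 2, 51) == 0x0004002800020033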
+ */ +/* clang-format off */ +#define PVR_PACKED_BVNC(b, v, n, c) \ + ((((u64)(b) & GENMASK_ULL(15, 0)) << 48) | \ + (((u64)(v) & GENMASK_ULL(15, 0)) << 32) | \ + (((u64)(n) & GENMASK_ULL(15, 0)) << 16) | \ + (((u64)(c) & GENMASK_ULL(15, 0)) << 0)) +/* clang-format on */ + +/** + * pvr_gpu_id_to_packed_bvnc() - Packs B, V, N and C values into a 64-bit + * unsigned integer + * @gpu_id: GPU ID. + * + * The packed layout is as follows: + * + * +--------+--------+--------+-------+ + * | 63..48 | 47..32 | 31..16 | 15..0 | + * +========+========+========+=======+ + * | B | V | N | C | + * +--------+--------+--------+-------+ + * + * This should be used in preference to PVR_PACKED_BVNC() when a &struct + * pvr_gpu_id is available in order to ensure proper type checking. + * + * Return: Packed BVNC. + */ +static __always_inline u64 +pvr_gpu_id_to_packed_bvnc(struct pvr_gpu_id *gpu_id) +{ + return PVR_PACKED_BVNC(gpu_id->b, gpu_id->v, gpu_id->n, gpu_id->c); +} + +static __always_inline void +packed_bvnc_to_pvr_gpu_id(u64 bvnc, struct pvr_gpu_id *gpu_id) +{ + gpu_id->b = (bvnc & GENMASK_ULL(63, 48)) >> 48; + gpu_id->v = (bvnc & GENMASK_ULL(47, 32)) >> 32; + gpu_id->n = (bvnc & GENMASK_ULL(31, 16)) >> 16; + gpu_id->c = bvnc & GENMASK_ULL(15, 0); +} + +int pvr_device_init(struct pvr_device *pvr_dev); +void pvr_device_fini(struct pvr_device *pvr_dev); +void pvr_device_reset(struct pvr_device *pvr_dev); + +bool +pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk); +bool +pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement); +bool +pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature); + +/** + * PVR_CR_FIELD_GET() - Extract a single field from a PowerVR control register + * @val: Value of the target register. + * @field: Field specifier, as defined in "pvr_rogue_cr_defs.h". + * + * Return: The extracted field. + */ +#define PVR_CR_FIELD_GET(val, field) FIELD_GET(~ROGUE_CR_##field##_CLRMSK, val) + +/** + * pvr_cr_read32() - Read a 32-bit register from a PowerVR device + * @pvr_dev: Target PowerVR device. + * @reg: Target register. + * + * Return: The value of the requested register. + */ +static __always_inline u32 +pvr_cr_read32(struct pvr_device *pvr_dev, u32 reg) +{ + return ioread32(pvr_dev->regs + reg); +} + +/** + * pvr_cr_read64() - Read a 64-bit register from a PowerVR device + * @pvr_dev: Target PowerVR device. + * @reg: Target register. + * + * Return: The value of the requested register. + */ +static __always_inline u64 +pvr_cr_read64(struct pvr_device *pvr_dev, u32 reg) +{ + return ioread64(pvr_dev->regs + reg); +} + +/** + * pvr_cr_write32() - Write to a 32-bit register in a PowerVR device + * @pvr_dev: Target PowerVR device. + * @reg: Target register. + * @val: Value to write. + */ +static __always_inline void +pvr_cr_write32(struct pvr_device *pvr_dev, u32 reg, u32 val) +{ + iowrite32(val, pvr_dev->regs + reg); +} + +/** + * pvr_cr_write64() - Write to a 64-bit register in a PowerVR device + * @pvr_dev: Target PowerVR device. + * @reg: Target register. + * @val: Value to write. + */ +static __always_inline void +pvr_cr_write64(struct pvr_device *pvr_dev, u32 reg, u64 val) +{ + iowrite64(val, pvr_dev->regs + reg); +} + +/** + * pvr_cr_poll_reg32() - Wait for a 32-bit register to match a given value by + * polling + * @pvr_dev: Target PowerVR device. + * @reg_addr: Address of register. + * @reg_value: Expected register value (after masking). + * @reg_mask: Mask of bits valid for comparison with @reg_value. 
+ * @timeout_usec: Timeout length, in us. + * + * Returns: + * * 0 on success, or + * * -%ETIMEDOUT on timeout. + */ +static __always_inline int +pvr_cr_poll_reg32(struct pvr_device *pvr_dev, u32 reg_addr, u32 reg_value, + u32 reg_mask, u64 timeout_usec) +{ + u32 value; + + return readl_poll_timeout(pvr_dev->regs + reg_addr, value, + (value & reg_mask) == reg_value, 0, timeout_usec); +} + +/** + * pvr_cr_poll_reg64() - Wait for a 64-bit register to match a given value by + * polling + * @pvr_dev: Target PowerVR device. + * @reg_addr: Address of register. + * @reg_value: Expected register value (after masking). + * @reg_mask: Mask of bits valid for comparison with @reg_value. + * @timeout_usec: Timeout length, in us. + * + * Returns: + * * 0 on success, or + * * -%ETIMEDOUT on timeout. + */ +static __always_inline int +pvr_cr_poll_reg64(struct pvr_device *pvr_dev, u32 reg_addr, u64 reg_value, + u64 reg_mask, u64 timeout_usec) +{ + u64 value; + + return readq_poll_timeout(pvr_dev->regs + reg_addr, value, + (value & reg_mask) == reg_value, 0, timeout_usec); +} + +/** + * pvr_round_up_to_cacheline_size() - Round up a provided size to be cacheline + * aligned + * @pvr_dev: Target PowerVR device. + * @size: Initial size, in bytes. + * + * Returns: + * * Size aligned to cacheline size. + */ +static __always_inline size_t +pvr_round_up_to_cacheline_size(struct pvr_device *pvr_dev, size_t size) +{ + u16 slc_cacheline_size_bits = 0; + u16 slc_cacheline_size_bytes; + + WARN_ON(!PVR_HAS_FEATURE(pvr_dev, slc_cache_line_size_bits)); + PVR_FEATURE_VALUE(pvr_dev, slc_cache_line_size_bits, + &slc_cacheline_size_bits); + slc_cacheline_size_bytes = slc_cacheline_size_bits / 8; + + return round_up(size, slc_cacheline_size_bytes); +} + +/** + * DOC: IOCTL validation helpers + * + * To validate the constraints imposed on IOCTL argument structs, a collection + * of macros and helper functions exist in ``pvr_device.h``. + * + * Of the current helpers, it should only be necessary to call + * PVR_IOCTL_UNION_PADDING_CHECK() directly. This macro should be used once in + * every code path which extracts a union member from a struct passed from + * userspace. + */ + +/** + * pvr_ioctl_union_padding_check() - Validate that the implicit padding between + * the end of a union member and the end of the union itself is zeroed. + * @instance: Pointer to the instance of the struct to validate. + * @union_offset: Offset into the type of @instance of the target union. Must + * be 64-bit aligned. + * @union_size: Size of the target union in the type of @instance. Must be + * 64-bit aligned. + * @member_size: Size of the target member in the target union specified by + * @union_offset and @union_size. It is assumed that the offset of the target + * member is zero relative to @union_offset. Must be 64-bit aligned. + * + * You probably want to use PVR_IOCTL_UNION_PADDING_CHECK() instead of calling + * this function directly, since that macro abstracts away much of the setup, + * and also provides some static validation. See its docs for details. + * + * Return: + * * %true if every byte between the end of the used member of the union and + * the end of that union is zeroed, or + * * %false otherwise. + */ +static __always_inline bool +pvr_ioctl_union_padding_check(void *instance, size_t union_offset, + size_t union_size, size_t member_size) +{ + /* + * void pointer arithmetic is technically illegal - cast to a byte + * pointer so this addition works safely. 
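+	 * (GCC accepts void pointer arithmetic as an extension, but the
+	 * explicit cast keeps the computation well-defined and portable.)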
+ */ + void *padding_start = ((u8 *)instance) + union_offset + member_size; + size_t padding_size = union_size - member_size; + + return !memchr_inv(padding_start, 0, padding_size); +} + +/** + * PVR_STATIC_ASSERT_64BIT_ALIGNED() - Inline assertion for 64-bit alignment. + * @static_expr_: Target expression to evaluate. + * + * If @static_expr_ does not evaluate to a constant integer which would be a + * 64-bit aligned address (i.e. a multiple of 8), compilation will fail. + * + * Return: + * The value of @static_expr_. + */ +#define PVR_STATIC_ASSERT_64BIT_ALIGNED(static_expr_) \ + ({ \ + static_assert(((static_expr_) & (sizeof(u64) - 1)) == 0); \ + (static_expr_); \ + }) + +/** + * PVR_IOCTL_UNION_PADDING_CHECK() - Validate that the implicit padding between + * the end of a union member and the end of the union itself is zeroed. + * @struct_instance_: An expression which evaluates to a pointer to a UAPI data + * struct. + * @union_: The name of the union member of @struct_instance_ to check. If the + * union member is nested within the type of @struct_instance_, this may + * contain the member access operator ("."). + * @member_: The name of the member of @union_ to assess. + * + * This is a wrapper around pvr_ioctl_union_padding_check() which performs + * alignment checks and simplifies things for the caller. + * + * Return: + * * %true if every byte in @struct_instance_ between the end of @member_ and + * the end of @union_ is zeroed, or + * * %false otherwise. + */ +#define PVR_IOCTL_UNION_PADDING_CHECK(struct_instance_, union_, member_) \ + ({ \ + typeof(struct_instance_) __instance = (struct_instance_); \ + size_t __union_offset = PVR_STATIC_ASSERT_64BIT_ALIGNED( \ + offsetof(typeof(*__instance), union_)); \ + size_t __union_size = PVR_STATIC_ASSERT_64BIT_ALIGNED( \ + sizeof(__instance->union_)); \ + size_t __member_size = PVR_STATIC_ASSERT_64BIT_ALIGNED( \ + sizeof(__instance->union_.member_)); \ + pvr_ioctl_union_padding_check(__instance, __union_offset, \ + __union_size, __member_size); \ + }) + +#define PVR_FW_PROCESSOR_TYPE_META 0 +#define PVR_FW_PROCESSOR_TYPE_MIPS 1 +#define PVR_FW_PROCESSOR_TYPE_RISCV 2 + +#endif /* PVR_DEVICE_H */ diff --git a/drivers/gpu/drm/imagination/pvr_device_info.c b/drivers/gpu/drm/imagination/pvr_device_info.c new file mode 100644 index 0000000000..d3301cde7d --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_device_info.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2023 Imagination Technologies Ltd. 
*/ + +#include "pvr_device.h" +#include "pvr_device_info.h" +#include "pvr_rogue_fwif_dev_info.h" + +#include + +#include +#include +#include +#include + +#define QUIRK_MAPPING(quirk) \ + [PVR_FW_HAS_BRN_##quirk] = offsetof(struct pvr_device, quirks.has_brn##quirk) + +static const uintptr_t quirks_mapping[] = { + QUIRK_MAPPING(44079), + QUIRK_MAPPING(47217), + QUIRK_MAPPING(48492), + QUIRK_MAPPING(48545), + QUIRK_MAPPING(49927), + QUIRK_MAPPING(50767), + QUIRK_MAPPING(51764), + QUIRK_MAPPING(62269), + QUIRK_MAPPING(63142), + QUIRK_MAPPING(63553), + QUIRK_MAPPING(66011), + QUIRK_MAPPING(71242), +}; + +#undef QUIRK_MAPPING + +#define ENHANCEMENT_MAPPING(enhancement) \ + [PVR_FW_HAS_ERN_##enhancement] = offsetof(struct pvr_device, \ + enhancements.has_ern##enhancement) + +static const uintptr_t enhancements_mapping[] = { + ENHANCEMENT_MAPPING(35421), + ENHANCEMENT_MAPPING(38020), + ENHANCEMENT_MAPPING(38748), + ENHANCEMENT_MAPPING(42064), + ENHANCEMENT_MAPPING(42290), + ENHANCEMENT_MAPPING(42606), + ENHANCEMENT_MAPPING(47025), + ENHANCEMENT_MAPPING(57596), +}; + +#undef ENHANCEMENT_MAPPING + +static void pvr_device_info_set_common(struct pvr_device *pvr_dev, const u64 *bitmask, + u32 bitmask_size, const uintptr_t *mapping, u32 mapping_max) +{ + const u32 mapping_max_size = (mapping_max + 63) >> 6; + const u32 nr_bits = min(bitmask_size * 64, mapping_max); + + /* Warn if any unsupported values in the bitmask. */ + if (bitmask_size > mapping_max_size) { + if (mapping == quirks_mapping) + drm_warn(from_pvr_device(pvr_dev), "Unsupported quirks in firmware image"); + else + drm_warn(from_pvr_device(pvr_dev), + "Unsupported enhancements in firmware image"); + } else if (bitmask_size == mapping_max_size && (mapping_max & 63)) { + u64 invalid_mask = ~0ull << (mapping_max & 63); + + if (bitmask[bitmask_size - 1] & invalid_mask) { + if (mapping == quirks_mapping) + drm_warn(from_pvr_device(pvr_dev), + "Unsupported quirks in firmware image"); + else + drm_warn(from_pvr_device(pvr_dev), + "Unsupported enhancements in firmware image"); + } + } + + for (u32 i = 0; i < nr_bits; i++) { + if (bitmask[i >> 6] & BIT_ULL(i & 63)) + *(bool *)((u8 *)pvr_dev + mapping[i]) = true; + } +} + +/** + * pvr_device_info_set_quirks() - Set device quirks from device information in firmware + * @pvr_dev: Device pointer. + * @quirks: Pointer to quirks mask in device information. + * @quirks_size: Size of quirks mask, in u64s. + */ +void pvr_device_info_set_quirks(struct pvr_device *pvr_dev, const u64 *quirks, u32 quirks_size) +{ + BUILD_BUG_ON(ARRAY_SIZE(quirks_mapping) != PVR_FW_HAS_BRN_MAX); + + pvr_device_info_set_common(pvr_dev, quirks, quirks_size, quirks_mapping, + ARRAY_SIZE(quirks_mapping)); +} + +/** + * pvr_device_info_set_enhancements() - Set device enhancements from device information in firmware + * @pvr_dev: Device pointer. + * @enhancements: Pointer to enhancements mask in device information. + * @enhancements_size: Size of enhancements mask, in u64s. 
+ */ +void pvr_device_info_set_enhancements(struct pvr_device *pvr_dev, const u64 *enhancements, + u32 enhancements_size) +{ + BUILD_BUG_ON(ARRAY_SIZE(enhancements_mapping) != PVR_FW_HAS_ERN_MAX); + + pvr_device_info_set_common(pvr_dev, enhancements, enhancements_size, + enhancements_mapping, ARRAY_SIZE(enhancements_mapping)); +} + +#define FEATURE_MAPPING(fw_feature, feature) \ + [PVR_FW_HAS_FEATURE_##fw_feature] = { \ + .flag_offset = offsetof(struct pvr_device, features.has_##feature), \ + .value_offset = 0 \ + } + +#define FEATURE_MAPPING_VALUE(fw_feature, feature) \ + [PVR_FW_HAS_FEATURE_##fw_feature] = { \ + .flag_offset = offsetof(struct pvr_device, features.has_##feature), \ + .value_offset = offsetof(struct pvr_device, features.feature) \ + } + +static const struct { + uintptr_t flag_offset; + uintptr_t value_offset; +} features_mapping[] = { + FEATURE_MAPPING(AXI_ACELITE, axi_acelite), + FEATURE_MAPPING_VALUE(CDM_CONTROL_STREAM_FORMAT, cdm_control_stream_format), + FEATURE_MAPPING(CLUSTER_GROUPING, cluster_grouping), + FEATURE_MAPPING_VALUE(COMMON_STORE_SIZE_IN_DWORDS, common_store_size_in_dwords), + FEATURE_MAPPING(COMPUTE, compute), + FEATURE_MAPPING(COMPUTE_MORTON_CAPABLE, compute_morton_capable), + FEATURE_MAPPING(COMPUTE_OVERLAP, compute_overlap), + FEATURE_MAPPING(COREID_PER_OS, coreid_per_os), + FEATURE_MAPPING(DYNAMIC_DUST_POWER, dynamic_dust_power), + FEATURE_MAPPING_VALUE(ECC_RAMS, ecc_rams), + FEATURE_MAPPING_VALUE(FBCDC, fbcdc), + FEATURE_MAPPING_VALUE(FBCDC_ALGORITHM, fbcdc_algorithm), + FEATURE_MAPPING_VALUE(FBCDC_ARCHITECTURE, fbcdc_architecture), + FEATURE_MAPPING_VALUE(FBC_MAX_DEFAULT_DESCRIPTORS, fbc_max_default_descriptors), + FEATURE_MAPPING_VALUE(FBC_MAX_LARGE_DESCRIPTORS, fbc_max_large_descriptors), + FEATURE_MAPPING(FB_CDC_V4, fb_cdc_v4), + FEATURE_MAPPING(GPU_MULTICORE_SUPPORT, gpu_multicore_support), + FEATURE_MAPPING(GPU_VIRTUALISATION, gpu_virtualisation), + FEATURE_MAPPING(GS_RTA_SUPPORT, gs_rta_support), + FEATURE_MAPPING(IRQ_PER_OS, irq_per_os), + FEATURE_MAPPING_VALUE(ISP_MAX_TILES_IN_FLIGHT, isp_max_tiles_in_flight), + FEATURE_MAPPING_VALUE(ISP_SAMPLES_PER_PIXEL, isp_samples_per_pixel), + FEATURE_MAPPING(ISP_ZLS_D24_S8_PACKING_OGL_MODE, isp_zls_d24_s8_packing_ogl_mode), + FEATURE_MAPPING_VALUE(LAYOUT_MARS, layout_mars), + FEATURE_MAPPING_VALUE(MAX_PARTITIONS, max_partitions), + FEATURE_MAPPING_VALUE(META, meta), + FEATURE_MAPPING_VALUE(META_COREMEM_SIZE, meta_coremem_size), + FEATURE_MAPPING(MIPS, mips), + FEATURE_MAPPING_VALUE(NUM_CLUSTERS, num_clusters), + FEATURE_MAPPING_VALUE(NUM_ISP_IPP_PIPES, num_isp_ipp_pipes), + FEATURE_MAPPING_VALUE(NUM_OSIDS, num_osids), + FEATURE_MAPPING_VALUE(NUM_RASTER_PIPES, num_raster_pipes), + FEATURE_MAPPING(PBE2_IN_XE, pbe2_in_xe), + FEATURE_MAPPING(PBVNC_COREID_REG, pbvnc_coreid_reg), + FEATURE_MAPPING(PERFBUS, perfbus), + FEATURE_MAPPING(PERF_COUNTER_BATCH, perf_counter_batch), + FEATURE_MAPPING_VALUE(PHYS_BUS_WIDTH, phys_bus_width), + FEATURE_MAPPING(RISCV_FW_PROCESSOR, riscv_fw_processor), + FEATURE_MAPPING(ROGUEXE, roguexe), + FEATURE_MAPPING(S7_TOP_INFRASTRUCTURE, s7_top_infrastructure), + FEATURE_MAPPING(SIMPLE_INTERNAL_PARAMETER_FORMAT, simple_internal_parameter_format), + FEATURE_MAPPING(SIMPLE_INTERNAL_PARAMETER_FORMAT_V2, simple_internal_parameter_format_v2), + FEATURE_MAPPING_VALUE(SIMPLE_PARAMETER_FORMAT_VERSION, simple_parameter_format_version), + FEATURE_MAPPING_VALUE(SLC_BANKS, slc_banks), + FEATURE_MAPPING_VALUE(SLC_CACHE_LINE_SIZE_BITS, slc_cache_line_size_bits), + 
FEATURE_MAPPING(SLC_SIZE_CONFIGURABLE, slc_size_configurable), + FEATURE_MAPPING_VALUE(SLC_SIZE_IN_KILOBYTES, slc_size_in_kilobytes), + FEATURE_MAPPING(SOC_TIMER, soc_timer), + FEATURE_MAPPING(SYS_BUS_SECURE_RESET, sys_bus_secure_reset), + FEATURE_MAPPING(TESSELLATION, tessellation), + FEATURE_MAPPING(TILE_REGION_PROTECTION, tile_region_protection), + FEATURE_MAPPING_VALUE(TILE_SIZE_X, tile_size_x), + FEATURE_MAPPING_VALUE(TILE_SIZE_Y, tile_size_y), + FEATURE_MAPPING(TLA, tla), + FEATURE_MAPPING(TPU_CEM_DATAMASTER_GLOBAL_REGISTERS, tpu_cem_datamaster_global_registers), + FEATURE_MAPPING(TPU_DM_GLOBAL_REGISTERS, tpu_dm_global_registers), + FEATURE_MAPPING(TPU_FILTERING_MODE_CONTROL, tpu_filtering_mode_control), + FEATURE_MAPPING_VALUE(USC_MIN_OUTPUT_REGISTERS_PER_PIX, usc_min_output_registers_per_pix), + FEATURE_MAPPING(VDM_DRAWINDIRECT, vdm_drawindirect), + FEATURE_MAPPING(VDM_OBJECT_LEVEL_LLS, vdm_object_level_lls), + FEATURE_MAPPING_VALUE(VIRTUAL_ADDRESS_SPACE_BITS, virtual_address_space_bits), + FEATURE_MAPPING(WATCHDOG_TIMER, watchdog_timer), + FEATURE_MAPPING(WORKGROUP_PROTECTION, workgroup_protection), + FEATURE_MAPPING_VALUE(XE_ARCHITECTURE, xe_architecture), + FEATURE_MAPPING(XE_MEMORY_HIERARCHY, xe_memory_hierarchy), + FEATURE_MAPPING(XE_TPU2, xe_tpu2), + FEATURE_MAPPING_VALUE(XPU_MAX_REGBANKS_ADDR_WIDTH, xpu_max_regbanks_addr_width), + FEATURE_MAPPING_VALUE(XPU_MAX_SLAVES, xpu_max_slaves), + FEATURE_MAPPING_VALUE(XPU_REGISTER_BROADCAST, xpu_register_broadcast), + FEATURE_MAPPING(XT_TOP_INFRASTRUCTURE, xt_top_infrastructure), + FEATURE_MAPPING(ZLS_SUBTILE, zls_subtile), +}; + +#undef FEATURE_MAPPING_VALUE +#undef FEATURE_MAPPING + +/** + * pvr_device_info_set_features() - Set device features from device information in firmware + * @pvr_dev: Device pointer. + * @features: Pointer to features mask in device information. + * @features_size: Size of features mask, in u64s. + * @feature_param_size: Size of feature parameters, in u64s. + * + * Returns: + * * 0 on success, or + * * -%EINVAL on malformed stream. + */ +int pvr_device_info_set_features(struct pvr_device *pvr_dev, const u64 *features, u32 features_size, + u32 feature_param_size) +{ + const u32 mapping_max = ARRAY_SIZE(features_mapping); + const u32 mapping_max_size = (mapping_max + 63) >> 6; + const u32 nr_bits = min(features_size * 64, mapping_max); + const u64 *feature_params = features + features_size; + u32 param_idx = 0; + + BUILD_BUG_ON(ARRAY_SIZE(features_mapping) != PVR_FW_HAS_FEATURE_MAX); + + /* Verify no unsupported values in the bitmask. 
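+	 * A features bitmask longer than the mapping table, or set bits above
+	 * the last mapped feature in the final u64, means the firmware image
+	 * knows about features this driver does not; warn instead of failing.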
 */
+	if (features_size > mapping_max_size) {
+		drm_warn(from_pvr_device(pvr_dev), "Unsupported features in firmware image");
+	} else if (features_size == mapping_max_size &&
+		   ((mapping_max & 63) != 0)) {
+		u64 invalid_mask = ~0ull << (mapping_max & 63);
+
+		if (features[features_size - 1] & invalid_mask)
+			drm_warn(from_pvr_device(pvr_dev),
+				 "Unsupported features in firmware image");
+	}
+
+	for (u32 i = 0; i < nr_bits; i++) {
+		if (features[i >> 6] & BIT_ULL(i & 63)) {
+			*(bool *)((u8 *)pvr_dev + features_mapping[i].flag_offset) = true;
+
+			if (features_mapping[i].value_offset) {
+				if (param_idx >= feature_param_size)
+					return -EINVAL;
+
+				*(u64 *)((u8 *)pvr_dev + features_mapping[i].value_offset) =
+					feature_params[param_idx];
+				param_idx++;
+			}
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_device_info.h b/drivers/gpu/drm/imagination/pvr_device_info.h
new file mode 100644
index 0000000000..f61fb988b5
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_device_info.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DEVICE_INFO_H
+#define PVR_DEVICE_INFO_H
+
+#include <linux/types.h>
+
+struct pvr_device;
+
+/*
+ * struct pvr_device_features - Hardware feature information
+ */
+struct pvr_device_features {
+	bool has_axi_acelite;
+	bool has_cdm_control_stream_format;
+	bool has_cluster_grouping;
+	bool has_common_store_size_in_dwords;
+	bool has_compute;
+	bool has_compute_morton_capable;
+	bool has_compute_overlap;
+	bool has_coreid_per_os;
+	bool has_dynamic_dust_power;
+	bool has_ecc_rams;
+	bool has_fb_cdc_v4;
+	bool has_fbc_max_default_descriptors;
+	bool has_fbc_max_large_descriptors;
+	bool has_fbcdc;
+	bool has_fbcdc_algorithm;
+	bool has_fbcdc_architecture;
+	bool has_gpu_multicore_support;
+	bool has_gpu_virtualisation;
+	bool has_gs_rta_support;
+	bool has_irq_per_os;
+	bool has_isp_max_tiles_in_flight;
+	bool has_isp_samples_per_pixel;
+	bool has_isp_zls_d24_s8_packing_ogl_mode;
+	bool has_layout_mars;
+	bool has_max_partitions;
+	bool has_meta;
+	bool has_meta_coremem_size;
+	bool has_mips;
+	bool has_num_clusters;
+	bool has_num_isp_ipp_pipes;
+	bool has_num_osids;
+	bool has_num_raster_pipes;
+	bool has_pbe2_in_xe;
+	bool has_pbvnc_coreid_reg;
+	bool has_perfbus;
+	bool has_perf_counter_batch;
+	bool has_phys_bus_width;
+	bool has_riscv_fw_processor;
+	bool has_roguexe;
+	bool has_s7_top_infrastructure;
+	bool has_simple_internal_parameter_format;
+	bool has_simple_internal_parameter_format_v2;
+	bool has_simple_parameter_format_version;
+	bool has_slc_banks;
+	bool has_slc_cache_line_size_bits;
+	bool has_slc_size_configurable;
+	bool has_slc_size_in_kilobytes;
+	bool has_soc_timer;
+	bool has_sys_bus_secure_reset;
+	bool has_tessellation;
+	bool has_tile_region_protection;
+	bool has_tile_size_x;
+	bool has_tile_size_y;
+	bool has_tla;
+	bool has_tpu_cem_datamaster_global_registers;
+	bool has_tpu_dm_global_registers;
+	bool has_tpu_filtering_mode_control;
+	bool has_usc_min_output_registers_per_pix;
+	bool has_vdm_drawindirect;
+	bool has_vdm_object_level_lls;
+	bool has_virtual_address_space_bits;
+	bool has_watchdog_timer;
+	bool has_workgroup_protection;
+	bool has_xe_architecture;
+	bool has_xe_memory_hierarchy;
+	bool has_xe_tpu2;
+	bool has_xpu_max_regbanks_addr_width;
+	bool has_xpu_max_slaves;
+	bool has_xpu_register_broadcast;
+	bool has_xt_top_infrastructure;
+	bool has_zls_subtile;
+
+	u64 cdm_control_stream_format;
+	u64 common_store_size_in_dwords;
+	u64 ecc_rams;
+	u64 fbc_max_default_descriptors;
+	u64 fbc_max_large_descriptors;
+	u64 fbcdc;
+	u64 fbcdc_algorithm;
+	u64 fbcdc_architecture;
+	u64 isp_max_tiles_in_flight;
+	u64 isp_samples_per_pixel;
+	u64 layout_mars;
+	u64 max_partitions;
+	u64 meta;
+	u64 meta_coremem_size;
+	u64 num_clusters;
+	u64 num_isp_ipp_pipes;
+	u64 num_osids;
+	u64 num_raster_pipes;
+	u64 phys_bus_width;
+	u64 simple_parameter_format_version;
+	u64 slc_banks;
+	u64 slc_cache_line_size_bits;
+	u64 slc_size_in_kilobytes;
+	u64 tile_size_x;
+	u64 tile_size_y;
+	u64 usc_min_output_registers_per_pix;
+	u64 virtual_address_space_bits;
+	u64 xe_architecture;
+	u64 xpu_max_regbanks_addr_width;
+	u64 xpu_max_slaves;
+	u64 xpu_register_broadcast;
+};
+
+/*
+ * struct pvr_device_quirks - Hardware quirk information
+ */
+struct pvr_device_quirks {
+	bool has_brn44079;
+	bool has_brn47217;
+	bool has_brn48492;
+	bool has_brn48545;
+	bool has_brn49927;
+	bool has_brn50767;
+	bool has_brn51764;
+	bool has_brn62269;
+	bool has_brn63142;
+	bool has_brn63553;
+	bool has_brn66011;
+	bool has_brn71242;
+};
+
+/*
+ * struct pvr_device_enhancements - Hardware enhancement information
+ */
+struct pvr_device_enhancements {
+	bool has_ern35421;
+	bool has_ern38020;
+	bool has_ern38748;
+	bool has_ern42064;
+	bool has_ern42290;
+	bool has_ern42606;
+	bool has_ern47025;
+	bool has_ern57596;
+};
+
+void pvr_device_info_set_quirks(struct pvr_device *pvr_dev, const u64 *bitmask,
+				u32 bitmask_len);
+void pvr_device_info_set_enhancements(struct pvr_device *pvr_dev, const u64 *bitmask,
+				      u32 bitmask_len);
+int pvr_device_info_set_features(struct pvr_device *pvr_dev, const u64 *features, u32 features_size,
+				 u32 feature_param_size);
+
+/*
+ * Meta cores
+ *
+ * These are the values for the 'meta' feature when the feature is present
+ * (as per &struct pvr_device_features).
+ */
+#define PVR_META_MTP218 (1)
+#define PVR_META_MTP219 (2)
+#define PVR_META_LTP218 (3)
+#define PVR_META_LTP217 (4)
+
+enum {
+	PVR_FEATURE_CDM_USER_MODE_QUEUE,
+	PVR_FEATURE_CLUSTER_GROUPING,
+	PVR_FEATURE_COMPUTE_MORTON_CAPABLE,
+	PVR_FEATURE_FB_CDC_V4,
+	PVR_FEATURE_GPU_MULTICORE_SUPPORT,
+	PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE,
+	PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP,
+	PVR_FEATURE_S7_TOP_INFRASTRUCTURE,
+	PVR_FEATURE_TESSELLATION,
+	PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS,
+	PVR_FEATURE_VDM_DRAWINDIRECT,
+	PVR_FEATURE_VDM_OBJECT_LEVEL_LLS,
+	PVR_FEATURE_ZLS_SUBTILE,
+};
+
+#endif /* PVR_DEVICE_INFO_H */
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
new file mode 100644
index 0000000000..5c3b2d58d7
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -0,0 +1,1501 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_context.h"
+#include "pvr_debugfs.h"
+#include "pvr_device.h"
+#include "pvr_drv.h"
+#include "pvr_free_list.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+#include "pvr_job.h"
+#include "pvr_mmu.h"
+#include "pvr_power.h"
+#include "pvr_rogue_defs.h"
+#include "pvr_rogue_fwif_client.h"
+#include "pvr_rogue_fwif_shared.h"
+#include "pvr_vm.h"
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_ioctl.h>
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+
+/**
+ * DOC: PowerVR (Series 6 and later) and IMG Graphics Driver
+ *
+ * This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
+ *
+ * * AXE-1-16M (found in Texas Instruments AM62)
+ */
+
+/**
+ * pvr_ioctl_create_bo() - IOCTL to create a GEM buffer object.
+ * @drm_dev: [IN] Target DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_create_bo_args.
+ * @file: [IN] DRM file-private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_CREATE_BO.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if the value of &drm_pvr_ioctl_create_bo_args.size is zero
+ * or wider than &typedef size_t,
+ * * -%EINVAL if any bits in &drm_pvr_ioctl_create_bo_args.flags that are
+ * reserved or undefined are set,
+ * * -%EINVAL if any padding fields in &drm_pvr_ioctl_create_bo_args are not
+ * zero,
+ * * Any error encountered while creating the object (see
+ * pvr_gem_object_create()), or
+ * * Any error encountered while transferring ownership of the object into a
+ * userspace-accessible handle (see pvr_gem_object_into_handle()).
+ */
+static int
+pvr_ioctl_create_bo(struct drm_device *drm_dev, void *raw_args,
+		    struct drm_file *file)
+{
+	struct drm_pvr_ioctl_create_bo_args *args = raw_args;
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	struct pvr_file *pvr_file = to_pvr_file(file);
+
+	struct pvr_gem_object *pvr_obj;
+	size_t sanitized_size;
+
+	int idx;
+	int err;
+
+	if (!drm_dev_enter(drm_dev, &idx))
+		return -EIO;
+
+	/* All padding fields must be zeroed. */
+	if (args->_padding_c != 0) {
+		err = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	/*
+	 * On 64-bit platforms (our primary target), size_t is a u64. However,
+	 * on other architectures we have to check for overflow when casting
+	 * down to size_t from u64.
+	 *
+	 * We also disallow zero-sized allocations, and reserved (kernel-only)
+	 * flags.
+	 */
+	if (args->size > SIZE_MAX || args->size == 0 || args->flags &
+	    ~DRM_PVR_BO_FLAGS_MASK || args->size & (PVR_DEVICE_PAGE_SIZE - 1)) {
+		err = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	sanitized_size = (size_t)args->size;
+
+	/*
+	 * Create a buffer object and transfer ownership to a userspace-
+	 * accessible handle.
+	 */
+	pvr_obj = pvr_gem_object_create(pvr_dev, sanitized_size, args->flags);
+	if (IS_ERR(pvr_obj)) {
+		err = PTR_ERR(pvr_obj);
+		goto err_drm_dev_exit;
+	}
+
+	/* This function will not modify &args->handle unless it succeeds. */
+	err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &args->handle);
+	if (err)
+		goto err_destroy_obj;
+
+	drm_dev_exit(idx);
+
+	return 0;
+
+err_destroy_obj:
+	/*
+	 * GEM objects are refcounted, so there is no explicit destructor
+	 * function. Instead, we release the singular reference we currently
+	 * hold on the object and let GEM take care of the rest.
+ */ + pvr_gem_object_put(pvr_obj); + +err_drm_dev_exit: + drm_dev_exit(idx); + + return err; +} + +/** + * pvr_ioctl_get_bo_mmap_offset() - IOCTL to generate a "fake" offset to be + * used when calling mmap() from userspace to map the given GEM buffer object + * @drm_dev: [IN] DRM device (unused). + * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type + * &struct drm_pvr_ioctl_get_bo_mmap_offset_args. + * @file: [IN] DRM file private data. + * + * Called from userspace with %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET. + * + * This IOCTL does *not* perform an mmap. See the docs on + * &struct drm_pvr_ioctl_get_bo_mmap_offset_args for details. + * + * Return: + * * 0 on success, + * * -%ENOENT if the handle does not reference a valid GEM buffer object, + * * -%EINVAL if any padding fields in &struct + * drm_pvr_ioctl_get_bo_mmap_offset_args are not zero, or + * * Any error returned by drm_gem_create_mmap_offset(). + */ +static int +pvr_ioctl_get_bo_mmap_offset(struct drm_device *drm_dev, void *raw_args, + struct drm_file *file) +{ + struct drm_pvr_ioctl_get_bo_mmap_offset_args *args = raw_args; + struct pvr_file *pvr_file = to_pvr_file(file); + struct pvr_gem_object *pvr_obj; + struct drm_gem_object *gem_obj; + int idx; + int ret; + + if (!drm_dev_enter(drm_dev, &idx)) + return -EIO; + + /* All padding fields must be zeroed. */ + if (args->_padding_4 != 0) { + ret = -EINVAL; + goto err_drm_dev_exit; + } + + /* + * Obtain a kernel reference to the buffer object. This reference is + * counted and must be manually dropped before returning. If a buffer + * object cannot be found for the specified handle, return -%ENOENT (No + * such file or directory). + */ + pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle); + if (!pvr_obj) { + ret = -ENOENT; + goto err_drm_dev_exit; + } + + gem_obj = gem_from_pvr_gem(pvr_obj); + + /* + * Allocate a fake offset which can be used in userspace calls to mmap + * on the DRM device file. If this fails, return the error code. This + * operation is idempotent. + */ + ret = drm_gem_create_mmap_offset(gem_obj); + if (ret != 0) { + /* Drop our reference to the buffer object. */ + drm_gem_object_put(gem_obj); + goto err_drm_dev_exit; + } + + /* + * Read out the fake offset allocated by the earlier call to + * drm_gem_create_mmap_offset. + */ + args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); + + /* Drop our reference to the buffer object. 
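Together, the two ioctls above give userspace CPU access to a buffer in three steps: create the BO, fetch its fake mmap offset, then mmap the DRM device file at that offset. A hypothetical usage sketch (argument layouts inferred from the field accesses in the handlers above; includes such as <sys/ioctl.h> and <sys/mman.h>, fd setup, and error handling are elided):

    struct drm_pvr_ioctl_create_bo_args create = {
        .size = 4096,   /* non-zero and device-page aligned */
        .flags = 0,     /* only bits within DRM_PVR_BO_FLAGS_MASK */
    };
    ioctl(fd, DRM_IOCTL_PVR_CREATE_BO, &create);

    struct drm_pvr_ioctl_get_bo_mmap_offset_args moff = {
        .handle = create.handle,
    };
    ioctl(fd, DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET, &moff);

    /* The "fake" offset selects which BO the mapping targets. */
    void *cpu = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                     fd, moff.offset);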
*/ + pvr_gem_object_put(pvr_obj); + +err_drm_dev_exit: + drm_dev_exit(idx); + + return ret; +} + +static __always_inline u64 +pvr_fw_version_packed(u32 major, u32 minor) +{ + return ((u64)major << 32) | minor; +} + +static u32 +rogue_get_common_store_partition_space_size(struct pvr_device *pvr_dev) +{ + u32 max_partitions = 0; + u32 tile_size_x = 0; + u32 tile_size_y = 0; + + PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &tile_size_x); + PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &tile_size_y); + PVR_FEATURE_VALUE(pvr_dev, max_partitions, &max_partitions); + + if (tile_size_x == 16 && tile_size_y == 16) { + u32 usc_min_output_registers_per_pix = 0; + + PVR_FEATURE_VALUE(pvr_dev, usc_min_output_registers_per_pix, + &usc_min_output_registers_per_pix); + + return tile_size_x * tile_size_y * max_partitions * + usc_min_output_registers_per_pix; + } + + return max_partitions * 1024; +} + +static u32 +rogue_get_common_store_alloc_region_size(struct pvr_device *pvr_dev) +{ + u32 common_store_size_in_dwords = 512 * 4 * 4; + u32 alloc_region_size; + + PVR_FEATURE_VALUE(pvr_dev, common_store_size_in_dwords, &common_store_size_in_dwords); + + alloc_region_size = common_store_size_in_dwords - (256U * 4U) - + rogue_get_common_store_partition_space_size(pvr_dev); + + if (PVR_HAS_QUIRK(pvr_dev, 44079)) { + u32 common_store_split_point = (768U * 4U * 4U); + + return min(common_store_split_point - (256U * 4U), alloc_region_size); + } + + return alloc_region_size; +} + +static inline u32 +rogue_get_num_phantoms(struct pvr_device *pvr_dev) +{ + u32 num_clusters = 1; + + PVR_FEATURE_VALUE(pvr_dev, num_clusters, &num_clusters); + + return ROGUE_REQ_NUM_PHANTOMS(num_clusters); +} + +static inline u32 +rogue_get_max_coeffs(struct pvr_device *pvr_dev) +{ + u32 max_coeff_additional_portion = ROGUE_MAX_VERTEX_SHARED_REGISTERS; + u32 pending_allocation_shared_regs = 2U * 1024U; + u32 pending_allocation_coeff_regs = 0U; + u32 num_phantoms = rogue_get_num_phantoms(pvr_dev); + u32 tiles_in_flight = 0; + u32 max_coeff_pixel_portion; + + PVR_FEATURE_VALUE(pvr_dev, isp_max_tiles_in_flight, &tiles_in_flight); + max_coeff_pixel_portion = DIV_ROUND_UP(tiles_in_flight, num_phantoms); + max_coeff_pixel_portion *= ROGUE_MAX_PIXEL_SHARED_REGISTERS; + + /* + * Compute tasks on cores with BRN48492 and without compute overlap may lock + * up without two additional lines of coeffs. + */ + if (PVR_HAS_QUIRK(pvr_dev, 48492) && !PVR_HAS_FEATURE(pvr_dev, compute_overlap)) + pending_allocation_coeff_regs = 2U * 1024U; + + if (PVR_HAS_ENHANCEMENT(pvr_dev, 38748)) + pending_allocation_shared_regs = 0; + + if (PVR_HAS_ENHANCEMENT(pvr_dev, 38020)) + max_coeff_additional_portion += ROGUE_MAX_COMPUTE_SHARED_REGISTERS; + + return rogue_get_common_store_alloc_region_size(pvr_dev) + pending_allocation_coeff_regs - + (max_coeff_pixel_portion + max_coeff_additional_portion + + pending_allocation_shared_regs); +} + +static inline u32 +rogue_get_cdm_max_local_mem_size_regs(struct pvr_device *pvr_dev) +{ + u32 available_coeffs_in_dwords = rogue_get_max_coeffs(pvr_dev); + + if (PVR_HAS_QUIRK(pvr_dev, 48492) && PVR_HAS_FEATURE(pvr_dev, roguexe) && + !PVR_HAS_FEATURE(pvr_dev, compute_overlap)) { + /* Driver must not use the 2 reserved lines. */ + available_coeffs_in_dwords -= ROGUE_CSRM_LINE_SIZE_IN_DWORDS * 2; + } + + /* + * The maximum amount of local memory available to a kernel is the minimum + * of the total number of coefficient registers available and the max common + * store allocation size which can be made by the CDM. 
+	 *
+	 * If any coeff lines are reserved for tessellation or pixel then we need to
+	 * subtract those too.
+	 */
+	return min(available_coeffs_in_dwords, (u32)ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS);
+}
+
+/**
+ * pvr_dev_query_gpu_info_get() - Fetch GPU information
+ * @pvr_dev: Device pointer.
+ * @args: [IN] Device query arguments containing a pointer to a userspace
+ * struct drm_pvr_dev_query_gpu_info.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ *
+ * Returns:
+ * * 0 on success, or if size is requested using a NULL pointer, or
+ * * -%E2BIG if the indicated length of the allocation is less than is
+ * required to contain the copied data, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_gpu_info_get(struct pvr_device *pvr_dev,
+			   struct drm_pvr_ioctl_dev_query_args *args)
+{
+	struct drm_pvr_dev_query_gpu_info gpu_info = {0};
+	int err;
+
+	if (!args->pointer) {
+		args->size = sizeof(struct drm_pvr_dev_query_gpu_info);
+		return 0;
+	}
+
+	gpu_info.gpu_id =
+		pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id);
+	gpu_info.num_phantoms = rogue_get_num_phantoms(pvr_dev);
+
+	err = PVR_UOBJ_SET(args->pointer, args->size, gpu_info);
+	if (err < 0)
+		return err;
+
+	if (args->size > sizeof(gpu_info))
+		args->size = sizeof(gpu_info);
+	return 0;
+}
+
+/**
+ * pvr_dev_query_runtime_info_get() - Fetch runtime information
+ * @pvr_dev: Device pointer.
+ * @args: [IN] Device query arguments containing a pointer to a userspace
+ * struct drm_pvr_dev_query_runtime_info.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ *
+ * Returns:
+ * * 0 on success, or if size is requested using a NULL pointer, or
+ * * -%E2BIG if the indicated length of the allocation is less than is
+ * required to contain the copied data, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_runtime_info_get(struct pvr_device *pvr_dev,
+			       struct drm_pvr_ioctl_dev_query_args *args)
+{
+	struct drm_pvr_dev_query_runtime_info runtime_info = {0};
+	int err;
+
+	if (!args->pointer) {
+		args->size = sizeof(struct drm_pvr_dev_query_runtime_info);
+		return 0;
+	}
+
+	runtime_info.free_list_min_pages =
+		pvr_get_free_list_min_pages(pvr_dev);
+	runtime_info.free_list_max_pages =
+		ROGUE_PM_MAX_FREELIST_SIZE / ROGUE_PM_PAGE_SIZE;
+	runtime_info.common_store_alloc_region_size =
+		rogue_get_common_store_alloc_region_size(pvr_dev);
+	runtime_info.common_store_partition_space_size =
+		rogue_get_common_store_partition_space_size(pvr_dev);
+	runtime_info.max_coeffs = rogue_get_max_coeffs(pvr_dev);
+	runtime_info.cdm_max_local_mem_size_regs =
+		rogue_get_cdm_max_local_mem_size_regs(pvr_dev);
+
+	err = PVR_UOBJ_SET(args->pointer, args->size, runtime_info);
+	if (err < 0)
+		return err;
+
+	if (args->size > sizeof(runtime_info))
+		args->size = sizeof(runtime_info);
+	return 0;
+}
+
+/**
+ * pvr_dev_query_quirks_get() - Unpack array of quirks at the address given
+ * in a struct drm_pvr_dev_query_quirks, or get the amount of space required
+ * for it.
+ * @pvr_dev: Device pointer.
+ * @args: [IN] Device query arguments containing a pointer to a userspace
+ * struct drm_pvr_dev_query_quirks.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ * If the userspace pointer in the query object is NULL, or the count is
+ * short, no data is copied.
+ * The count field will be updated to the number of quirks copied, or, if
+ * either pointer is NULL, to the number that would have been copied.
+ * The size field in the query object will be updated to the size copied.
+ *
+ * Returns:
+ * * 0 on success, or if size/count is requested using a NULL pointer, or
+ * * -%EINVAL if args contained non-zero reserved fields, or
+ * * -%E2BIG if the indicated length of the allocation is less than is
+ * required to contain the copied data, or
+ * * -%EFAULT if local memory could not be copied to userspace.
+ */
+static int
+pvr_dev_query_quirks_get(struct pvr_device *pvr_dev,
+			 struct drm_pvr_ioctl_dev_query_args *args)
+{
+	/*
+	 * @FIXME - hardcoding of numbers here is intended as an
+	 * intermediate step so the UAPI can be fixed, but requires a
+	 * refactor in the future to store them in a more appropriate
+	 * location
+	 */
+	static const u32 umd_quirks_musthave[] = {
+		47217,
+		49927,
+		62269,
+	};
+	static const u32 umd_quirks[] = {
+		48545,
+		51764,
+	};
+	struct drm_pvr_dev_query_quirks query;
+	u32 out[ARRAY_SIZE(umd_quirks_musthave) + ARRAY_SIZE(umd_quirks)];
+	size_t out_musthave_count = 0;
+	size_t out_count = 0;
+	int err;
+
+	if (!args->pointer) {
+		args->size = sizeof(struct drm_pvr_dev_query_quirks);
+		return 0;
+	}
+
+	err = PVR_UOBJ_GET(query, args->size, args->pointer);
+
+	if (err < 0)
+		return err;
+	if (query._padding_c)
+		return -EINVAL;
+
+	for (int i = 0; i < ARRAY_SIZE(umd_quirks_musthave); i++) {
+		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks_musthave[i])) {
+			out[out_count++] = umd_quirks_musthave[i];
+			out_musthave_count++;
+		}
+	}
+
+	for (int i = 0; i < ARRAY_SIZE(umd_quirks); i++) {
+		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks[i]))
+			out[out_count++] = umd_quirks[i];
+	}
+
+	if (!query.quirks)
+		goto copy_out;
+	if (query.count < out_count)
+		return -E2BIG;
+
+	if (copy_to_user(u64_to_user_ptr(query.quirks), out,
+			 out_count * sizeof(u32))) {
+		return -EFAULT;
+	}
+
+	query.musthave_count = out_musthave_count;
+
+copy_out:
+	query.count = out_count;
+	err = PVR_UOBJ_SET(args->pointer, args->size, query);
+	if (err < 0)
+		return err;
+
+	args->size = sizeof(query);
+	return 0;
+}
+
+/**
+ * pvr_dev_query_enhancements_get() - Unpack array of enhancements at the
+ * address given in a struct drm_pvr_dev_query_enhancements, or get the amount
+ * of space required for it.
+ * @pvr_dev: Device pointer.
+ * @args: [IN] Device query arguments containing a pointer to a userspace
+ * struct drm_pvr_dev_query_enhancements.
+ *
+ * If the query object pointer is NULL, the size field is updated with the
+ * expected size of the query object.
+ * If the userspace pointer in the query object is NULL, or the count is
+ * short, no data is copied.
+ * The count field will be updated to the number of enhancements copied, or,
+ * if either pointer is NULL, to the number that would have been copied.
+ * The size field in the query object will be updated to the size copied.
+ *
+ * Returns:
+ * * 0 on success, or if size/count is requested using a NULL pointer, or
+ * * -%EINVAL if args contained non-zero reserved fields, or
+ * * -%E2BIG if the indicated length of the allocation is less than is
+ * required to contain the copied data, or
+ * * -%EFAULT if local memory could not be copied to userspace.
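The size/count handshake described above is the usual two-call probe pattern: query with a NULL object pointer to learn the expected struct size, then call again with storage attached. A hypothetical userspace sketch for the quirks query (struct layouts inferred from the handler above; error handling elided):

    struct drm_pvr_ioctl_dev_query_args args = {
        .type = DRM_PVR_DEV_QUERY_QUIRKS_GET,
        .pointer = 0,   /* first call: just fetch the expected size */
    };
    ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);

    uint32_t ids[16];
    struct drm_pvr_dev_query_quirks query = {
        .quirks = (uintptr_t)ids,   /* user pointer carried as a u64 */
        .count = 16,                /* must cover the number returned */
    };
    args.pointer = (uintptr_t)&query;
    args.size = sizeof(query);
    ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);
    /* query.count now holds the number filled in; the first
     * query.musthave_count of them are mandatory quirks. */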
+ */
+static int
+pvr_dev_query_enhancements_get(struct pvr_device *pvr_dev,
+			       struct drm_pvr_ioctl_dev_query_args *args)
+{
+	/*
+	 * @FIXME - hardcoding of numbers here is intended as an
+	 * intermediate step so the UAPI can be fixed, but requires a
+	 * refactor in the future to store them in a more appropriate
+	 * location
+	 */
+	const u32 umd_enhancements[] = {
+		35421,
+		42064,
+	};
+	struct drm_pvr_dev_query_enhancements query;
+	u32 out[ARRAY_SIZE(umd_enhancements)];
+	size_t out_idx = 0;
+	int err;
+
+	if (!args->pointer) {
+		args->size = sizeof(struct drm_pvr_dev_query_enhancements);
+		return 0;
+	}
+
+	err = PVR_UOBJ_GET(query, args->size, args->pointer);
+
+	if (err < 0)
+		return err;
+	if (query._padding_a)
+		return -EINVAL;
+	if (query._padding_c)
+		return -EINVAL;
+
+	for (int i = 0; i < ARRAY_SIZE(umd_enhancements); i++) {
+		if (pvr_device_has_uapi_enhancement(pvr_dev, umd_enhancements[i]))
+			out[out_idx++] = umd_enhancements[i];
+	}
+
+	if (!query.enhancements)
+		goto copy_out;
+	if (query.count < out_idx)
+		return -E2BIG;
+
+	if (copy_to_user(u64_to_user_ptr(query.enhancements), out,
+			 out_idx * sizeof(u32))) {
+		return -EFAULT;
+	}
+
+copy_out:
+	query.count = out_idx;
+	err = PVR_UOBJ_SET(args->pointer, args->size, query);
+	if (err < 0)
+		return err;
+
+	args->size = sizeof(query);
+	return 0;
+}
+
+/**
+ * pvr_ioctl_dev_query() - IOCTL to copy information about a device
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_dev_query_args.
+ * @file: [IN] DRM file private data.
+ * + * Called from userspace with %DRM_IOCTL_PVR_CREATE_CONTEXT. + * + * Return: + * * 0 on success, or + * * -%EINVAL if provided arguments are invalid, or + * * -%EFAULT if arguments can't be copied from userspace, or + * * Any error returned by pvr_create_render_context(). + */ +static int +pvr_ioctl_create_context(struct drm_device *drm_dev, void *raw_args, + struct drm_file *file) +{ + struct drm_pvr_ioctl_create_context_args *args = raw_args; + struct pvr_file *pvr_file = file->driver_priv; + int idx; + int ret; + + if (!drm_dev_enter(drm_dev, &idx)) + return -EIO; + + ret = pvr_context_create(pvr_file, args); + + drm_dev_exit(idx); + + return ret; +} + +/** + * pvr_ioctl_destroy_context() - IOCTL to destroy a context + * @drm_dev: [IN] DRM device. + * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type + * &struct drm_pvr_ioctl_destroy_context_args. + * @file: [IN] DRM file private data. + * + * Called from userspace with %DRM_IOCTL_PVR_DESTROY_CONTEXT. + * + * Return: + * * 0 on success, or + * * -%EINVAL if context not in context list. + */ +static int +pvr_ioctl_destroy_context(struct drm_device *drm_dev, void *raw_args, + struct drm_file *file) +{ + struct drm_pvr_ioctl_destroy_context_args *args = raw_args; + struct pvr_file *pvr_file = file->driver_priv; + + if (args->_padding_4) + return -EINVAL; + + return pvr_context_destroy(pvr_file, args->handle); +} + +/** + * pvr_ioctl_create_free_list() - IOCTL to create a free list + * @drm_dev: [IN] DRM device. + * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type + * &struct drm_pvr_ioctl_create_free_list_args. + * @file: [IN] DRM file private data. + * + * Called from userspace with %DRM_IOCTL_PVR_CREATE_FREE_LIST. + * + * Return: + * * 0 on success, or + * * Any error returned by pvr_free_list_create(). + */ +static int +pvr_ioctl_create_free_list(struct drm_device *drm_dev, void *raw_args, + struct drm_file *file) +{ + struct drm_pvr_ioctl_create_free_list_args *args = raw_args; + struct pvr_file *pvr_file = to_pvr_file(file); + struct pvr_free_list *free_list; + int idx; + int err; + + if (!drm_dev_enter(drm_dev, &idx)) + return -EIO; + + free_list = pvr_free_list_create(pvr_file, args); + if (IS_ERR(free_list)) { + err = PTR_ERR(free_list); + goto err_drm_dev_exit; + } + + /* Allocate object handle for userspace. */ + err = xa_alloc(&pvr_file->free_list_handles, + &args->handle, + free_list, + xa_limit_32b, + GFP_KERNEL); + if (err < 0) + goto err_cleanup; + + drm_dev_exit(idx); + + return 0; + +err_cleanup: + pvr_free_list_put(free_list); + +err_drm_dev_exit: + drm_dev_exit(idx); + + return err; +} + +/** + * pvr_ioctl_destroy_free_list() - IOCTL to destroy a free list + * @drm_dev: [IN] DRM device. + * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type + * &struct drm_pvr_ioctl_destroy_free_list_args. + * @file: [IN] DRM file private data. + * + * Called from userspace with %DRM_IOCTL_PVR_DESTROY_FREE_LIST. + * + * Return: + * * 0 on success, or + * * -%EINVAL if free list not in object list. 
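Creation and destruction are symmetric around the per-file xarray: create publishes the object under a freshly allocated handle, destroy unpublishes it and drops the reference the xarray held, and the final put may free the object. In outline (a condensed restatement of the code above and below, not new driver logic):

    /* create: the object becomes visible to userspace only via xa_alloc() */
    err = xa_alloc(&pvr_file->free_list_handles, &args->handle, free_list,
                   xa_limit_32b, GFP_KERNEL);

    /* destroy: xa_erase() returns the published pointer, or NULL for a
     * bogus handle, in which case the ioctl fails with -EINVAL */
    free_list = xa_erase(&pvr_file->free_list_handles, args->handle);
    if (!free_list)
        return -EINVAL;
    pvr_free_list_put(free_list);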
+ */ +static int +pvr_ioctl_destroy_free_list(struct drm_device *drm_dev, void *raw_args, + struct drm_file *file) +{ + struct drm_pvr_ioctl_destroy_free_list_args *args = raw_args; + struct pvr_file *pvr_file = to_pvr_file(file); + struct pvr_free_list *free_list; + + if (args->_padding_4) + return -EINVAL; + + free_list = xa_erase(&pvr_file->free_list_handles, args->handle); + if (!free_list) + return -EINVAL; + + pvr_free_list_put(free_list); + return 0; +} + +/** + * pvr_ioctl_create_hwrt_dataset() - IOCTL to create a HWRT dataset + * @drm_dev: [IN] DRM device. + * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type + * &struct drm_pvr_ioctl_create_hwrt_dataset_args. + * @file: [IN] DRM file private data. + * + * Called from userspace with %DRM_IOCTL_PVR_CREATE_HWRT_DATASET. + * + * Return: + * * 0 on success, or + * * Any error returned by pvr_hwrt_dataset_create(). + */ +static int +pvr_ioctl_create_hwrt_dataset(struct drm_device *drm_dev, void *raw_args, + struct drm_file *file) +{ + struct drm_pvr_ioctl_create_hwrt_dataset_args *args = raw_args; + struct pvr_file *pvr_file = to_pvr_file(file); + struct pvr_hwrt_dataset *hwrt; + int idx; + int err; + + if (!drm_dev_enter(drm_dev, &idx)) + return -EIO; + + hwrt = pvr_hwrt_dataset_create(pvr_file, args); + if (IS_ERR(hwrt)) { + err = PTR_ERR(hwrt); + goto err_drm_dev_exit; + } + + /* Allocate object handle for userspace. */ + err = xa_alloc(&pvr_file->hwrt_handles, + &args->handle, + hwrt, + xa_limit_32b, + GFP_KERNEL); + if (err < 0) + goto err_cleanup; + + drm_dev_exit(idx); + + return 0; + +err_cleanup: + pvr_hwrt_dataset_put(hwrt); + +err_drm_dev_exit: + drm_dev_exit(idx); + + return err; +} + +/** + * pvr_ioctl_destroy_hwrt_dataset() - IOCTL to destroy a HWRT dataset + * @drm_dev: [IN] DRM device. + * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type + * &struct drm_pvr_ioctl_destroy_hwrt_dataset_args. + * @file: [IN] DRM file private data. + * + * Called from userspace with %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET. + * + * Return: + * * 0 on success, or + * * -%EINVAL if HWRT dataset not in object list. + */ +static int +pvr_ioctl_destroy_hwrt_dataset(struct drm_device *drm_dev, void *raw_args, + struct drm_file *file) +{ + struct drm_pvr_ioctl_destroy_hwrt_dataset_args *args = raw_args; + struct pvr_file *pvr_file = to_pvr_file(file); + struct pvr_hwrt_dataset *hwrt; + + if (args->_padding_4) + return -EINVAL; + + hwrt = xa_erase(&pvr_file->hwrt_handles, args->handle); + if (!hwrt) + return -EINVAL; + + pvr_hwrt_dataset_put(hwrt); + return 0; +} + +/** + * pvr_ioctl_create_vm_context() - IOCTL to create a VM context + * @drm_dev: [IN] DRM device. + * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type + * &struct drm_pvr_ioctl_create_vm_context_args. + * @file: [IN] DRM file private data. + * + * Called from userspace with %DRM_IOCTL_PVR_CREATE_VM_CONTEXT. + * + * Return: + * * 0 on success, or + * * Any error returned by pvr_vm_create_context(). 
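A hypothetical userspace sequence tying together the VM ioctls that follow (field names taken from the handlers in this file; bo_handle, bo_size and dev_addr stand in for values obtained elsewhere, and error handling is elided):

    struct drm_pvr_ioctl_create_vm_context_args vm = {0};
    ioctl(fd, DRM_IOCTL_PVR_CREATE_VM_CONTEXT, &vm);

    struct drm_pvr_ioctl_vm_map_args map = {
        .vm_context_handle = vm.handle,
        .handle = bo_handle,     /* from DRM_IOCTL_PVR_CREATE_BO */
        .offset = 0,             /* byte offset into the BO */
        .device_addr = dev_addr, /* must fall entirely within one heap */
        .size = bo_size,
    };
    ioctl(fd, DRM_IOCTL_PVR_VM_MAP, &map);

    /* ... submit GPU work referencing dev_addr ... */

    struct drm_pvr_ioctl_vm_unmap_args unmap = {
        .vm_context_handle = vm.handle,
        .device_addr = dev_addr,
        .size = bo_size,
    };
    ioctl(fd, DRM_IOCTL_PVR_VM_UNMAP, &unmap);

    struct drm_pvr_ioctl_destroy_vm_context_args dtor = { .handle = vm.handle };
    ioctl(fd, DRM_IOCTL_PVR_DESTROY_VM_CONTEXT, &dtor);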
 */
+static int
+pvr_ioctl_create_vm_context(struct drm_device *drm_dev, void *raw_args,
+			    struct drm_file *file)
+{
+	struct drm_pvr_ioctl_create_vm_context_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_vm_context *vm_ctx;
+	int idx;
+	int err;
+
+	if (!drm_dev_enter(drm_dev, &idx))
+		return -EIO;
+
+	if (args->_padding_4) {
+		err = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	vm_ctx = pvr_vm_create_context(pvr_file->pvr_dev, true);
+	if (IS_ERR(vm_ctx)) {
+		err = PTR_ERR(vm_ctx);
+		goto err_drm_dev_exit;
+	}
+
+	/* Allocate object handle for userspace. */
+	err = xa_alloc(&pvr_file->vm_ctx_handles,
+		       &args->handle,
+		       vm_ctx,
+		       xa_limit_32b,
+		       GFP_KERNEL);
+	if (err < 0)
+		goto err_cleanup;
+
+	drm_dev_exit(idx);
+
+	return 0;
+
+err_cleanup:
+	pvr_vm_context_put(vm_ctx);
+
+err_drm_dev_exit:
+	drm_dev_exit(idx);
+
+	return err;
+}
+
+/**
+ * pvr_ioctl_destroy_vm_context() - IOCTL to destroy a VM context
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_destroy_vm_context_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if object not in object list.
+ */
+static int
+pvr_ioctl_destroy_vm_context(struct drm_device *drm_dev, void *raw_args,
+			     struct drm_file *file)
+{
+	struct drm_pvr_ioctl_destroy_vm_context_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_vm_context *vm_ctx;
+
+	if (args->_padding_4)
+		return -EINVAL;
+
+	vm_ctx = xa_erase(&pvr_file->vm_ctx_handles, args->handle);
+	if (!vm_ctx)
+		return -EINVAL;
+
+	pvr_vm_context_put(vm_ctx);
+	return 0;
+}
+
+/**
+ * pvr_ioctl_vm_map() - IOCTL to map buffer to GPU address space.
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_vm_map_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_VM_MAP.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if &drm_pvr_ioctl_vm_op_map_args.flags is not zero,
+ * * -%EINVAL if the bounds specified by &drm_pvr_ioctl_vm_op_map_args.offset
+ * and &drm_pvr_ioctl_vm_op_map_args.size are not valid or do not fall
+ * within the buffer object specified by
+ * &drm_pvr_ioctl_vm_op_map_args.handle,
+ * * -%EINVAL if the bounds specified by
+ * &drm_pvr_ioctl_vm_op_map_args.device_addr and
+ * &drm_pvr_ioctl_vm_op_map_args.size do not form a valid device-virtual
+ * address range which falls entirely within a single heap, or
+ * * -%ENOENT if &drm_pvr_ioctl_vm_op_map_args.handle does not refer to a
+ * valid PowerVR buffer object.
+ */
+static int
+pvr_ioctl_vm_map(struct drm_device *drm_dev, void *raw_args,
+		 struct drm_file *file)
+{
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	struct drm_pvr_ioctl_vm_map_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_vm_context *vm_ctx;
+
+	struct pvr_gem_object *pvr_obj;
+	size_t pvr_obj_size;
+
+	u64 offset_plus_size;
+	int idx;
+	int err;
+
+	if (!drm_dev_enter(drm_dev, &idx))
+		return -EIO;
+
+	/* Initial validation of args. */
+	if (args->_padding_14) {
+		err = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	if (args->flags != 0 ||
+	    check_add_overflow(args->offset, args->size, &offset_plus_size) ||
+	    !pvr_find_heap_containing(pvr_dev, args->device_addr, args->size)) {
+		err = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+	if (!vm_ctx) {
+		err = -EINVAL;
+		goto err_drm_dev_exit;
+	}
+
+	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
+	if (!pvr_obj) {
+		err = -ENOENT;
+		goto err_put_vm_context;
+	}
+
+	pvr_obj_size = pvr_gem_object_size(pvr_obj);
+
+	/*
+	 * Validate offset and size args. The alignment of these will be
+	 * checked when mapping; for now just check that they're within valid
+	 * bounds
+	 */
+	if (args->offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) {
+		err = -EINVAL;
+		goto err_put_pvr_object;
+	}
+
+	err = pvr_vm_map(vm_ctx, pvr_obj, args->offset,
+			 args->device_addr, args->size);
+	if (err)
+		goto err_put_pvr_object;
+
+	/*
+	 * In order to set up the mapping, we needed a reference to &pvr_obj.
+	 * However, pvr_vm_map() obtains and stores its own reference, so we
+	 * must release ours before returning.
+	 */
+
+err_put_pvr_object:
+	pvr_gem_object_put(pvr_obj);
+
+err_put_vm_context:
+	pvr_vm_context_put(vm_ctx);
+
+err_drm_dev_exit:
+	drm_dev_exit(idx);
+
+	return err;
+}
+
+/**
+ * pvr_ioctl_vm_unmap() - IOCTL to unmap buffer from GPU address space.
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_vm_unmap_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_VM_UNMAP.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if &drm_pvr_ioctl_vm_op_unmap_args.device_addr is not a valid
+ * device page-aligned device-virtual address, or
+ * * -%ENOENT if there is currently no PowerVR buffer object mapped at
+ * &drm_pvr_ioctl_vm_op_unmap_args.device_addr.
+ */
+static int
+pvr_ioctl_vm_unmap(struct drm_device *drm_dev, void *raw_args,
+		   struct drm_file *file)
+{
+	struct drm_pvr_ioctl_vm_unmap_args *args = raw_args;
+	struct pvr_file *pvr_file = to_pvr_file(file);
+	struct pvr_vm_context *vm_ctx;
+	int err;
+
+	/* Initial validation of args. */
+	if (args->_padding_4)
+		return -EINVAL;
+
+	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+	if (!vm_ctx)
+		return -EINVAL;
+
+	err = pvr_vm_unmap(vm_ctx, args->device_addr, args->size);
+
+	pvr_vm_context_put(vm_ctx);
+
+	return err;
+}
+
+/**
+ * pvr_ioctl_submit_jobs() - IOCTL to submit jobs to the GPU
+ * @drm_dev: [IN] DRM device.
+ * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
+ * &struct drm_pvr_ioctl_submit_jobs_args.
+ * @file: [IN] DRM file private data.
+ *
+ * Called from userspace with %DRM_IOCTL_PVR_SUBMIT_JOBS.
+ *
+ * Return:
+ * * 0 on success, or
+ * * -%EINVAL if arguments are invalid.
+ */ +static int +pvr_ioctl_submit_jobs(struct drm_device *drm_dev, void *raw_args, + struct drm_file *file) +{ + struct drm_pvr_ioctl_submit_jobs_args *args = raw_args; + struct pvr_device *pvr_dev = to_pvr_device(drm_dev); + struct pvr_file *pvr_file = to_pvr_file(file); + int idx; + int err; + + if (!drm_dev_enter(drm_dev, &idx)) + return -EIO; + + err = pvr_submit_jobs(pvr_dev, pvr_file, args); + + drm_dev_exit(idx); + + return err; +} + +int +pvr_get_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, void *out) +{ + if (usr_stride < min_stride) + return -EINVAL; + + return copy_struct_from_user(out, obj_size, u64_to_user_ptr(usr_ptr), usr_stride); +} + +int +pvr_set_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, const void *in) +{ + if (usr_stride < min_stride) + return -EINVAL; + + if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_stride, obj_size))) + return -EFAULT; + + if (usr_stride > obj_size && + clear_user(u64_to_user_ptr(usr_ptr + obj_size), usr_stride - obj_size)) { + return -EFAULT; + } + + return 0; +} + +int +pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size, void **out) +{ + int ret = 0; + void *out_alloc; + + if (in->stride < min_stride) + return -EINVAL; + + if (!in->count) + return 0; + + out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL); + if (!out_alloc) + return -ENOMEM; + + if (obj_size == in->stride) { + if (copy_from_user(out_alloc, u64_to_user_ptr(in->array), + (unsigned long)obj_size * in->count)) + ret = -EFAULT; + } else { + void __user *in_ptr = u64_to_user_ptr(in->array); + void *out_ptr = out_alloc; + + for (u32 i = 0; i < in->count; i++) { + ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride); + if (ret) + break; + + out_ptr += obj_size; + in_ptr += in->stride; + } + } + + if (ret) { + kvfree(out_alloc); + return ret; + } + + *out = out_alloc; + return 0; +} + +int +pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size, + const void *in) +{ + if (out->stride < min_stride) + return -EINVAL; + + if (!out->count) + return 0; + + if (obj_size == out->stride) { + if (copy_to_user(u64_to_user_ptr(out->array), in, + (unsigned long)obj_size * out->count)) + return -EFAULT; + } else { + u32 cpy_elem_size = min_t(u32, out->stride, obj_size); + void __user *out_ptr = u64_to_user_ptr(out->array); + const void *in_ptr = in; + + for (u32 i = 0; i < out->count; i++) { + if (copy_to_user(out_ptr, in_ptr, cpy_elem_size)) + return -EFAULT; + + out_ptr += obj_size; + in_ptr += out->stride; + } + + if (out->stride > obj_size && + clear_user(u64_to_user_ptr(out->array + obj_size), + out->stride - obj_size)) { + return -EFAULT; + } + } + + return 0; +} + +#define DRM_PVR_IOCTL(_name, _func, _flags) \ + DRM_IOCTL_DEF_DRV(PVR_##_name, pvr_ioctl_##_func, _flags) + +/* clang-format off */ + +static const struct drm_ioctl_desc pvr_drm_driver_ioctls[] = { + DRM_PVR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW), + DRM_PVR_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW), + DRM_PVR_IOCTL(GET_BO_MMAP_OFFSET, get_bo_mmap_offset, DRM_RENDER_ALLOW), + DRM_PVR_IOCTL(CREATE_VM_CONTEXT, create_vm_context, DRM_RENDER_ALLOW), + DRM_PVR_IOCTL(DESTROY_VM_CONTEXT, destroy_vm_context, DRM_RENDER_ALLOW), + DRM_PVR_IOCTL(VM_MAP, vm_map, DRM_RENDER_ALLOW), + DRM_PVR_IOCTL(VM_UNMAP, vm_unmap, DRM_RENDER_ALLOW), + DRM_PVR_IOCTL(CREATE_CONTEXT, create_context, DRM_RENDER_ALLOW), + DRM_PVR_IOCTL(DESTROY_CONTEXT, destroy_context, DRM_RENDER_ALLOW), + 
DRM_PVR_IOCTL(CREATE_FREE_LIST, create_free_list, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(DESTROY_FREE_LIST, destroy_free_list, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(CREATE_HWRT_DATASET, create_hwrt_dataset, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(DESTROY_HWRT_DATASET, destroy_hwrt_dataset, DRM_RENDER_ALLOW),
+	DRM_PVR_IOCTL(SUBMIT_JOBS, submit_jobs, DRM_RENDER_ALLOW),
+};
+
+/* clang-format on */
+
+#undef DRM_PVR_IOCTL
+
+/**
+ * pvr_drm_driver_open() - Driver callback when a new &struct drm_file is opened
+ * @drm_dev: [IN] DRM device.
+ * @file: [IN] DRM file private data.
+ *
+ * Allocates powervr-specific file private data (&struct pvr_file).
+ *
+ * Registered in &pvr_drm_driver.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%ENOMEM if the allocation of a &struct pvr_file fails, or
+ * * Any error returned by pvr_memory_context_init().
+ */
+static int
+pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
+{
+	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
+	struct pvr_file *pvr_file;
+
+	pvr_file = kzalloc(sizeof(*pvr_file), GFP_KERNEL);
+	if (!pvr_file)
+		return -ENOMEM;
+
+	/*
+	 * Store reference to base DRM file private data for use by
+	 * from_pvr_file.
+	 */
+	pvr_file->file = file;
+
+	/*
+	 * Store reference to powervr-specific outer device struct in file
+	 * private data for convenient access.
+	 */
+	pvr_file->pvr_dev = pvr_dev;
+
+	xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1);
+	xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1);
+	xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1);
+	xa_init_flags(&pvr_file->vm_ctx_handles, XA_FLAGS_ALLOC1);
+
+	/*
+	 * Store reference to powervr-specific file private data in DRM file
+	 * private data.
+	 */
+	file->driver_priv = pvr_file;
+
+	return 0;
+}
+
+/**
+ * pvr_drm_driver_postclose() - One of the driver callbacks when a &struct
+ * drm_file is closed.
+ * @drm_dev: [IN] DRM device (unused).
+ * @file: [IN] DRM file private data.
+ *
+ * Frees powervr-specific file private data (&struct pvr_file).
+ *
+ * Registered in &pvr_drm_driver.
+ */
+static void
+pvr_drm_driver_postclose(__always_unused struct drm_device *drm_dev,
+			 struct drm_file *file)
+{
+	struct pvr_file *pvr_file = to_pvr_file(file);
+
+	/* Kill remaining contexts. */
+	pvr_destroy_contexts_for_file(pvr_file);
+
+	/* Drop references on any remaining objects.
*/ + pvr_destroy_free_lists_for_file(pvr_file); + pvr_destroy_hwrt_datasets_for_file(pvr_file); + pvr_destroy_vm_contexts_for_file(pvr_file); + + kfree(pvr_file); + file->driver_priv = NULL; +} + +DEFINE_DRM_GEM_FOPS(pvr_drm_driver_fops); + +static struct drm_driver pvr_drm_driver = { + .driver_features = DRIVER_GEM | DRIVER_GEM_GPUVA | DRIVER_RENDER | + DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE, + .open = pvr_drm_driver_open, + .postclose = pvr_drm_driver_postclose, + .ioctls = pvr_drm_driver_ioctls, + .num_ioctls = ARRAY_SIZE(pvr_drm_driver_ioctls), + .fops = &pvr_drm_driver_fops, +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = pvr_debugfs_init, +#endif + + .name = PVR_DRIVER_NAME, + .desc = PVR_DRIVER_DESC, + .date = PVR_DRIVER_DATE, + .major = PVR_DRIVER_MAJOR, + .minor = PVR_DRIVER_MINOR, + .patchlevel = PVR_DRIVER_PATCHLEVEL, + + .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, + .gem_create_object = pvr_gem_create_object, +}; + +static int +pvr_probe(struct platform_device *plat_dev) +{ + struct pvr_device *pvr_dev; + struct drm_device *drm_dev; + int err; + + pvr_dev = devm_drm_dev_alloc(&plat_dev->dev, &pvr_drm_driver, + struct pvr_device, base); + if (IS_ERR(pvr_dev)) + return PTR_ERR(pvr_dev); + + drm_dev = &pvr_dev->base; + + platform_set_drvdata(plat_dev, drm_dev); + + init_rwsem(&pvr_dev->reset_sem); + + pvr_context_device_init(pvr_dev); + + err = pvr_queue_device_init(pvr_dev); + if (err) + goto err_context_fini; + + devm_pm_runtime_enable(&plat_dev->dev); + pm_runtime_mark_last_busy(&plat_dev->dev); + + pm_runtime_set_autosuspend_delay(&plat_dev->dev, 50); + pm_runtime_use_autosuspend(&plat_dev->dev); + pvr_watchdog_init(pvr_dev); + + err = pvr_device_init(pvr_dev); + if (err) + goto err_watchdog_fini; + + err = drm_dev_register(drm_dev, 0); + if (err) + goto err_device_fini; + + xa_init_flags(&pvr_dev->free_list_ids, XA_FLAGS_ALLOC1); + xa_init_flags(&pvr_dev->job_ids, XA_FLAGS_ALLOC1); + + return 0; + +err_device_fini: + pvr_device_fini(pvr_dev); + +err_watchdog_fini: + pvr_watchdog_fini(pvr_dev); + + pvr_queue_device_fini(pvr_dev); + +err_context_fini: + pvr_context_device_fini(pvr_dev); + + return err; +} + +static int +pvr_remove(struct platform_device *plat_dev) +{ + struct drm_device *drm_dev = platform_get_drvdata(plat_dev); + struct pvr_device *pvr_dev = to_pvr_device(drm_dev); + + WARN_ON(!xa_empty(&pvr_dev->job_ids)); + WARN_ON(!xa_empty(&pvr_dev->free_list_ids)); + + xa_destroy(&pvr_dev->job_ids); + xa_destroy(&pvr_dev->free_list_ids); + + pm_runtime_suspend(drm_dev->dev); + pvr_device_fini(pvr_dev); + drm_dev_unplug(drm_dev); + pvr_watchdog_fini(pvr_dev); + pvr_queue_device_fini(pvr_dev); + pvr_context_device_fini(pvr_dev); + + return 0; +} + +static const struct of_device_id dt_match[] = { + { .compatible = "img,img-axe", .data = NULL }, + {} +}; +MODULE_DEVICE_TABLE(of, dt_match); + +static const struct dev_pm_ops pvr_pm_ops = { + RUNTIME_PM_OPS(pvr_power_device_suspend, pvr_power_device_resume, pvr_power_device_idle) +}; + +static struct platform_driver pvr_driver = { + .probe = pvr_probe, + .remove = pvr_remove, + .driver = { + .name = PVR_DRIVER_NAME, + .pm = &pvr_pm_ops, + .of_match_table = dt_match, + }, +}; +module_platform_driver(pvr_driver); + +MODULE_AUTHOR("Imagination Technologies Ltd."); +MODULE_DESCRIPTION(PVR_DRIVER_DESC); +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_IMPORT_NS(DMA_BUF); +MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw"); diff --git a/drivers/gpu/drm/imagination/pvr_drv.h b/drivers/gpu/drm/imagination/pvr_drv.h new 
file mode 100644
index 0000000000..378fe477b7
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_drv.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DRV_H
+#define PVR_DRV_H
+
+#include <linux/compiler_attributes.h>
+#include <uapi/drm/pvr_drm.h>
+
+#define PVR_DRIVER_NAME "powervr"
+#define PVR_DRIVER_DESC "Imagination PowerVR (Series 6 and later) & IMG Graphics"
+#define PVR_DRIVER_DATE "20230904"
+
+/*
+ * Driver interface version:
+ * - 1.0: Initial interface
+ */
+#define PVR_DRIVER_MAJOR 1
+#define PVR_DRIVER_MINOR 0
+#define PVR_DRIVER_PATCHLEVEL 0
+
+int pvr_get_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 obj_size, void *out);
+int pvr_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 obj_size, const void *in);
+int pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size,
+		       void **out);
+int pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size,
+		       const void *in);
+
+#define PVR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
+	(offsetof(_typename, _last_mandatory_field) + \
+	 sizeof(((_typename *)NULL)->_last_mandatory_field))
+
+/* NOLINTBEGIN(bugprone-macro-parentheses) */
+#define PVR_UOBJ_DECL(_typename, _last_mandatory_field) \
+	, _typename : PVR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)
+/* NOLINTEND(bugprone-macro-parentheses) */
+
+/**
+ * DOC: PVR user objects.
+ *
+ * Macros used to aid copying structured and array data to and from
+ * userspace. Objects can differ in size, provided the minimum size
+ * allowed is specified (using the last mandatory field in the struct).
+ * All types used with PVR_UOBJ_GET/SET macros must be listed here under
+ * PVR_UOBJ_MIN_SIZE, with the last mandatory struct field specified.
+ */
+
+/**
+ * PVR_UOBJ_MIN_SIZE() - Fetch the minimum copy size of a compatible type object.
+ * @_obj_name: The name of the object. Cannot be a typename - this is deduced.
+ *
+ * This cannot fail. Using the macro with an incompatible type will result in a
+ * compiler error.
+ *
+ * To add compatibility for a type, list it within the macro in an orderly
+ * fashion. The second argument is the name of the last mandatory field of the
+ * struct type, which is used to calculate the size. See also PVR_UOBJ_DECL().
+ *
+ * Return: The minimum copy size.
+ */
+#define PVR_UOBJ_MIN_SIZE(_obj_name) _Generic(_obj_name \
+	PVR_UOBJ_DECL(struct drm_pvr_job, hwrt) \
+	PVR_UOBJ_DECL(struct drm_pvr_sync_op, value) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_gpu_info, num_phantoms) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_runtime_info, cdm_max_local_mem_size_regs) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_quirks, _padding_c) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_enhancements, _padding_c) \
+	PVR_UOBJ_DECL(struct drm_pvr_heap, page_size_log2) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_heap_info, heaps) \
+	PVR_UOBJ_DECL(struct drm_pvr_static_data_area, offset) \
+	PVR_UOBJ_DECL(struct drm_pvr_dev_query_static_data_areas, static_data_areas) \
+	)
+
+/**
+ * PVR_UOBJ_GET() - Copies from _src_usr_ptr to &_dest_obj.
+ * @_dest_obj: The destination container object in kernel space.
+ * @_usr_size: The size of the source container in user space.
+ * @_src_usr_ptr: __u64 raw pointer to the source container in user space.
+ *
+ * Return: Error code. See pvr_get_uobj().
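The net effect of the stride handling is classic extensible-ioctl behaviour: an older, smaller userspace struct is accepted and the kernel-side tail beyond it is zero-filled on GET (with non-zero user bytes past the kernel's struct size rejected by copy_struct_from_user()), while SET writes at most the user's stride and clears the user-side tail when their struct is larger. For example, the quirks handler earlier in this patch calls:

    struct drm_pvr_dev_query_quirks query;

    /* expands to:
     * pvr_get_uobj(args->pointer, args->size,
     *              PVR_UOBJ_MIN_SIZE(query), sizeof(query), &query);
     * so args->size may legitimately be smaller than sizeof(query),
     * down to the offset+size of _padding_c, its last mandatory field.
     */
    err = PVR_UOBJ_GET(query, args->size, args->pointer);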
+ */
+#define PVR_UOBJ_GET(_dest_obj, _usr_size, _src_usr_ptr) \
+	pvr_get_uobj(_src_usr_ptr, _usr_size, \
+		     PVR_UOBJ_MIN_SIZE(_dest_obj), \
+		     sizeof(_dest_obj), &(_dest_obj))
+
+/**
+ * PVR_UOBJ_SET() - Copies from &_src_obj to _dest_usr_ptr.
+ * @_dest_usr_ptr: __u64 raw pointer to the destination container in user space.
+ * @_usr_size: The size of the destination container in user space.
+ * @_src_obj: The source container object in kernel space.
+ *
+ * Return: Error code. See pvr_set_uobj().
+ */
+#define PVR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
+	pvr_set_uobj(_dest_usr_ptr, _usr_size, \
+		     PVR_UOBJ_MIN_SIZE(_src_obj), \
+		     sizeof(_src_obj), &(_src_obj))
+
+/**
+ * PVR_UOBJ_GET_ARRAY() - Copies from @_src_drm_pvr_obj_array.array to
+ * allocated memory and returns a pointer in _dest_array.
+ * @_dest_array: The destination C array object in kernel space.
+ * @_src_drm_pvr_obj_array: The &struct drm_pvr_obj_array containing a __u64 raw
+ * pointer to the source C array in user space and the size of each array
+ * element in user space (the 'stride').
+ *
+ * Return: Error code. See pvr_get_uobj_array().
+ */
+#define PVR_UOBJ_GET_ARRAY(_dest_array, _src_drm_pvr_obj_array) \
+	pvr_get_uobj_array(_src_drm_pvr_obj_array, \
+			   PVR_UOBJ_MIN_SIZE((_dest_array)[0]), \
+			   sizeof((_dest_array)[0]), (void **)&(_dest_array))
+
+/**
+ * PVR_UOBJ_SET_ARRAY() - Copies from _src_array to @_dest_drm_pvr_obj_array.array.
+ * @_dest_drm_pvr_obj_array: The &struct drm_pvr_obj_array containing a __u64 raw
+ * pointer to the destination C array in user space and the size of each array
+ * element in user space (the 'stride').
+ * @_src_array: The source C array object in kernel space.
+ *
+ * Return: Error code. See pvr_set_uobj_array().
+ */
+#define PVR_UOBJ_SET_ARRAY(_dest_drm_pvr_obj_array, _src_array) \
+	pvr_set_uobj_array(_dest_drm_pvr_obj_array, \
+			   PVR_UOBJ_MIN_SIZE((_src_array)[0]), \
+			   sizeof((_src_array)[0]), _src_array)
+
+#endif /* PVR_DRV_H */
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c
new file mode 100644
index 0000000000..5e51bc9807
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_free_list.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_free_list.h"
+#include "pvr_gem.h"
+#include "pvr_hwrt.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_vm.h"
+
+#include <drm/drm_gem.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <uapi/drm/pvr_drm.h>
+
+#define FREE_LIST_ENTRY_SIZE sizeof(u32)
+
+#define FREE_LIST_ALIGNMENT \
+	((ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE / FREE_LIST_ENTRY_SIZE) - 1)
+
+#define FREE_LIST_MIN_PAGES 50
+#define FREE_LIST_MIN_PAGES_BRN66011 40
+#define FREE_LIST_MIN_PAGES_ROGUEXE 25
+
+/**
+ * pvr_get_free_list_min_pages() - Get minimum free list size for this device
+ * @pvr_dev: Device pointer.
+ *
+ * Returns:
+ * * Minimum free list size, in PM physical pages.
+ */ +u32 +pvr_get_free_list_min_pages(struct pvr_device *pvr_dev) +{ + u32 value; + + if (PVR_HAS_FEATURE(pvr_dev, roguexe)) { + if (PVR_HAS_QUIRK(pvr_dev, 66011)) + value = FREE_LIST_MIN_PAGES_BRN66011; + else + value = FREE_LIST_MIN_PAGES_ROGUEXE; + } else { + value = FREE_LIST_MIN_PAGES; + } + + return value; +} + +static int +free_list_create_kernel_structure(struct pvr_file *pvr_file, + struct drm_pvr_ioctl_create_free_list_args *args, + struct pvr_free_list *free_list) +{ + struct pvr_gem_object *free_list_obj; + struct pvr_vm_context *vm_ctx; + u64 free_list_size; + int err; + + if (args->grow_threshold > 100 || + args->initial_num_pages > args->max_num_pages || + args->grow_num_pages > args->max_num_pages || + args->max_num_pages == 0 || + (args->initial_num_pages < args->max_num_pages && !args->grow_num_pages) || + (args->initial_num_pages == args->max_num_pages && args->grow_num_pages)) + return -EINVAL; + + if ((args->initial_num_pages & FREE_LIST_ALIGNMENT) || + (args->max_num_pages & FREE_LIST_ALIGNMENT) || + (args->grow_num_pages & FREE_LIST_ALIGNMENT)) + return -EINVAL; + + vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle); + if (!vm_ctx) + return -EINVAL; + + free_list_obj = pvr_vm_find_gem_object(vm_ctx, args->free_list_gpu_addr, + NULL, &free_list_size); + if (!free_list_obj) { + err = -EINVAL; + goto err_put_vm_context; + } + + if ((free_list_obj->flags & DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS) || + !(free_list_obj->flags & DRM_PVR_BO_PM_FW_PROTECT) || + free_list_size < (args->max_num_pages * FREE_LIST_ENTRY_SIZE)) { + err = -EINVAL; + goto err_put_free_list_obj; + } + + free_list->pvr_dev = pvr_file->pvr_dev; + free_list->current_pages = 0; + free_list->max_pages = args->max_num_pages; + free_list->grow_pages = args->grow_num_pages; + free_list->grow_threshold = args->grow_threshold; + free_list->obj = free_list_obj; + free_list->free_list_gpu_addr = args->free_list_gpu_addr; + free_list->initial_num_pages = args->initial_num_pages; + + pvr_vm_context_put(vm_ctx); + + return 0; + +err_put_free_list_obj: + pvr_gem_object_put(free_list_obj); + +err_put_vm_context: + pvr_vm_context_put(vm_ctx); + + return err; +} + +static void +free_list_destroy_kernel_structure(struct pvr_free_list *free_list) +{ + WARN_ON(!list_empty(&free_list->hwrt_list)); + + pvr_gem_object_put(free_list->obj); +} + +/** + * calculate_free_list_ready_pages_locked() - Function to work out the number of free + * list pages to reserve for growing within + * the FW without having to wait for the + * host to progress a grow request + * @free_list: Pointer to free list. + * @pages: Total pages currently in free list. + * + * If the threshold or grow size means less than the alignment size (4 pages on + * Rogue), then the feature is not used. + * + * Caller must hold &free_list->lock. + * + * Return: number of pages to reserve. + */ +static u32 +calculate_free_list_ready_pages_locked(struct pvr_free_list *free_list, u32 pages) +{ + u32 ready_pages; + + lockdep_assert_held(&free_list->lock); + + ready_pages = ((pages * free_list->grow_threshold) / 100); + + /* The number of pages must be less than the grow size. */ + ready_pages = min(ready_pages, free_list->grow_pages); + + /* + * The number of pages must be a multiple of the free list align size. 
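A worked example of the calculation, under the assumption that ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE / FREE_LIST_ENTRY_SIZE is 4 (the "4 pages on Rogue" mentioned above), which makes FREE_LIST_ALIGNMENT a mask of 3:

    /* pages = 100, grow_threshold = 10 (percent), grow_pages = 32 */
    ready_pages = (100 * 10) / 100;     /* -> 10 */
    ready_pages = min(ready_pages, 32); /* capped at the grow size */
    ready_pages &= ~3;                  /* round down to a multiple of 4 -> 8 */
    /* a threshold or grow size yielding fewer than 4 pages rounds down
     * to 0, i.e. the feature is effectively unused */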
+ */ + ready_pages &= ~FREE_LIST_ALIGNMENT; + + return ready_pages; +} + +static u32 +calculate_free_list_ready_pages(struct pvr_free_list *free_list, u32 pages) +{ + u32 ret; + + mutex_lock(&free_list->lock); + + ret = calculate_free_list_ready_pages_locked(free_list, pages); + + mutex_unlock(&free_list->lock); + + return ret; +} + +static void +free_list_fw_init(void *cpu_ptr, void *priv) +{ + struct rogue_fwif_freelist *fw_data = cpu_ptr; + struct pvr_free_list *free_list = priv; + u32 ready_pages; + + /* Fill out FW structure */ + ready_pages = calculate_free_list_ready_pages(free_list, + free_list->initial_num_pages); + + fw_data->max_pages = free_list->max_pages; + fw_data->current_pages = free_list->initial_num_pages - ready_pages; + fw_data->grow_pages = free_list->grow_pages; + fw_data->ready_pages = ready_pages; + fw_data->freelist_id = free_list->fw_id; + fw_data->grow_pending = false; + fw_data->current_stack_top = fw_data->current_pages - 1; + fw_data->freelist_dev_addr = free_list->free_list_gpu_addr; + fw_data->current_dev_addr = (fw_data->freelist_dev_addr + + ((fw_data->max_pages - fw_data->current_pages) * + FREE_LIST_ENTRY_SIZE)) & + ~((u64)ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE - 1); +} + +static int +free_list_create_fw_structure(struct pvr_file *pvr_file, + struct drm_pvr_ioctl_create_free_list_args *args, + struct pvr_free_list *free_list) +{ + struct pvr_device *pvr_dev = pvr_file->pvr_dev; + + /* + * Create and map the FW structure so we can initialise it. This is not + * accessed on the CPU side post-initialisation so the mapping lifetime + * is only for this function. + */ + free_list->fw_data = pvr_fw_object_create_and_map(pvr_dev, sizeof(*free_list->fw_data), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + free_list_fw_init, free_list, + &free_list->fw_obj); + if (IS_ERR(free_list->fw_data)) + return PTR_ERR(free_list->fw_data); + + return 0; +} + +static void +free_list_destroy_fw_structure(struct pvr_free_list *free_list) +{ + pvr_fw_object_unmap_and_destroy(free_list->fw_obj); +} + +static int +pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list, + struct sg_table *sgt, u32 offset, u32 num_pages) +{ + struct sg_dma_page_iter dma_iter; + u32 *page_list; + + lockdep_assert_held(&free_list->lock); + + page_list = pvr_gem_object_vmap(free_list->obj); + if (IS_ERR(page_list)) + return PTR_ERR(page_list); + + offset /= FREE_LIST_ENTRY_SIZE; + /* clang-format off */ + for_each_sgtable_dma_page(sgt, &dma_iter, 0) { + dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter); + u64 dma_pfn = dma_addr >> + ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; + u32 dma_addr_offset; + + BUILD_BUG_ON(ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE > PAGE_SIZE); + + for (dma_addr_offset = 0; dma_addr_offset < PAGE_SIZE; + dma_addr_offset += ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE) { + WARN_ON_ONCE(dma_pfn >> 32); + + page_list[offset++] = (u32)dma_pfn; + dma_pfn++; + + num_pages--; + if (!num_pages) + break; + } + + if (!num_pages) + break; + } + /* clang-format on */ + + /* Make sure our free_list update is flushed. 
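+ * The wmb() orders the page list writes above against any later write that + * makes them visible to the FW.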
*/ + wmb(); + + pvr_gem_object_vunmap(free_list->obj); + + return 0; +} + +static int +pvr_free_list_insert_node_locked(struct pvr_free_list_node *free_list_node) +{ + struct pvr_free_list *free_list = free_list_node->free_list; + struct sg_table *sgt; + u32 start_page; + u32 offset; + int err; + + lockdep_assert_held(&free_list->lock); + + start_page = free_list->max_pages - free_list->current_pages - + free_list_node->num_pages; + offset = (start_page * FREE_LIST_ENTRY_SIZE) & + ~((u64)ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE - 1); + + sgt = drm_gem_shmem_get_pages_sgt(&free_list_node->mem_obj->base); + if (WARN_ON(IS_ERR(sgt))) + return PTR_ERR(sgt); + + err = pvr_free_list_insert_pages_locked(free_list, sgt, + offset, free_list_node->num_pages); + if (!err) + free_list->current_pages += free_list_node->num_pages; + + return err; +} + +static int +pvr_free_list_grow(struct pvr_free_list *free_list, u32 num_pages) +{ + struct pvr_device *pvr_dev = free_list->pvr_dev; + struct pvr_free_list_node *free_list_node; + int err; + + mutex_lock(&free_list->lock); + + if (num_pages & FREE_LIST_ALIGNMENT) { + err = -EINVAL; + goto err_unlock; + } + + free_list_node = kzalloc(sizeof(*free_list_node), GFP_KERNEL); + if (!free_list_node) { + err = -ENOMEM; + goto err_unlock; + } + + free_list_node->num_pages = num_pages; + free_list_node->free_list = free_list; + + free_list_node->mem_obj = pvr_gem_object_create(pvr_dev, + num_pages << + ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, + PVR_BO_FW_FLAGS_DEVICE_CACHED); + if (IS_ERR(free_list_node->mem_obj)) { + err = PTR_ERR(free_list_node->mem_obj); + goto err_free; + } + + err = pvr_free_list_insert_node_locked(free_list_node); + if (err) + goto err_destroy_gem_object; + + list_add_tail(&free_list_node->node, &free_list->mem_block_list); + + /* + * Reserve a number of ready pages to allow the FW to process OOM quickly + * and asynchronously request a grow. + */ + free_list->ready_pages = + calculate_free_list_ready_pages_locked(free_list, + free_list->current_pages); + free_list->current_pages -= free_list->ready_pages; + + mutex_unlock(&free_list->lock); + + return 0; + +err_destroy_gem_object: + pvr_gem_object_put(free_list_node->mem_obj); + +err_free: + kfree(free_list_node); + +err_unlock: + mutex_unlock(&free_list->lock); + + return err; +} + +void pvr_free_list_process_grow_req(struct pvr_device *pvr_dev, + struct rogue_fwif_fwccb_cmd_freelist_gs_data *req) +{ + struct pvr_free_list *free_list = pvr_free_list_lookup_id(pvr_dev, req->freelist_id); + struct rogue_fwif_kccb_cmd resp_cmd = { + .cmd_type = ROGUE_FWIF_KCCB_CMD_FREELIST_GROW_UPDATE, + }; + struct rogue_fwif_freelist_gs_data *resp = &resp_cmd.cmd_data.free_list_gs_data; + u32 grow_pages = 0; + + /* If we don't have a freelist registered for this ID, we can't do much. */ + if (WARN_ON(!free_list)) + return; + + /* Since the FW made the request, it has already consumed the ready pages; + * update the host struct to match. + */ + free_list->current_pages += free_list->ready_pages; + free_list->ready_pages = 0; + + /* If the grow succeeds, update the grow_pages argument. */ + if (!pvr_free_list_grow(free_list, free_list->grow_pages)) + grow_pages = free_list->grow_pages; + + /* Now prepare the response and send it back to the FW.
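+ * new_pages reports the usable total (current + ready) after the grow + * attempt; delta_pages stays zero if the grow failed.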
*/ + pvr_fw_object_get_fw_addr(free_list->fw_obj, &resp->freelist_fw_addr); + resp->delta_pages = grow_pages; + resp->new_pages = free_list->current_pages + free_list->ready_pages; + resp->ready_pages = free_list->ready_pages; + pvr_free_list_put(free_list); + + WARN_ON(pvr_kccb_send_cmd(pvr_dev, &resp_cmd, NULL)); +} + +static void +pvr_free_list_free_node(struct pvr_free_list_node *free_list_node) +{ + pvr_gem_object_put(free_list_node->mem_obj); + + kfree(free_list_node); +} + +/** + * pvr_free_list_create() - Create a new free list and return an object pointer + * @pvr_file: Pointer to pvr_file structure. + * @args: Creation arguments from userspace. + * + * Return: + * * Pointer to new free_list, or + * * ERR_PTR(-%ENOMEM) on out of memory. + */ +struct pvr_free_list * +pvr_free_list_create(struct pvr_file *pvr_file, + struct drm_pvr_ioctl_create_free_list_args *args) +{ + struct pvr_free_list *free_list; + int err; + + /* Create and fill out the kernel structure */ + free_list = kzalloc(sizeof(*free_list), GFP_KERNEL); + + if (!free_list) + return ERR_PTR(-ENOMEM); + + kref_init(&free_list->ref_count); + INIT_LIST_HEAD(&free_list->mem_block_list); + INIT_LIST_HEAD(&free_list->hwrt_list); + mutex_init(&free_list->lock); + + err = free_list_create_kernel_structure(pvr_file, args, free_list); + if (err < 0) + goto err_free; + + /* Allocate global object ID for firmware. */ + err = xa_alloc(&pvr_file->pvr_dev->free_list_ids, + &free_list->fw_id, + free_list, + xa_limit_32b, + GFP_KERNEL); + if (err) + goto err_destroy_kernel_structure; + + err = free_list_create_fw_structure(pvr_file, args, free_list); + if (err < 0) + goto err_free_fw_id; + + err = pvr_free_list_grow(free_list, args->initial_num_pages); + if (err < 0) + goto err_fw_struct_cleanup; + + return free_list; + +err_fw_struct_cleanup: + WARN_ON(pvr_fw_structure_cleanup(free_list->pvr_dev, + ROGUE_FWIF_CLEANUP_FREELIST, + free_list->fw_obj, 0)); + +err_free_fw_id: + xa_erase(&free_list->pvr_dev->free_list_ids, free_list->fw_id); + +err_destroy_kernel_structure: + free_list_destroy_kernel_structure(free_list); + +err_free: + mutex_destroy(&free_list->lock); + kfree(free_list); + + return ERR_PTR(err); +} + +static void +pvr_free_list_release(struct kref *ref_count) +{ + struct pvr_free_list *free_list = + container_of(ref_count, struct pvr_free_list, ref_count); + struct list_head *pos, *n; + int err; + + xa_erase(&free_list->pvr_dev->free_list_ids, free_list->fw_id); + + err = pvr_fw_structure_cleanup(free_list->pvr_dev, + ROGUE_FWIF_CLEANUP_FREELIST, + free_list->fw_obj, 0); + if (err == -EBUSY) { + /* Flush the FWCCB to process any HWR or freelist reconstruction + * request that might keep the freelist busy, and try again. + */ + pvr_fwccb_process(free_list->pvr_dev); + err = pvr_fw_structure_cleanup(free_list->pvr_dev, + ROGUE_FWIF_CLEANUP_FREELIST, + free_list->fw_obj, 0); + } + + WARN_ON(err); + + /* clang-format off */ + list_for_each_safe(pos, n, &free_list->mem_block_list) { + struct pvr_free_list_node *free_list_node = + container_of(pos, struct pvr_free_list_node, node); + + list_del(pos); + pvr_free_list_free_node(free_list_node); + } + /* clang-format on */ + + free_list_destroy_kernel_structure(free_list); + free_list_destroy_fw_structure(free_list); + mutex_destroy(&free_list->lock); + kfree(free_list); +} + +/** + * pvr_destroy_free_lists_for_file: Destroy any free lists associated with the + * given file. + * @pvr_file: Pointer to pvr_file structure. 
+ * + * Removes all free lists associated with @pvr_file from the device free_list + * list and drops initial references. Free lists will then be destroyed once + * all outstanding references are dropped. + */ +void pvr_destroy_free_lists_for_file(struct pvr_file *pvr_file) +{ + struct pvr_free_list *free_list; + unsigned long handle; + + xa_for_each(&pvr_file->free_list_handles, handle, free_list) { + (void)free_list; + pvr_free_list_put(xa_erase(&pvr_file->free_list_handles, handle)); + } +} + +/** + * pvr_free_list_put() - Release reference on free list + * @free_list: Pointer to list to release reference on + */ +void +pvr_free_list_put(struct pvr_free_list *free_list) +{ + if (free_list) + kref_put(&free_list->ref_count, pvr_free_list_release); +} + +void pvr_free_list_add_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data) +{ + mutex_lock(&free_list->lock); + + list_add_tail(&hwrt_data->freelist_node, &free_list->hwrt_list); + + mutex_unlock(&free_list->lock); +} + +void pvr_free_list_remove_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data) +{ + mutex_lock(&free_list->lock); + + list_del(&hwrt_data->freelist_node); + + mutex_unlock(&free_list->lock); +} + +static void +pvr_free_list_reconstruct(struct pvr_device *pvr_dev, u32 freelist_id) +{ + struct pvr_free_list *free_list = pvr_free_list_lookup_id(pvr_dev, freelist_id); + struct pvr_free_list_node *free_list_node; + struct rogue_fwif_freelist *fw_data; + struct pvr_hwrt_data *hwrt_data; + + if (!free_list) + return; + + mutex_lock(&free_list->lock); + + /* Rebuild the free list based on the memory block list. */ + free_list->current_pages = 0; + + list_for_each_entry(free_list_node, &free_list->mem_block_list, node) + WARN_ON(pvr_free_list_insert_node_locked(free_list_node)); + + /* + * Remove the ready pages, which are reserved to allow the FW to process OOM quickly and + * asynchronously request a grow. + */ + free_list->current_pages -= free_list->ready_pages; + + fw_data = free_list->fw_data; + fw_data->current_stack_top = fw_data->current_pages - 1; + fw_data->allocated_page_count = 0; + fw_data->allocated_mmu_page_count = 0; + + /* Reset the state of any associated HWRTs. 
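+ * Each one is marked ROGUE_FWIF_RTDATA_STATE_HWR and has its + * HWRTDATA_HAS_LAST_GEOM flag cleared, so the FW treats its contents as + * lost after the reconstruction.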
*/ + list_for_each_entry(hwrt_data, &free_list->hwrt_list, freelist_node) { + struct rogue_fwif_hwrtdata *hwrt_fw_data = pvr_fw_object_vmap(hwrt_data->fw_obj); + + if (!WARN_ON(IS_ERR(hwrt_fw_data))) { + hwrt_fw_data->state = ROGUE_FWIF_RTDATA_STATE_HWR; + hwrt_fw_data->hwrt_data_flags &= ~HWRTDATA_HAS_LAST_GEOM; + } + + pvr_fw_object_vunmap(hwrt_data->fw_obj); + } + + mutex_unlock(&free_list->lock); + + pvr_free_list_put(free_list); +} + +void +pvr_free_list_process_reconstruct_req(struct pvr_device *pvr_dev, + struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data *req) +{ + struct rogue_fwif_kccb_cmd resp_cmd = { + .cmd_type = ROGUE_FWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE, + }; + struct rogue_fwif_freelists_reconstruction_data *resp = + &resp_cmd.cmd_data.free_lists_reconstruction_data; + + for (u32 i = 0; i < req->freelist_count; i++) + pvr_free_list_reconstruct(pvr_dev, req->freelist_ids[i]); + + resp->freelist_count = req->freelist_count; + memcpy(resp->freelist_ids, req->freelist_ids, + req->freelist_count * sizeof(resp->freelist_ids[0])); + + WARN_ON(pvr_kccb_send_cmd(pvr_dev, &resp_cmd, NULL)); +} diff --git a/drivers/gpu/drm/imagination/pvr_free_list.h b/drivers/gpu/drm/imagination/pvr_free_list.h new file mode 100644 index 0000000000..bfb4f5fc62 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_free_list.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_FREE_LIST_H +#define PVR_FREE_LIST_H + +#include <linux/compiler_attributes.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/xarray.h> +#include <uapi/drm/pvr_drm.h> + +#include "pvr_device.h" + +/* Forward declaration from pvr_gem.h. */ +struct pvr_fw_object; + +/* Forward declaration from pvr_gem.h. */ +struct pvr_gem_object; + +/* Forward declaration from pvr_hwrt.h. */ +struct pvr_hwrt_data; + +/** + * struct pvr_free_list_node - structure representing an allocation in the free + * list + */ +struct pvr_free_list_node { + /** @node: List node for &pvr_free_list.mem_block_list. */ + struct list_head node; + + /** @free_list: Pointer to owning free list. */ + struct pvr_free_list *free_list; + + /** @num_pages: Number of pages in this node. */ + u32 num_pages; + + /** @mem_obj: GEM object representing the pages in this node. */ + struct pvr_gem_object *mem_obj; +}; + +/** + * struct pvr_free_list - structure representing a free list + */ +struct pvr_free_list { + /** @ref_count: Reference count of object. */ + struct kref ref_count; + + /** @pvr_dev: Pointer to device that owns this object. */ + struct pvr_device *pvr_dev; + + /** @obj: GEM object representing the free list. */ + struct pvr_gem_object *obj; + + /** @fw_obj: FW object representing the FW-side structure. */ + struct pvr_fw_object *fw_obj; + + /** @fw_data: Pointer to CPU mapping of the FW-side structure. */ + struct rogue_fwif_freelist *fw_data; + + /** + * @lock: Mutex protecting modification of the free list. Must be held when accessing any + * of the members below. + */ + struct mutex lock; + + /** @fw_id: Firmware ID for this object. */ + u32 fw_id; + + /** @current_pages: Current number of pages in free list. */ + u32 current_pages; + + /** @max_pages: Maximum number of pages in free list. */ + u32 max_pages; + + /** @grow_pages: Pages to grow free list by per request. */ + u32 grow_pages; + + /** + * @grow_threshold: Percentage of free list memory used that should + * trigger a new grow request.
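+ * For example, a value of 90 asks the FW to request a grow once 90% of + * the list is consumed.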
+ */ + u32 grow_threshold; + + /** + * @ready_pages: Number of pages reserved for FW to use while a grow + * request is being processed. + */ + u32 ready_pages; + + /** @mem_block_list: List of memory blocks in this free list. */ + struct list_head mem_block_list; + + /** @hwrt_list: List of HWRTs using this free list. */ + struct list_head hwrt_list; + + /** @initial_num_pages: Initial number of pages in free list. */ + u32 initial_num_pages; + + /** @free_list_gpu_addr: Address of free list in GPU address space. */ + u64 free_list_gpu_addr; +}; + +struct pvr_free_list * +pvr_free_list_create(struct pvr_file *pvr_file, + struct drm_pvr_ioctl_create_free_list_args *args); + +void +pvr_destroy_free_lists_for_file(struct pvr_file *pvr_file); + +u32 +pvr_get_free_list_min_pages(struct pvr_device *pvr_dev); + +static __always_inline struct pvr_free_list * +pvr_free_list_get(struct pvr_free_list *free_list) +{ + if (free_list) + kref_get(&free_list->ref_count); + + return free_list; +} + +/** + * pvr_free_list_lookup() - Lookup free list pointer from handle and file + * @pvr_file: Pointer to pvr_file structure. + * @handle: Object handle. + * + * Takes reference on free list object. Call pvr_free_list_put() to release. + * + * Returns: + * * The requested object on success, or + * * %NULL on failure (object does not exist in list, is not a free list, or + * does not belong to @pvr_file) + */ +static __always_inline struct pvr_free_list * +pvr_free_list_lookup(struct pvr_file *pvr_file, u32 handle) +{ + struct pvr_free_list *free_list; + + xa_lock(&pvr_file->free_list_handles); + free_list = pvr_free_list_get(xa_load(&pvr_file->free_list_handles, handle)); + xa_unlock(&pvr_file->free_list_handles); + + return free_list; +} + +/** + * pvr_free_list_lookup_id() - Lookup free list pointer from FW ID + * @pvr_dev: Device pointer. + * @id: FW object ID. + * + * Takes reference on free list object. Call pvr_free_list_put() to release. + * + * Returns: + * * The requested object on success, or + * * %NULL on failure (object does not exist in list, or is not a free list) + */ +static __always_inline struct pvr_free_list * +pvr_free_list_lookup_id(struct pvr_device *pvr_dev, u32 id) +{ + struct pvr_free_list *free_list; + + xa_lock(&pvr_dev->free_list_ids); + + /* Free lists are removed from the free_list_ids set in the free list + * release path, meaning the ref_count has already reached zero before + * they get removed. We need to make sure we're not trying to acquire a + * free list that's being destroyed.
+ */ + free_list = xa_load(&pvr_dev->free_list_ids, id); + if (free_list && !kref_get_unless_zero(&free_list->ref_count)) + free_list = NULL; + xa_unlock(&pvr_dev->free_list_ids); + + return free_list; +} + +void +pvr_free_list_put(struct pvr_free_list *free_list); + +void +pvr_free_list_add_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data); +void +pvr_free_list_remove_hwrt(struct pvr_free_list *free_list, struct pvr_hwrt_data *hwrt_data); + +void pvr_free_list_process_grow_req(struct pvr_device *pvr_dev, + struct rogue_fwif_fwccb_cmd_freelist_gs_data *req); + +void +pvr_free_list_process_reconstruct_req(struct pvr_device *pvr_dev, + struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data *req); + +#endif /* PVR_FREE_LIST_H */ diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c new file mode 100644 index 0000000000..3debc9870a --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_fw.c @@ -0,0 +1,1489 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#include "pvr_ccb.h" +#include "pvr_device.h" +#include "pvr_device_info.h" +#include "pvr_fw.h" +#include "pvr_fw_info.h" +#include "pvr_fw_startstop.h" +#include "pvr_fw_trace.h" +#include "pvr_gem.h" +#include "pvr_power.h" +#include "pvr_rogue_fwif_dev_info.h" +#include "pvr_rogue_heap_config.h" +#include "pvr_vm.h" + +#include <drm/drm_drv.h> +#include <drm/drm_managed.h> +#include <drm/drm_mm.h> +#include <linux/clk.h> +#include <linux/firmware.h> +#include <linux/math.h> +#include <linux/minmax.h> +#include <linux/sizes.h> + +#define FW_MAX_SUPPORTED_MAJOR_VERSION 1 + +#define FW_BOOT_TIMEOUT_USEC 5000000 + +/* Config heap occupies top 192k of the firmware heap. */ +#define PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY SZ_64K +#define PVR_ROGUE_FW_CONFIG_HEAP_SIZE (3 * PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY) + +/* Main firmware allocations should come from the remainder of the heap. */ +#define PVR_ROGUE_FW_MAIN_HEAP_BASE ROGUE_FW_HEAP_BASE + +/* Offsets from start of configuration area of FW heap. */ +#define PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET 0 +#define PVR_ROGUE_FWIF_OSINIT_OFFSET \ + (PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY) +#define PVR_ROGUE_FWIF_SYSINIT_OFFSET \ + (PVR_ROGUE_FWIF_OSINIT_OFFSET + PVR_ROGUE_FW_CONFIG_HEAP_GRANULARITY) + +#define PVR_ROGUE_FAULT_PAGE_SIZE SZ_4K + +#define PVR_SYNC_OBJ_SIZE sizeof(u32) + +const struct pvr_fw_layout_entry * +pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id) +{ + const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries; + u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num; + u32 entry; + + for (entry = 0; entry < num_layout_entries; entry++) { + if (layout_entries[entry].id == id) + return &layout_entries[entry]; + } + + return NULL; +} + +static const struct pvr_fw_layout_entry * +pvr_fw_find_private_data(struct pvr_device *pvr_dev) +{ + const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries; + u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num; + u32 entry; + + for (entry = 0; entry < num_layout_entries; entry++) { + if (layout_entries[entry].id == META_PRIVATE_DATA || + layout_entries[entry].id == MIPS_PRIVATE_DATA || + layout_entries[entry].id == RISCV_PRIVATE_DATA) + return &layout_entries[entry]; + } + + return NULL; +} + +#define DEV_INFO_MASK_SIZE(x) DIV_ROUND_UP(x, 64) + +/** + * pvr_fw_validate() - Parse firmware header and check compatibility + * @pvr_dev: Device pointer.
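+ * + * The FW info header occupies the final 4k block of the image. Validation + * checks the info version, the header and layout entry sizes, that the FW + * was built for this device's BVNC, and that every layout entry describes a + * non-empty region of the image.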
+ * + * Returns: + * * 0 on success, or + * * -EINVAL if firmware is incompatible. + */ +static int +pvr_fw_validate(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + const struct firmware *firmware = pvr_dev->fw_dev.firmware; + const struct pvr_fw_layout_entry *layout_entries; + const struct pvr_fw_info_header *header; + const u8 *fw = firmware->data; + u32 fw_offset = firmware->size - SZ_4K; + u32 layout_table_size; + u32 entry; + + if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE)) + return -EINVAL; + + header = (const struct pvr_fw_info_header *)&fw[fw_offset]; + + if (header->info_version != PVR_FW_INFO_VERSION) { + drm_err(drm_dev, "Unsupported fw info version %u\n", + header->info_version); + return -EINVAL; + } + + if (header->header_len != sizeof(struct pvr_fw_info_header) || + header->layout_entry_size != sizeof(struct pvr_fw_layout_entry) || + header->layout_entry_num > PVR_FW_INFO_MAX_NUM_ENTRIES) { + drm_err(drm_dev, "FW info format mismatch\n"); + return -EINVAL; + } + + if (!(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) || + header->fw_version_major > FW_MAX_SUPPORTED_MAJOR_VERSION || + header->fw_version_major == 0) { + drm_err(drm_dev, "Unsupported FW version %u.%u (build: %u%s)\n", + header->fw_version_major, header->fw_version_minor, + header->fw_version_build, + (header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ? " OS" : ""); + return -EINVAL; + } + + if (pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id) != header->bvnc) { + struct pvr_gpu_id fw_gpu_id; + + packed_bvnc_to_pvr_gpu_id(header->bvnc, &fw_gpu_id); + drm_err(drm_dev, "FW built for incorrect GPU ID %i.%i.%i.%i (expected %i.%i.%i.%i)\n", + fw_gpu_id.b, fw_gpu_id.v, fw_gpu_id.n, fw_gpu_id.c, + pvr_dev->gpu_id.b, pvr_dev->gpu_id.v, pvr_dev->gpu_id.n, pvr_dev->gpu_id.c); + return -EINVAL; + } + + fw_offset += header->header_len; + layout_table_size = + header->layout_entry_size * header->layout_entry_num; + if ((fw_offset + layout_table_size) > firmware->size) + return -EINVAL; + + layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset]; + for (entry = 0; entry < header->layout_entry_num; entry++) { + u32 start_addr = layout_entries[entry].base_addr; + u32 end_addr = start_addr + layout_entries[entry].alloc_size; + + if (start_addr >= end_addr) + return -EINVAL; + } + + fw_offset = (firmware->size - SZ_4K) - header->device_info_size; + + drm_info(drm_dev, "FW version v%u.%u (build %u OS)\n", header->fw_version_major, + header->fw_version_minor, header->fw_version_build); + + pvr_dev->fw_version.major = header->fw_version_major; + pvr_dev->fw_version.minor = header->fw_version_minor; + + pvr_dev->fw_dev.header = header; + pvr_dev->fw_dev.layout_entries = layout_entries; + + return 0; +} + +static int +pvr_fw_get_device_info(struct pvr_device *pvr_dev) +{ + const struct firmware *firmware = pvr_dev->fw_dev.firmware; + struct pvr_fw_device_info_header *header; + const u8 *fw = firmware->data; + const u64 *dev_info; + u32 fw_offset; + + fw_offset = (firmware->size - SZ_4K) - pvr_dev->fw_dev.header->device_info_size; + + header = (struct pvr_fw_device_info_header *)&fw[fw_offset]; + dev_info = (u64 *)(header + 1); + + pvr_device_info_set_quirks(pvr_dev, dev_info, header->brn_mask_size); + dev_info += header->brn_mask_size; + + pvr_device_info_set_enhancements(pvr_dev, dev_info, header->ern_mask_size); + dev_info += header->ern_mask_size; + + return pvr_device_info_set_features(pvr_dev, dev_info, header->feature_mask_size, + header->feature_param_size); +} + +static void 
+layout_get_sizes(struct pvr_device *pvr_dev) +{ + const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries; + u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num; + struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem; + + fw_mem->code_alloc_size = 0; + fw_mem->data_alloc_size = 0; + fw_mem->core_code_alloc_size = 0; + fw_mem->core_data_alloc_size = 0; + + /* Extract section sizes from FW layout table. */ + for (u32 entry = 0; entry < num_layout_entries; entry++) { + switch (layout_entries[entry].type) { + case FW_CODE: + fw_mem->code_alloc_size += layout_entries[entry].alloc_size; + break; + case FW_DATA: + fw_mem->data_alloc_size += layout_entries[entry].alloc_size; + break; + case FW_COREMEM_CODE: + fw_mem->core_code_alloc_size += + layout_entries[entry].alloc_size; + break; + case FW_COREMEM_DATA: + fw_mem->core_data_alloc_size += + layout_entries[entry].alloc_size; + break; + case NONE: + break; + } + } +} + +int +pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw_code_ptr, + void *fw_data_ptr, void *fw_core_code_ptr, void *fw_core_data_ptr, + void **host_addr_out) +{ + const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries; + u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num; + u32 end_addr = addr + size; + int entry = 0; + + /* Ensure requested range is not zero, and size is not causing addr to overflow. */ + if (end_addr <= addr) + return -EINVAL; + + for (entry = 0; entry < num_layout_entries; entry++) { + u32 entry_start_addr = layout_entries[entry].base_addr; + u32 entry_end_addr = entry_start_addr + layout_entries[entry].alloc_size; + + if (addr >= entry_start_addr && addr < entry_end_addr && + end_addr > entry_start_addr && end_addr <= entry_end_addr) { + switch (layout_entries[entry].type) { + case FW_CODE: + *host_addr_out = fw_code_ptr; + break; + + case FW_DATA: + *host_addr_out = fw_data_ptr; + break; + + case FW_COREMEM_CODE: + *host_addr_out = fw_core_code_ptr; + break; + + case FW_COREMEM_DATA: + *host_addr_out = fw_core_data_ptr; + break; + + default: + return -EINVAL; + } + /* Direct Mem write to mapped memory */ + addr -= layout_entries[entry].base_addr; + addr += layout_entries[entry].alloc_offset; + + /* + * Add offset to pointer to FW allocation only if that + * allocation is available + */ + *(u8 **)host_addr_out += addr; + return 0; + } + } + + return -EINVAL; +} + +static int +pvr_fw_create_fwif_connection_ctl(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + + fw_dev->fwif_connection_ctl = + pvr_fw_object_create_and_map_offset(pvr_dev, + fw_dev->fw_heap_info.config_offset + + PVR_ROGUE_FWIF_CONNECTION_CTL_OFFSET, + sizeof(*fw_dev->fwif_connection_ctl), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, + &fw_dev->mem.fwif_connection_ctl_obj); + if (IS_ERR(fw_dev->fwif_connection_ctl)) { + drm_err(drm_dev, + "Unable to allocate FWIF connection control memory\n"); + return PTR_ERR(fw_dev->fwif_connection_ctl); + } + + return 0; +} + +static void +pvr_fw_fini_fwif_connection_ctl(struct pvr_device *pvr_dev) +{ + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + + pvr_fw_object_unmap_and_destroy(fw_dev->mem.fwif_connection_ctl_obj); +} + +static void +fw_osinit_init(void *cpu_ptr, void *priv) +{ + struct rogue_fwif_osinit *fwif_osinit = cpu_ptr; + struct pvr_device *pvr_dev = priv; + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + struct pvr_fw_mem *fw_mem = 
&fw_dev->mem; + + fwif_osinit->kernel_ccbctl_fw_addr = pvr_dev->kccb.ccb.ctrl_fw_addr; + fwif_osinit->kernel_ccb_fw_addr = pvr_dev->kccb.ccb.ccb_fw_addr; + pvr_fw_object_get_fw_addr(pvr_dev->kccb.rtn_obj, + &fwif_osinit->kernel_ccb_rtn_slots_fw_addr); + + fwif_osinit->firmware_ccbctl_fw_addr = pvr_dev->fwccb.ctrl_fw_addr; + fwif_osinit->firmware_ccb_fw_addr = pvr_dev->fwccb.ccb_fw_addr; + + fwif_osinit->work_est_firmware_ccbctl_fw_addr = 0; + fwif_osinit->work_est_firmware_ccb_fw_addr = 0; + + pvr_fw_object_get_fw_addr(fw_mem->hwrinfobuf_obj, + &fwif_osinit->rogue_fwif_hwr_info_buf_ctl_fw_addr); + pvr_fw_object_get_fw_addr(fw_mem->osdata_obj, &fwif_osinit->fw_os_data_fw_addr); + + fwif_osinit->hwr_debug_dump_limit = 0; + + rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.hw_bvnc); + rogue_fwif_compchecks_bvnc_init(&fwif_osinit->rogue_comp_checks.fw_bvnc); +} + +static void +fw_osdata_init(void *cpu_ptr, void *priv) +{ + struct rogue_fwif_osdata *fwif_osdata = cpu_ptr; + struct pvr_device *pvr_dev = priv; + struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem; + + pvr_fw_object_get_fw_addr(fw_mem->power_sync_obj, &fwif_osdata->power_sync_fw_addr); +} + +static void +fw_fault_page_init(void *cpu_ptr, void *priv) +{ + u32 *fault_page = cpu_ptr; + + for (int i = 0; i < PVR_ROGUE_FAULT_PAGE_SIZE / sizeof(*fault_page); i++) + fault_page[i] = 0xdeadbee0; +} + +static void +fw_sysinit_init(void *cpu_ptr, void *priv) +{ + struct rogue_fwif_sysinit *fwif_sysinit = cpu_ptr; + struct pvr_device *pvr_dev = priv; + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + struct pvr_fw_mem *fw_mem = &fw_dev->mem; + dma_addr_t fault_dma_addr = 0; + u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk); + + WARN_ON(!clock_speed_hz); + + WARN_ON(pvr_fw_object_get_dma_addr(fw_mem->fault_page_obj, 0, &fault_dma_addr)); + fwif_sysinit->fault_phys_addr = (u64)fault_dma_addr; + + fwif_sysinit->pds_exec_base = ROGUE_PDSCODEDATA_HEAP_BASE; + fwif_sysinit->usc_exec_base = ROGUE_USCCODE_HEAP_BASE; + + pvr_fw_object_get_fw_addr(fw_mem->runtime_cfg_obj, &fwif_sysinit->runtime_cfg_fw_addr); + pvr_fw_object_get_fw_addr(fw_dev->fw_trace.tracebuf_ctrl_obj, + &fwif_sysinit->trace_buf_ctl_fw_addr); + pvr_fw_object_get_fw_addr(fw_mem->sysdata_obj, &fwif_sysinit->fw_sys_data_fw_addr); + pvr_fw_object_get_fw_addr(fw_mem->gpu_util_fwcb_obj, + &fwif_sysinit->gpu_util_fw_cb_ctl_fw_addr); + if (fw_mem->core_data_obj) { + pvr_fw_object_get_fw_addr(fw_mem->core_data_obj, + &fwif_sysinit->coremem_data_store.fw_addr); + } + + /* Currently unsupported. */ + fwif_sysinit->counter_dump_ctl.buffer_fw_addr = 0; + fwif_sysinit->counter_dump_ctl.size_in_dwords = 0; + + /* Skip alignment checks. 
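+ * Writing zero here disables the FW-side interface-structure alignment + * self-check.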
*/ + fwif_sysinit->align_checks = 0; + + fwif_sysinit->filter_flags = 0; + fwif_sysinit->hw_perf_filter = 0; + fwif_sysinit->firmware_perf = FW_PERF_CONF_NONE; + fwif_sysinit->initial_core_clock_speed = clock_speed_hz; + fwif_sysinit->active_pm_latency_ms = 0; + fwif_sysinit->gpio_validation_mode = ROGUE_FWIF_GPIO_VAL_OFF; + fwif_sysinit->firmware_started = false; + fwif_sysinit->marker_val = 1; + + memset(&fwif_sysinit->bvnc_km_feature_flags, 0, + sizeof(fwif_sysinit->bvnc_km_feature_flags)); +} + +#define ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB 4 + +static void +fw_sysdata_init(void *cpu_ptr, void *priv) +{ + struct rogue_fwif_sysdata *fwif_sysdata = cpu_ptr; + struct pvr_device *pvr_dev = priv; + u32 slc_size_in_kilobytes = 0; + u32 config_flags = 0; + + WARN_ON(PVR_FEATURE_VALUE(pvr_dev, slc_size_in_kilobytes, &slc_size_in_kilobytes)); + + if (slc_size_in_kilobytes < ROGUE_FWIF_SLC_MIN_SIZE_FOR_DM_OVERLAP_KB) + config_flags |= ROGUE_FWIF_INICFG_DISABLE_DM_OVERLAP; + + fwif_sysdata->config_flags = config_flags; +} + +static void +fw_runtime_cfg_init(void *cpu_ptr, void *priv) +{ + struct rogue_fwif_runtime_cfg *runtime_cfg = cpu_ptr; + struct pvr_device *pvr_dev = priv; + u32 clock_speed_hz = clk_get_rate(pvr_dev->core_clk); + + WARN_ON(!clock_speed_hz); + + runtime_cfg->core_clock_speed = clock_speed_hz; + runtime_cfg->active_pm_latency_ms = 0; + runtime_cfg->active_pm_latency_persistant = true; + WARN_ON(PVR_FEATURE_VALUE(pvr_dev, num_clusters, + &runtime_cfg->default_dusts_num_init) != 0); +} + +static void +fw_gpu_util_fwcb_init(void *cpu_ptr, void *priv) +{ + struct rogue_fwif_gpu_util_fwcb *gpu_util_fwcb = cpu_ptr; + + gpu_util_fwcb->last_word = PVR_FWIF_GPU_UTIL_STATE_IDLE; +} + +static int +pvr_fw_create_structures(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + struct pvr_fw_mem *fw_mem = &fw_dev->mem; + int err; + + fw_dev->power_sync = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->power_sync), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &fw_mem->power_sync_obj); + if (IS_ERR(fw_dev->power_sync)) { + drm_err(drm_dev, "Unable to allocate FW power_sync structure\n"); + return PTR_ERR(fw_dev->power_sync); + } + + fw_dev->hwrinfobuf = pvr_fw_object_create_and_map(pvr_dev, sizeof(*fw_dev->hwrinfobuf), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &fw_mem->hwrinfobuf_obj); + if (IS_ERR(fw_dev->hwrinfobuf)) { + drm_err(drm_dev, + "Unable to allocate FW hwrinfobuf structure\n"); + err = PTR_ERR(fw_dev->hwrinfobuf); + goto err_release_power_sync; + } + + err = pvr_fw_object_create(pvr_dev, PVR_SYNC_OBJ_SIZE, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &fw_mem->mmucache_sync_obj); + if (err) { + drm_err(drm_dev, + "Unable to allocate MMU cache sync object\n"); + goto err_release_hwrinfobuf; + } + + fw_dev->fwif_sysdata = pvr_fw_object_create_and_map(pvr_dev, + sizeof(*fw_dev->fwif_sysdata), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + fw_sysdata_init, pvr_dev, + &fw_mem->sysdata_obj); + if (IS_ERR(fw_dev->fwif_sysdata)) { + drm_err(drm_dev, "Unable to allocate FW SYSDATA structure\n"); + err = PTR_ERR(fw_dev->fwif_sysdata); + goto err_release_mmucache_sync_obj; + } + + err = pvr_fw_object_create(pvr_dev, PVR_ROGUE_FAULT_PAGE_SIZE, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + fw_fault_page_init, NULL, &fw_mem->fault_page_obj); + if (err) { + drm_err(drm_dev, "Unable to allocate FW fault page\n"); + goto err_release_sysdata; + } + + err = pvr_fw_object_create(pvr_dev, sizeof(struct 
rogue_fwif_gpu_util_fwcb), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + fw_gpu_util_fwcb_init, pvr_dev, &fw_mem->gpu_util_fwcb_obj); + if (err) { + drm_err(drm_dev, "Unable to allocate GPU util FWCB\n"); + goto err_release_fault_page; + } + + err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_runtime_cfg), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + fw_runtime_cfg_init, pvr_dev, &fw_mem->runtime_cfg_obj); + if (err) { + drm_err(drm_dev, "Unable to allocate FW runtime config\n"); + goto err_release_gpu_util_fwcb; + } + + err = pvr_fw_trace_init(pvr_dev); + if (err) + goto err_release_runtime_cfg; + + fw_dev->fwif_osdata = pvr_fw_object_create_and_map(pvr_dev, + sizeof(*fw_dev->fwif_osdata), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + fw_osdata_init, pvr_dev, + &fw_mem->osdata_obj); + if (IS_ERR(fw_dev->fwif_osdata)) { + drm_err(drm_dev, "Unable to allocate FW OSDATA structure\n"); + err = PTR_ERR(fw_dev->fwif_osdata); + goto err_fw_trace_fini; + } + + fw_dev->fwif_osinit = + pvr_fw_object_create_and_map_offset(pvr_dev, + fw_dev->fw_heap_info.config_offset + + PVR_ROGUE_FWIF_OSINIT_OFFSET, + sizeof(*fw_dev->fwif_osinit), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + fw_osinit_init, pvr_dev, &fw_mem->osinit_obj); + if (IS_ERR(fw_dev->fwif_osinit)) { + drm_err(drm_dev, "Unable to allocate FW OSINIT structure\n"); + err = PTR_ERR(fw_dev->fwif_osinit); + goto err_release_osdata; + } + + fw_dev->fwif_sysinit = + pvr_fw_object_create_and_map_offset(pvr_dev, + fw_dev->fw_heap_info.config_offset + + PVR_ROGUE_FWIF_SYSINIT_OFFSET, + sizeof(*fw_dev->fwif_sysinit), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + fw_sysinit_init, pvr_dev, &fw_mem->sysinit_obj); + if (IS_ERR(fw_dev->fwif_sysinit)) { + drm_err(drm_dev, "Unable to allocate FW SYSINIT structure\n"); + err = PTR_ERR(fw_dev->fwif_sysinit); + goto err_release_osinit; + } + + return 0; + +err_release_osinit: + pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj); + +err_release_osdata: + pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj); + +err_fw_trace_fini: + pvr_fw_trace_fini(pvr_dev); + +err_release_runtime_cfg: + pvr_fw_object_destroy(fw_mem->runtime_cfg_obj); + +err_release_gpu_util_fwcb: + pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj); + +err_release_fault_page: + pvr_fw_object_destroy(fw_mem->fault_page_obj); + +err_release_sysdata: + pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj); + +err_release_mmucache_sync_obj: + pvr_fw_object_destroy(fw_mem->mmucache_sync_obj); + +err_release_hwrinfobuf: + pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj); + +err_release_power_sync: + pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj); + + return err; +} + +static void +pvr_fw_destroy_structures(struct pvr_device *pvr_dev) +{ + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + struct pvr_fw_mem *fw_mem = &fw_dev->mem; + + pvr_fw_trace_fini(pvr_dev); + pvr_fw_object_destroy(fw_mem->runtime_cfg_obj); + pvr_fw_object_destroy(fw_mem->gpu_util_fwcb_obj); + pvr_fw_object_destroy(fw_mem->fault_page_obj); + pvr_fw_object_unmap_and_destroy(fw_mem->sysdata_obj); + pvr_fw_object_unmap_and_destroy(fw_mem->sysinit_obj); + + pvr_fw_object_destroy(fw_mem->mmucache_sync_obj); + pvr_fw_object_unmap_and_destroy(fw_mem->hwrinfobuf_obj); + pvr_fw_object_unmap_and_destroy(fw_mem->power_sync_obj); + pvr_fw_object_unmap_and_destroy(fw_mem->osdata_obj); + pvr_fw_object_unmap_and_destroy(fw_mem->osinit_obj); +} + +/** + * pvr_fw_process() - Process firmware image, allocate FW memory and create boot + * arguments + * @pvr_dev: Device pointer. 
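+ * + * Processing happens in stages: section sizes are read from the layout + * table, FW objects are allocated for each section (code first, at the base + * of the FW heap), the image is unpacked into kernel-side buffers by the + * processor-specific fw_process() hook, and the results are copied into the + * FW objects.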
+ * + * Returns: + * * 0 on success, or + * * Any error returned by pvr_fw_object_create_and_map_offset(), or + * * Any error returned by pvr_fw_object_create_and_map(). + */ +static int +pvr_fw_process(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem; + const u8 *fw = pvr_dev->fw_dev.firmware->data; + const struct pvr_fw_layout_entry *private_data; + u8 *fw_code_ptr; + u8 *fw_data_ptr; + u8 *fw_core_code_ptr; + u8 *fw_core_data_ptr; + int err; + + layout_get_sizes(pvr_dev); + + private_data = pvr_fw_find_private_data(pvr_dev); + if (!private_data) + return -EINVAL; + + /* Allocate and map memory for firmware sections. */ + + /* + * Code allocation must be at the start of the firmware heap, otherwise + * firmware processor will be unable to boot. + * + * This has the useful side-effect that for every other object in the + * driver, a firmware address of 0 is invalid. + */ + fw_code_ptr = pvr_fw_object_create_and_map_offset(pvr_dev, 0, fw_mem->code_alloc_size, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &fw_mem->code_obj); + if (IS_ERR(fw_code_ptr)) { + drm_err(drm_dev, "Unable to allocate FW code memory\n"); + return PTR_ERR(fw_code_ptr); + } + + if (pvr_dev->fw_dev.defs->has_fixed_data_addr()) { + u32 base_addr = private_data->base_addr & pvr_dev->fw_dev.fw_heap_info.offset_mask; + + fw_data_ptr = + pvr_fw_object_create_and_map_offset(pvr_dev, base_addr, + fw_mem->data_alloc_size, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &fw_mem->data_obj); + } else { + fw_data_ptr = pvr_fw_object_create_and_map(pvr_dev, fw_mem->data_alloc_size, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &fw_mem->data_obj); + } + if (IS_ERR(fw_data_ptr)) { + drm_err(drm_dev, "Unable to allocate FW data memory\n"); + err = PTR_ERR(fw_data_ptr); + goto err_free_fw_code_obj; + } + + /* Core code and data sections are optional. 
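+ * Cores without coremem report zero alloc sizes for these sections, in + * which case no objects are created and the pointers stay NULL.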
*/ + if (fw_mem->core_code_alloc_size) { + fw_core_code_ptr = + pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_code_alloc_size, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &fw_mem->core_code_obj); + if (IS_ERR(fw_core_code_ptr)) { + drm_err(drm_dev, + "Unable to allocate FW core code memory\n"); + err = PTR_ERR(fw_core_code_ptr); + goto err_free_fw_data_obj; + } + } else { + fw_core_code_ptr = NULL; + } + + if (fw_mem->core_data_alloc_size) { + fw_core_data_ptr = + pvr_fw_object_create_and_map(pvr_dev, fw_mem->core_data_alloc_size, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &fw_mem->core_data_obj); + if (IS_ERR(fw_core_data_ptr)) { + drm_err(drm_dev, + "Unable to allocate FW core data memory\n"); + err = PTR_ERR(fw_core_data_ptr); + goto err_free_fw_core_code_obj; + } + } else { + fw_core_data_ptr = NULL; + } + + fw_mem->code = kzalloc(fw_mem->code_alloc_size, GFP_KERNEL); + fw_mem->data = kzalloc(fw_mem->data_alloc_size, GFP_KERNEL); + if (fw_mem->core_code_alloc_size) + fw_mem->core_code = kzalloc(fw_mem->core_code_alloc_size, GFP_KERNEL); + if (fw_mem->core_data_alloc_size) + fw_mem->core_data = kzalloc(fw_mem->core_data_alloc_size, GFP_KERNEL); + + if (!fw_mem->code || !fw_mem->data || + (!fw_mem->core_code && fw_mem->core_code_alloc_size) || + (!fw_mem->core_data && fw_mem->core_data_alloc_size)) { + err = -ENOMEM; + goto err_free_kdata; + } + + err = pvr_dev->fw_dev.defs->fw_process(pvr_dev, fw, + fw_mem->code, fw_mem->data, fw_mem->core_code, + fw_mem->core_data, fw_mem->core_code_alloc_size); + + if (err) + goto err_free_fw_core_data_obj; + + memcpy(fw_code_ptr, fw_mem->code, fw_mem->code_alloc_size); + memcpy(fw_data_ptr, fw_mem->data, fw_mem->data_alloc_size); + if (fw_mem->core_code) + memcpy(fw_core_code_ptr, fw_mem->core_code, fw_mem->core_code_alloc_size); + if (fw_mem->core_data) + memcpy(fw_core_data_ptr, fw_mem->core_data, fw_mem->core_data_alloc_size); + + /* We're finished with the firmware section memory on the CPU, unmap. 
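+ * The kernel-side copies in fw_mem (code, data, core code, core data) are + * kept, so the image can be restored by pvr_fw_reinit_code_data() on hard + * reset without reprocessing the firmware file.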
*/ + if (fw_core_data_ptr) + pvr_fw_object_vunmap(fw_mem->core_data_obj); + if (fw_core_code_ptr) + pvr_fw_object_vunmap(fw_mem->core_code_obj); + pvr_fw_object_vunmap(fw_mem->data_obj); + fw_data_ptr = NULL; + pvr_fw_object_vunmap(fw_mem->code_obj); + fw_code_ptr = NULL; + + err = pvr_fw_create_fwif_connection_ctl(pvr_dev); + if (err) + goto err_free_fw_core_data_obj; + + return 0; + +err_free_kdata: + kfree(fw_mem->core_data); + kfree(fw_mem->core_code); + kfree(fw_mem->data); + kfree(fw_mem->code); + +err_free_fw_core_data_obj: + if (fw_core_data_ptr) + pvr_fw_object_unmap_and_destroy(fw_mem->core_data_obj); + +err_free_fw_core_code_obj: + if (fw_core_code_ptr) + pvr_fw_object_unmap_and_destroy(fw_mem->core_code_obj); + +err_free_fw_data_obj: + if (fw_data_ptr) + pvr_fw_object_vunmap(fw_mem->data_obj); + pvr_fw_object_destroy(fw_mem->data_obj); + +err_free_fw_code_obj: + if (fw_code_ptr) + pvr_fw_object_vunmap(fw_mem->code_obj); + pvr_fw_object_destroy(fw_mem->code_obj); + + return err; +} + +static int +pvr_copy_to_fw(struct pvr_fw_object *dest_obj, u8 *src_ptr, u32 size) +{ + u8 *dest_ptr = pvr_fw_object_vmap(dest_obj); + + if (IS_ERR(dest_ptr)) + return PTR_ERR(dest_ptr); + + memcpy(dest_ptr, src_ptr, size); + + pvr_fw_object_vunmap(dest_obj); + + return 0; +} + +static int +pvr_fw_reinit_code_data(struct pvr_device *pvr_dev) +{ + struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem; + int err; + + err = pvr_copy_to_fw(fw_mem->code_obj, fw_mem->code, fw_mem->code_alloc_size); + if (err) + return err; + + err = pvr_copy_to_fw(fw_mem->data_obj, fw_mem->data, fw_mem->data_alloc_size); + if (err) + return err; + + if (fw_mem->core_code) { + err = pvr_copy_to_fw(fw_mem->core_code_obj, fw_mem->core_code, + fw_mem->core_code_alloc_size); + if (err) + return err; + } + + if (fw_mem->core_data) { + err = pvr_copy_to_fw(fw_mem->core_data_obj, fw_mem->core_data, + fw_mem->core_data_alloc_size); + if (err) + return err; + } + + return 0; +} + +static void +pvr_fw_cleanup(struct pvr_device *pvr_dev) +{ + struct pvr_fw_mem *fw_mem = &pvr_dev->fw_dev.mem; + + pvr_fw_fini_fwif_connection_ctl(pvr_dev); + if (fw_mem->core_code_obj) + pvr_fw_object_destroy(fw_mem->core_code_obj); + if (fw_mem->core_data_obj) + pvr_fw_object_destroy(fw_mem->core_data_obj); + pvr_fw_object_destroy(fw_mem->code_obj); + pvr_fw_object_destroy(fw_mem->data_obj); +} + +/** + * pvr_wait_for_fw_boot() - Wait for firmware to finish booting + * @pvr_dev: Target PowerVR device. + * + * Returns: + * * 0 on success, or + * * -%ETIMEDOUT if firmware fails to boot within timeout. + */ +int +pvr_wait_for_fw_boot(struct pvr_device *pvr_dev) +{ + ktime_t deadline = ktime_add_us(ktime_get(), FW_BOOT_TIMEOUT_USEC); + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + + while (ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) { + if (READ_ONCE(fw_dev->fwif_sysinit->firmware_started)) + return 0; + } + + return -ETIMEDOUT; +} + +/* + * pvr_fw_heap_info_init() - Calculate size and masks for FW heap + * @pvr_dev: Target PowerVR device. + * @log2_size: Log2 of raw heap size. + * @reserved_size: Size of reserved area of heap, in bytes. May be zero. 
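+ * + * For example (illustrative numbers only): log2_size == 25 gives a 32M raw + * heap and offset_mask == 0x1ffffff; the 192k config area then starts at + * config_offset == 32M - 192k and, with reserved_size == 0, the usable main + * heap size is also 32M - 192k.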
+ */ +void +pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size) +{ + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + + fw_dev->fw_heap_info.gpu_addr = PVR_ROGUE_FW_MAIN_HEAP_BASE; + fw_dev->fw_heap_info.log2_size = log2_size; + fw_dev->fw_heap_info.reserved_size = reserved_size; + fw_dev->fw_heap_info.raw_size = 1 << fw_dev->fw_heap_info.log2_size; + fw_dev->fw_heap_info.offset_mask = fw_dev->fw_heap_info.raw_size - 1; + fw_dev->fw_heap_info.config_offset = fw_dev->fw_heap_info.raw_size - + PVR_ROGUE_FW_CONFIG_HEAP_SIZE; + fw_dev->fw_heap_info.size = fw_dev->fw_heap_info.raw_size - + (PVR_ROGUE_FW_CONFIG_HEAP_SIZE + reserved_size); +} + +/** + * pvr_fw_validate_init_device_info() - Validate firmware and initialise device information + * @pvr_dev: Target PowerVR device. + * + * This function must be called before querying device information. + * + * Returns: + * * 0 on success, or + * * -%EINVAL if firmware validation fails. + */ +int +pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev) +{ + int err; + + err = pvr_fw_validate(pvr_dev); + if (err) + return err; + + return pvr_fw_get_device_info(pvr_dev); +} + +/** + * pvr_fw_init() - Initialise and boot firmware + * @pvr_dev: Target PowerVR device + * + * On successful completion of the function the PowerVR device will be + * initialised and ready to use. + * + * Returns: + * * 0 on success, + * * -%EINVAL on invalid firmware image, + * * -%ENOMEM on out of memory, or + * * -%ETIMEDOUT if firmware processor fails to boot or on register poll timeout. + */ +int +pvr_fw_init(struct pvr_device *pvr_dev) +{ + u32 kccb_size_log2 = ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT; + u32 kccb_rtn_size = (1 << kccb_size_log2) * sizeof(*pvr_dev->kccb.rtn); + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + int err; + + if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_META) + fw_dev->defs = &pvr_fw_defs_meta; + else if (fw_dev->processor_type == PVR_FW_PROCESSOR_TYPE_MIPS) + fw_dev->defs = &pvr_fw_defs_mips; + else + return -EINVAL; + + err = fw_dev->defs->init(pvr_dev); + if (err) + return err; + + drm_mm_init(&fw_dev->fw_mm, ROGUE_FW_HEAP_BASE, fw_dev->fw_heap_info.raw_size); + fw_dev->fw_mm_base = ROGUE_FW_HEAP_BASE; + spin_lock_init(&fw_dev->fw_mm_lock); + + INIT_LIST_HEAD(&fw_dev->fw_objs.list); + err = drmm_mutex_init(from_pvr_device(pvr_dev), &fw_dev->fw_objs.lock); + if (err) + goto err_mm_takedown; + + err = pvr_fw_process(pvr_dev); + if (err) + goto err_mm_takedown; + + /* Initialise KCCB and FWCCB. */ + err = pvr_kccb_init(pvr_dev); + if (err) + goto err_fw_cleanup; + + err = pvr_fwccb_init(pvr_dev); + if (err) + goto err_kccb_fini; + + /* Allocate memory for KCCB return slots. 
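+ * One return slot is allocated per KCCB command slot (1 << kccb_size_log2 + * in total) so the FW can report per-command completion status.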
*/ + pvr_dev->kccb.rtn = pvr_fw_object_create_and_map(pvr_dev, kccb_rtn_size, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &pvr_dev->kccb.rtn_obj); + if (IS_ERR(pvr_dev->kccb.rtn)) { + err = PTR_ERR(pvr_dev->kccb.rtn); + goto err_fwccb_fini; + } + + err = pvr_fw_create_structures(pvr_dev); + if (err) + goto err_kccb_rtn_release; + + err = pvr_fw_start(pvr_dev); + if (err) + goto err_destroy_structures; + + err = pvr_wait_for_fw_boot(pvr_dev); + if (err) { + drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n"); + goto err_fw_stop; + } + + fw_dev->booted = true; + + return 0; + +err_fw_stop: + pvr_fw_stop(pvr_dev); + +err_destroy_structures: + pvr_fw_destroy_structures(pvr_dev); + +err_kccb_rtn_release: + pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj); + +err_fwccb_fini: + pvr_ccb_fini(&pvr_dev->fwccb); + +err_kccb_fini: + pvr_kccb_fini(pvr_dev); + +err_fw_cleanup: + pvr_fw_cleanup(pvr_dev); + +err_mm_takedown: + drm_mm_takedown(&fw_dev->fw_mm); + + if (fw_dev->defs->fini) + fw_dev->defs->fini(pvr_dev); + + return err; +} + +/** + * pvr_fw_fini() - Shutdown firmware processor and free associated memory + * @pvr_dev: Target PowerVR device + */ +void +pvr_fw_fini(struct pvr_device *pvr_dev) +{ + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + + fw_dev->booted = false; + + pvr_fw_destroy_structures(pvr_dev); + pvr_fw_object_unmap_and_destroy(pvr_dev->kccb.rtn_obj); + + /* + * Ensure FWCCB worker has finished executing before destroying FWCCB. The IRQ handler has + * been unregistered at this point so no new work should be being submitted. + */ + pvr_ccb_fini(&pvr_dev->fwccb); + pvr_kccb_fini(pvr_dev); + pvr_fw_cleanup(pvr_dev); + + mutex_lock(&pvr_dev->fw_dev.fw_objs.lock); + WARN_ON(!list_empty(&pvr_dev->fw_dev.fw_objs.list)); + mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock); + + drm_mm_takedown(&fw_dev->fw_mm); + + if (fw_dev->defs->fini) + fw_dev->defs->fini(pvr_dev); +} + +/** + * pvr_fw_mts_schedule() - Schedule work via an MTS kick + * @pvr_dev: Target PowerVR device + * @val: Kick mask. Should be a combination of %ROGUE_CR_MTS_SCHEDULE_* + */ +void +pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val) +{ + /* Ensure memory is flushed before kicking MTS. */ + wmb(); + + pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_SCHEDULE, val); + + /* Ensure the MTS kick goes through before continuing. */ + mb(); +} + +/** + * pvr_fw_structure_cleanup() - Send FW cleanup request for an object + * @pvr_dev: Target PowerVR device. + * @type: Type of object to cleanup. Must be one of &enum rogue_fwif_cleanup_type. + * @fw_obj: Pointer to FW object containing object to cleanup. + * @offset: Offset within FW object of object to cleanup. + * + * Returns: + * * 0 on success, + * * -EBUSY if object is busy, + * * -ETIMEDOUT on timeout, or + * * -EIO if device is lost. 
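+ * + * A -EBUSY return is recoverable: the caller can flush outstanding FWCCB + * work and retry, as pvr_free_list_release() does.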
+ */ +int +pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj, + u32 offset) +{ + struct rogue_fwif_kccb_cmd cmd; + int slot_nr; + int idx; + int err; + u32 rtn; + + struct rogue_fwif_cleanup_request *cleanup_req = &cmd.cmd_data.cleanup_data; + + down_read(&pvr_dev->reset_sem); + + if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) { + err = -EIO; + goto err_up_read; + } + + cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_CLEANUP; + cmd.kccb_flags = 0; + cleanup_req->cleanup_type = type; + + switch (type) { + case ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT: + pvr_fw_object_get_fw_addr_offset(fw_obj, offset, + &cleanup_req->cleanup_data.context_fw_addr); + break; + case ROGUE_FWIF_CLEANUP_HWRTDATA: + pvr_fw_object_get_fw_addr_offset(fw_obj, offset, + &cleanup_req->cleanup_data.hwrt_data_fw_addr); + break; + case ROGUE_FWIF_CLEANUP_FREELIST: + pvr_fw_object_get_fw_addr_offset(fw_obj, offset, + &cleanup_req->cleanup_data.freelist_fw_addr); + break; + default: + err = -EINVAL; + goto err_drm_dev_exit; + } + + err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot_nr); + if (err) + goto err_drm_dev_exit; + + err = pvr_kccb_wait_for_completion(pvr_dev, slot_nr, HZ, &rtn); + if (err) + goto err_drm_dev_exit; + + if (rtn & ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY) + err = -EBUSY; + +err_drm_dev_exit: + drm_dev_exit(idx); + +err_up_read: + up_read(&pvr_dev->reset_sem); + + return err; +} + +/** + * pvr_fw_object_fw_map() - Map a FW object in firmware address space + * @pvr_dev: Device pointer. + * @fw_obj: FW object to map. + * @dev_addr: Desired address in device space, if a specific address is + * required. 0 otherwise. + * + * Returns: + * * 0 on success, or + * * -%EINVAL if @fw_obj is already mapped but has no references, or + * * Any error returned by DRM. + */ +static int +pvr_fw_object_fw_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj, u64 dev_addr) +{ + struct pvr_gem_object *pvr_obj = fw_obj->gem; + struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj); + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + + int err; + + spin_lock(&fw_dev->fw_mm_lock); + + if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) { + err = -EINVAL; + goto err_unlock; + } + + if (!dev_addr) { + /* + * Allocate from the main heap only (firmware heap minus + * config space). + */ + err = drm_mm_insert_node_in_range(&fw_dev->fw_mm, &fw_obj->fw_mm_node, + gem_obj->size, 0, 0, + fw_dev->fw_heap_info.gpu_addr, + fw_dev->fw_heap_info.gpu_addr + + fw_dev->fw_heap_info.size, 0); + if (err) + goto err_unlock; + } else { + fw_obj->fw_mm_node.start = dev_addr; + fw_obj->fw_mm_node.size = gem_obj->size; + err = drm_mm_reserve_node(&fw_dev->fw_mm, &fw_obj->fw_mm_node); + if (err) + goto err_unlock; + } + + spin_unlock(&fw_dev->fw_mm_lock); + + /* Map object on GPU. */ + err = fw_dev->defs->vm_map(pvr_dev, fw_obj); + if (err) + goto err_remove_node; + + fw_obj->fw_addr_offset = (u32)(fw_obj->fw_mm_node.start - fw_dev->fw_mm_base); + + return 0; + +err_remove_node: + spin_lock(&fw_dev->fw_mm_lock); + drm_mm_remove_node(&fw_obj->fw_mm_node); + +err_unlock: + spin_unlock(&fw_dev->fw_mm_lock); + + return err; +} + +/** + * pvr_fw_object_fw_unmap() - Unmap a previously mapped FW object + * @fw_obj: FW object to unmap. + * + * Returns: + * * 0 on success, or + * * -%EINVAL if object is not currently mapped. 
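+ * + * The object is first unmapped from the FW processor via the vm_unmap() + * backend, then its node is removed from the FW heap manager.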
+ */ +static int +pvr_fw_object_fw_unmap(struct pvr_fw_object *fw_obj) +{ + struct pvr_gem_object *pvr_obj = fw_obj->gem; + struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj); + struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev); + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + + fw_dev->defs->vm_unmap(pvr_dev, fw_obj); + + spin_lock(&fw_dev->fw_mm_lock); + + if (!drm_mm_node_allocated(&fw_obj->fw_mm_node)) { + spin_unlock(&fw_dev->fw_mm_lock); + return -EINVAL; + } + + drm_mm_remove_node(&fw_obj->fw_mm_node); + + spin_unlock(&fw_dev->fw_mm_lock); + + return 0; +} + +static void * +pvr_fw_object_create_and_map_common(struct pvr_device *pvr_dev, size_t size, + u64 flags, u64 dev_addr, + void (*init)(void *cpu_ptr, void *priv), + void *init_priv, struct pvr_fw_object **fw_obj_out) +{ + struct pvr_fw_object *fw_obj; + void *cpu_ptr; + int err; + + /* %DRM_PVR_BO_PM_FW_PROTECT is implicit for FW objects. */ + flags |= DRM_PVR_BO_PM_FW_PROTECT; + + fw_obj = kzalloc(sizeof(*fw_obj), GFP_KERNEL); + if (!fw_obj) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&fw_obj->node); + fw_obj->init = init; + fw_obj->init_priv = init_priv; + + fw_obj->gem = pvr_gem_object_create(pvr_dev, size, flags); + if (IS_ERR(fw_obj->gem)) { + err = PTR_ERR(fw_obj->gem); + fw_obj->gem = NULL; + goto err_put_object; + } + + err = pvr_fw_object_fw_map(pvr_dev, fw_obj, dev_addr); + if (err) + goto err_put_object; + + cpu_ptr = pvr_fw_object_vmap(fw_obj); + if (IS_ERR(cpu_ptr)) { + err = PTR_ERR(cpu_ptr); + goto err_put_object; + } + + *fw_obj_out = fw_obj; + + if (fw_obj->init) + fw_obj->init(cpu_ptr, fw_obj->init_priv); + + mutex_lock(&pvr_dev->fw_dev.fw_objs.lock); + list_add_tail(&fw_obj->node, &pvr_dev->fw_dev.fw_objs.list); + mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock); + + return cpu_ptr; + +err_put_object: + pvr_fw_object_destroy(fw_obj); + + return ERR_PTR(err); +} + +/** + * pvr_fw_object_create() - Create a FW object and map to firmware + * @pvr_dev: PowerVR device pointer. + * @size: Size of object, in bytes. + * @flags: Options which affect both this operation and future mapping + * operations performed on the returned object. Must be a combination of + * DRM_PVR_BO_* and/or PVR_BO_* flags. + * @init: Initialisation callback. + * @init_priv: Private pointer to pass to initialisation callback. + * @fw_obj_out: Pointer to location to store created object pointer. + * + * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently, + * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS + * set. + * + * Returns: + * * 0 on success, or + * * Any error returned by pvr_fw_object_create_common(). + */ +int +pvr_fw_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags, + void (*init)(void *cpu_ptr, void *priv), void *init_priv, + struct pvr_fw_object **fw_obj_out) +{ + void *cpu_ptr; + + cpu_ptr = pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv, + fw_obj_out); + if (IS_ERR(cpu_ptr)) + return PTR_ERR(cpu_ptr); + + pvr_fw_object_vunmap(*fw_obj_out); + + return 0; +} + +/** + * pvr_fw_object_create_and_map() - Create a FW object and map to firmware and CPU + * @pvr_dev: PowerVR device pointer. + * @size: Size of object, in bytes. + * @flags: Options which affect both this operation and future mapping + * operations performed on the returned object. Must be a combination of + * DRM_PVR_BO_* and/or PVR_BO_* flags. + * @init: Initialisation callback. + * @init_priv: Private pointer to pass to initialisation callback. 
+ * @fw_obj_out: Pointer to location to store created object pointer. + * + * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently, + * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS + * set. + * + * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU + * mapping. + * + * Returns: + * * Pointer to CPU mapping of newly created object, or + * * Any error returned by pvr_fw_object_create(), or + * * Any error returned by pvr_fw_object_vmap(). + */ +void * +pvr_fw_object_create_and_map(struct pvr_device *pvr_dev, size_t size, u64 flags, + void (*init)(void *cpu_ptr, void *priv), + void *init_priv, struct pvr_fw_object **fw_obj_out) +{ + return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, 0, init, init_priv, + fw_obj_out); +} + +/** + * pvr_fw_object_create_and_map_offset() - Create a FW object and map to + * firmware at the provided offset and to the CPU. + * @pvr_dev: PowerVR device pointer. + * @dev_offset: Base address of desired FW mapping, offset from start of FW heap. + * @size: Size of object, in bytes. + * @flags: Options which affect both this operation and future mapping + * operations performed on the returned object. Must be a combination of + * DRM_PVR_BO_* and/or PVR_BO_* flags. + * @init: Initialisation callback. + * @init_priv: Private pointer to pass to initialisation callback. + * @fw_obj_out: Pointer to location to store created object pointer. + * + * %DRM_PVR_BO_DEVICE_PM_FW_PROTECT is implied for all FW objects. Consequently, + * this function will fail if @flags has %DRM_PVR_BO_CPU_ALLOW_USERSPACE_ACCESS + * set. + * + * Caller is responsible for calling pvr_fw_object_vunmap() to release the CPU + * mapping. + * + * Returns: + * * Pointer to CPU mapping of newly created object, or + * * Any error returned by pvr_fw_object_create(), or + * * Any error returned by pvr_fw_object_vmap(). + */ +void * +pvr_fw_object_create_and_map_offset(struct pvr_device *pvr_dev, + u32 dev_offset, size_t size, u64 flags, + void (*init)(void *cpu_ptr, void *priv), + void *init_priv, struct pvr_fw_object **fw_obj_out) +{ + u64 dev_addr = pvr_dev->fw_dev.fw_mm_base + dev_offset; + + return pvr_fw_object_create_and_map_common(pvr_dev, size, flags, dev_addr, init, init_priv, + fw_obj_out); +} + +/** + * pvr_fw_object_destroy() - Destroy a pvr_fw_object + * @fw_obj: Pointer to object to destroy. + */ +void pvr_fw_object_destroy(struct pvr_fw_object *fw_obj) +{ + struct pvr_gem_object *pvr_obj = fw_obj->gem; + struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj); + struct pvr_device *pvr_dev = to_pvr_device(gem_obj->dev); + + mutex_lock(&pvr_dev->fw_dev.fw_objs.lock); + list_del(&fw_obj->node); + mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock); + + if (drm_mm_node_allocated(&fw_obj->fw_mm_node)) { + /* If we can't unmap, leak the memory. */ + if (WARN_ON(pvr_fw_object_fw_unmap(fw_obj))) + return; + } + + if (fw_obj->gem) + pvr_gem_object_put(fw_obj->gem); + + kfree(fw_obj); +} + +/** + * pvr_fw_object_get_fw_addr_offset() - Return address of object in firmware address space, with + * given offset. + * @fw_obj: Pointer to object. + * @offset: Desired offset from start of object. + * @fw_addr_out: Location to store address to. 
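+ *
+ * The address written to @fw_addr_out is in the FW processor's own view of
+ * memory, as produced by the per-processor &pvr_fw_defs->get_fw_addr_with_offset
+ * hook (SegMMU-based for META, remap-window based for MIPS; see below).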
+ */
+void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset, u32 *fw_addr_out)
+{
+	struct pvr_gem_object *pvr_obj = fw_obj->gem;
+	struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(pvr_obj)->dev);
+
+	*fw_addr_out = pvr_dev->fw_dev.defs->get_fw_addr_with_offset(fw_obj, offset);
+}
+
+/**
+ * pvr_fw_hard_reset() - Re-initialise the FW code and data segments, and reset all global FW
+ * structures
+ * @pvr_dev: Device pointer
+ *
+ * If this function returns an error then the caller must regard the device as lost.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_fw_reinit_code_data().
+ */
+int
+pvr_fw_hard_reset(struct pvr_device *pvr_dev)
+{
+	struct list_head *pos;
+	int err;
+
+	/* Reset all FW objects */
+	mutex_lock(&pvr_dev->fw_dev.fw_objs.lock);
+
+	list_for_each(pos, &pvr_dev->fw_dev.fw_objs.list) {
+		struct pvr_fw_object *fw_obj = container_of(pos, struct pvr_fw_object, node);
+		void *cpu_ptr = pvr_fw_object_vmap(fw_obj);
+
+		WARN_ON(IS_ERR(cpu_ptr));
+
+		if (!(fw_obj->gem->flags & PVR_BO_FW_NO_CLEAR_ON_RESET)) {
+			memset(cpu_ptr, 0, pvr_gem_object_size(fw_obj->gem));
+
+			if (fw_obj->init)
+				fw_obj->init(cpu_ptr, fw_obj->init_priv);
+		}
+
+		pvr_fw_object_vunmap(fw_obj);
+	}
+
+	mutex_unlock(&pvr_dev->fw_dev.fw_objs.lock);
+
+	err = pvr_fw_reinit_code_data(pvr_dev);
+	if (err)
+		return err;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h
new file mode 100644
index 0000000000..b7966bd574
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw.h
@@ -0,0 +1,509 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_H
+#define PVR_FW_H
+
+#include "pvr_fw_info.h"
+#include "pvr_fw_trace.h"
+#include "pvr_gem.h"
+
+#include <drm/drm_mm.h>
+
+#include <linux/types.h>
+
+/* Forward declarations from "pvr_device.h". */
+struct pvr_device;
+struct pvr_file;
+
+/* Forward declaration from "pvr_vm.h". */
+struct pvr_vm_context;
+
+#define ROGUE_FWIF_FWCCB_NUMCMDS_LOG2 5
+
+#define ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT 7
+
+/**
+ * struct pvr_fw_object - container for firmware memory allocations
+ */
+struct pvr_fw_object {
+	/** @ref_count: FW object reference counter. */
+	struct kref ref_count;
+
+	/** @gem: GEM object backing the FW object. */
+	struct pvr_gem_object *gem;
+
+	/**
+	 * @fw_mm_node: Node representing mapping in FW address space. @pvr_obj->lock must
+	 * be held when writing.
+	 */
+	struct drm_mm_node fw_mm_node;
+
+	/**
+	 * @fw_addr_offset: Virtual address offset of firmware mapping. Only
+	 * valid if @flags has %PVR_GEM_OBJECT_FLAGS_FW_MAPPED set.
+	 */
+	u32 fw_addr_offset;
+
+	/**
+	 * @init: Initialisation callback. Will be called on object creation and FW hard reset.
+	 * Object will have been zeroed before this is called.
+	 */
+	void (*init)(void *cpu_ptr, void *priv);
+
+	/** @init_priv: Private data for initialisation callback. */
+	void *init_priv;
+
+	/** @node: Node for firmware object list. */
+	struct list_head node;
+};
+
+/**
+ * struct pvr_fw_defs - FW processor function table and static definitions
+ */
+struct pvr_fw_defs {
+	/**
+	 * @init:
+	 *
+	 * FW processor specific initialisation.
+	 * @pvr_dev: Target PowerVR device.
+	 *
+	 * This function must call pvr_fw_heap_calculate() to initialise the firmware heap for this
+	 * FW processor.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 * * 0 on success, or
+	 * * Any appropriate error on failure.
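+	 *
+	 * In this driver the hook is implemented by pvr_meta_init() and
+	 * pvr_mips_init(), both of which size the heap through
+	 * pvr_fw_heap_info_init().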
+	 */
+	int (*init)(struct pvr_device *pvr_dev);
+
+	/**
+	 * @fini:
+	 *
+	 * FW processor specific finalisation.
+	 * @pvr_dev: Target PowerVR device.
+	 *
+	 * This function is optional.
+	 */
+	void (*fini)(struct pvr_device *pvr_dev);
+
+	/**
+	 * @fw_process:
+	 *
+	 * Load and process firmware image.
+	 * @pvr_dev: Target PowerVR device.
+	 * @fw: Pointer to firmware image.
+	 * @fw_code_ptr: Pointer to firmware code section.
+	 * @fw_data_ptr: Pointer to firmware data section.
+	 * @fw_core_code_ptr: Pointer to firmware core code section. May be %NULL.
+	 * @fw_core_data_ptr: Pointer to firmware core data section. May be %NULL.
+	 * @core_code_alloc_size: Total allocation size of core code section.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 * * 0 on success, or
+	 * * Any appropriate error on failure.
+	 */
+	int (*fw_process)(struct pvr_device *pvr_dev, const u8 *fw,
+			  u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr,
+			  u8 *fw_core_data_ptr, u32 core_code_alloc_size);
+
+	/**
+	 * @vm_map:
+	 *
+	 * Map FW object into FW processor address space.
+	 * @pvr_dev: Target PowerVR device.
+	 * @fw_obj: FW object to map.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 * * 0 on success, or
+	 * * Any appropriate error on failure.
+	 */
+	int (*vm_map)(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+
+	/**
+	 * @vm_unmap:
+	 *
+	 * Unmap FW object from FW processor address space.
+	 * @pvr_dev: Target PowerVR device.
+	 * @fw_obj: FW object to unmap.
+	 *
+	 * This function is mandatory.
+	 */
+	void (*vm_unmap)(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj);
+
+	/**
+	 * @get_fw_addr_with_offset:
+	 *
+	 * Called to get address of object in firmware address space, with offset.
+	 * @fw_obj: Pointer to object.
+	 * @offset: Desired offset from start of object.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 * * Address in firmware address space.
+	 */
+	u32 (*get_fw_addr_with_offset)(struct pvr_fw_object *fw_obj, u32 offset);
+
+	/**
+	 * @wrapper_init:
+	 *
+	 * Called to initialise FW wrapper.
+	 * @pvr_dev: Target PowerVR device.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 * * 0 on success, or
+	 * * Any appropriate error on failure.
+	 */
+	int (*wrapper_init)(struct pvr_device *pvr_dev);
+
+	/**
+	 * @has_fixed_data_addr:
+	 *
+	 * Called to check if firmware fixed data must be loaded at the address given by the
+	 * firmware layout table.
+	 *
+	 * This function is mandatory.
+	 *
+	 * Returns:
+	 * * %true if firmware fixed data must be loaded at the address given by the firmware
+	 *   layout table.
+	 * * %false otherwise.
+	 */
+	bool (*has_fixed_data_addr)(void);
+
+	/**
+	 * @irq: FW Interrupt information.
+	 *
+	 * These are processor-dependent, and should be initialized by the
+	 * processor backend in its &pvr_fw_defs->init hook.
+	 */
+	struct {
+		/** @enable_reg: FW interrupt enable register. */
+		u32 enable_reg;
+
+		/** @status_reg: FW interrupt status register. */
+		u32 status_reg;
+
+		/**
+		 * @clear_reg: FW interrupt clear register.
+		 *
+		 * If @status_reg == @clear_reg, we clear by writing a bit to zero,
+		 * otherwise we clear by writing a bit to one.
+		 */
+		u32 clear_reg;
+
+		/** @event_mask: Bitmask of events to listen for. */
+		u32 event_mask;
+
+		/** @clear_mask: Value to write to the clear_reg in order to clear FW IRQs. */
+		u32 clear_mask;
+	} irq;
+};
+
+/**
+ * struct pvr_fw_mem - FW memory allocations
+ */
+struct pvr_fw_mem {
+	/** @code_obj: Object representing firmware code. */
+	struct pvr_fw_object *code_obj;
+
+	/** @data_obj: Object representing firmware data.
 */
+	struct pvr_fw_object *data_obj;
+
+	/**
+	 * @core_code_obj: Object representing firmware core code. May be
+	 * %NULL if firmware does not contain this section.
+	 */
+	struct pvr_fw_object *core_code_obj;
+
+	/**
+	 * @core_data_obj: Object representing firmware core data. May be
+	 * %NULL if firmware does not contain this section.
+	 */
+	struct pvr_fw_object *core_data_obj;
+
+	/** @code: Driver-side copy of firmware code. */
+	u8 *code;
+
+	/** @data: Driver-side copy of firmware data. */
+	u8 *data;
+
+	/**
+	 * @core_code: Driver-side copy of firmware core code. May be %NULL if firmware does not
+	 * contain this section.
+	 */
+	u8 *core_code;
+
+	/**
+	 * @core_data: Driver-side copy of firmware core data. May be %NULL if firmware does not
+	 * contain this section.
+	 */
+	u8 *core_data;
+
+	/** @code_alloc_size: Allocation size of firmware code section. */
+	u32 code_alloc_size;
+
+	/** @data_alloc_size: Allocation size of firmware data section. */
+	u32 data_alloc_size;
+
+	/** @core_code_alloc_size: Allocation size of firmware core code section. */
+	u32 core_code_alloc_size;
+
+	/** @core_data_alloc_size: Allocation size of firmware core data section. */
+	u32 core_data_alloc_size;
+
+	/**
+	 * @fwif_connection_ctl_obj: Object representing FWIF connection control
+	 * structure.
+	 */
+	struct pvr_fw_object *fwif_connection_ctl_obj;
+
+	/** @osinit_obj: Object representing FW OSINIT structure. */
+	struct pvr_fw_object *osinit_obj;
+
+	/** @sysinit_obj: Object representing FW SYSINIT structure. */
+	struct pvr_fw_object *sysinit_obj;
+
+	/** @osdata_obj: Object representing FW OSDATA structure. */
+	struct pvr_fw_object *osdata_obj;
+
+	/** @hwrinfobuf_obj: Object representing FW hwrinfobuf structure. */
+	struct pvr_fw_object *hwrinfobuf_obj;
+
+	/** @sysdata_obj: Object representing FW SYSDATA structure. */
+	struct pvr_fw_object *sysdata_obj;
+
+	/** @power_sync_obj: Object representing power sync state. */
+	struct pvr_fw_object *power_sync_obj;
+
+	/** @fault_page_obj: Object representing FW fault page. */
+	struct pvr_fw_object *fault_page_obj;
+
+	/** @gpu_util_fwcb_obj: Object representing FW GPU utilisation control structure. */
+	struct pvr_fw_object *gpu_util_fwcb_obj;
+
+	/** @runtime_cfg_obj: Object representing FW runtime config structure. */
+	struct pvr_fw_object *runtime_cfg_obj;
+
+	/** @mmucache_sync_obj: Object used as the sync parameter in an MMU cache operation. */
+	struct pvr_fw_object *mmucache_sync_obj;
+};
+
+struct pvr_fw_device {
+	/** @firmware: Handle to the firmware loaded into the device. */
+	const struct firmware *firmware;
+
+	/** @header: Pointer to firmware header. */
+	const struct pvr_fw_info_header *header;
+
+	/** @layout_entries: Pointer to firmware layout. */
+	const struct pvr_fw_layout_entry *layout_entries;
+
+	/** @mem: Structure containing objects representing firmware memory allocations. */
+	struct pvr_fw_mem mem;
+
+	/** @booted: %true if the firmware has been booted, %false otherwise. */
+	bool booted;
+
+	/**
+	 * @processor_type: FW processor type for this device. Must be one of
+	 * %PVR_FW_PROCESSOR_TYPE_*.
+	 */
+	u16 processor_type;
+
+	/** @defs: Function table for the FW processor used by this device. */
+	const struct pvr_fw_defs *defs;
+
+	/** @processor_data: Pointer to data specific to FW processor. */
+	union {
+		/** @mips_data: Pointer to MIPS-specific data. */
+		struct pvr_fw_mips_data *mips_data;
+	} processor_data;
+
+	/** @fw_heap_info: Firmware heap information.
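+	 * Populated by the processor-specific &pvr_fw_defs->init hook
+	 * (via pvr_fw_heap_info_init() in the META and MIPS backends).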
*/ + struct { + /** @gpu_addr: Base address of firmware heap in GPU address space. */ + u64 gpu_addr; + + /** @size: Size of main area of heap. */ + u32 size; + + /** @offset_mask: Mask for offsets within FW heap. */ + u32 offset_mask; + + /** @raw_size: Raw size of heap, including reserved areas. */ + u32 raw_size; + + /** @log2_size: Log2 of raw size of heap. */ + u32 log2_size; + + /** @config_offset: Offset of config area within heap. */ + u32 config_offset; + + /** @reserved_size: Size of reserved area in heap. */ + u32 reserved_size; + } fw_heap_info; + + /** @fw_mm: Firmware address space allocator. */ + struct drm_mm fw_mm; + + /** @fw_mm_lock: Lock protecting access to &fw_mm. */ + spinlock_t fw_mm_lock; + + /** @fw_mm_base: Base address of address space managed by @fw_mm. */ + u64 fw_mm_base; + + /** + * @fwif_connection_ctl: Pointer to CPU mapping of FWIF connection + * control structure. + */ + struct rogue_fwif_connection_ctl *fwif_connection_ctl; + + /** @fwif_sysinit: Pointer to CPU mapping of FW SYSINIT structure. */ + struct rogue_fwif_sysinit *fwif_sysinit; + + /** @fwif_sysdata: Pointer to CPU mapping of FW SYSDATA structure. */ + struct rogue_fwif_sysdata *fwif_sysdata; + + /** @fwif_osinit: Pointer to CPU mapping of FW OSINIT structure. */ + struct rogue_fwif_osinit *fwif_osinit; + + /** @fwif_osdata: Pointer to CPU mapping of FW OSDATA structure. */ + struct rogue_fwif_osdata *fwif_osdata; + + /** @power_sync: Pointer to CPU mapping of power sync state. */ + u32 *power_sync; + + /** @hwrinfobuf: Pointer to CPU mapping of FW HWR info buffer. */ + struct rogue_fwif_hwrinfobuf *hwrinfobuf; + + /** @fw_trace: Device firmware trace buffer state. */ + struct pvr_fw_trace fw_trace; + + /** @fw_objs: Structure tracking FW objects. */ + struct { + /** @list: Head of FW object list. */ + struct list_head list; + + /** @lock: Lock protecting access to FW object list. 
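+		 * Note that pvr_fw_hard_reset() holds this lock across the
+		 * whole object walk while every object is re-initialised.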
*/ + struct mutex lock; + } fw_objs; +}; + +#define pvr_fw_irq_read_reg(pvr_dev, name) \ + pvr_cr_read32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg) + +#define pvr_fw_irq_write_reg(pvr_dev, name, value) \ + pvr_cr_write32((pvr_dev), (pvr_dev)->fw_dev.defs->irq.name ## _reg, value) + +#define pvr_fw_irq_pending(pvr_dev) \ + (pvr_fw_irq_read_reg(pvr_dev, status) & (pvr_dev)->fw_dev.defs->irq.event_mask) + +#define pvr_fw_irq_clear(pvr_dev) \ + pvr_fw_irq_write_reg(pvr_dev, clear, (pvr_dev)->fw_dev.defs->irq.clear_mask) + +#define pvr_fw_irq_enable(pvr_dev) \ + pvr_fw_irq_write_reg(pvr_dev, enable, (pvr_dev)->fw_dev.defs->irq.event_mask) + +#define pvr_fw_irq_disable(pvr_dev) \ + pvr_fw_irq_write_reg(pvr_dev, enable, 0) + +extern const struct pvr_fw_defs pvr_fw_defs_meta; +extern const struct pvr_fw_defs pvr_fw_defs_mips; + +int pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev); +int pvr_fw_init(struct pvr_device *pvr_dev); +void pvr_fw_fini(struct pvr_device *pvr_dev); + +int pvr_wait_for_fw_boot(struct pvr_device *pvr_dev); + +int +pvr_fw_hard_reset(struct pvr_device *pvr_dev); + +void pvr_fw_mts_schedule(struct pvr_device *pvr_dev, u32 val); + +void +pvr_fw_heap_info_init(struct pvr_device *pvr_dev, u32 log2_size, u32 reserved_size); + +const struct pvr_fw_layout_entry * +pvr_fw_find_layout_entry(struct pvr_device *pvr_dev, enum pvr_fw_section_id id); +int +pvr_fw_find_mmu_segment(struct pvr_device *pvr_dev, u32 addr, u32 size, void *fw_code_ptr, + void *fw_data_ptr, void *fw_core_code_ptr, void *fw_core_data_ptr, + void **host_addr_out); + +int +pvr_fw_structure_cleanup(struct pvr_device *pvr_dev, u32 type, struct pvr_fw_object *fw_obj, + u32 offset); + +int pvr_fw_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags, + void (*init)(void *cpu_ptr, void *priv), void *init_priv, + struct pvr_fw_object **pvr_obj_out); + +void *pvr_fw_object_create_and_map(struct pvr_device *pvr_dev, size_t size, u64 flags, + void (*init)(void *cpu_ptr, void *priv), + void *init_priv, struct pvr_fw_object **pvr_obj_out); + +void * +pvr_fw_object_create_and_map_offset(struct pvr_device *pvr_dev, u32 dev_offset, size_t size, + u64 flags, void (*init)(void *cpu_ptr, void *priv), + void *init_priv, struct pvr_fw_object **pvr_obj_out); + +static __always_inline void * +pvr_fw_object_vmap(struct pvr_fw_object *fw_obj) +{ + return pvr_gem_object_vmap(fw_obj->gem); +} + +static __always_inline void +pvr_fw_object_vunmap(struct pvr_fw_object *fw_obj) +{ + pvr_gem_object_vunmap(fw_obj->gem); +} + +void pvr_fw_object_destroy(struct pvr_fw_object *fw_obj); + +static __always_inline void +pvr_fw_object_unmap_and_destroy(struct pvr_fw_object *fw_obj) +{ + pvr_fw_object_vunmap(fw_obj); + pvr_fw_object_destroy(fw_obj); +} + +/** + * pvr_fw_object_get_dma_addr() - Get DMA address for given offset in firmware + * object. + * @fw_obj: Pointer to object to lookup address in. + * @offset: Offset within object to lookup address at. + * @dma_addr_out: Pointer to location to store DMA address. + * + * Returns: + * * 0 on success, or + * * -%EINVAL if object is not currently backed, or if @offset is out of valid + * range for this object. 
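+ *
+ * The DMA address refers to the backing pages as seen by the device, and is
+ * distinct from the FW-processor address reported by
+ * pvr_fw_object_get_fw_addr().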
+ */
+static __always_inline int
+pvr_fw_object_get_dma_addr(struct pvr_fw_object *fw_obj, u32 offset, dma_addr_t *dma_addr_out)
+{
+	return pvr_gem_get_dma_addr(fw_obj->gem, offset, dma_addr_out);
+}
+
+void pvr_fw_object_get_fw_addr_offset(struct pvr_fw_object *fw_obj, u32 offset, u32 *fw_addr_out);
+
+static __always_inline void
+pvr_fw_object_get_fw_addr(struct pvr_fw_object *fw_obj, u32 *fw_addr_out)
+{
+	pvr_fw_object_get_fw_addr_offset(fw_obj, 0, fw_addr_out);
+}
+
+#endif /* PVR_FW_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_info.h b/drivers/gpu/drm/imagination/pvr_fw_info.h
new file mode 100644
index 0000000000..c363944061
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_info.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_INFO_H
+#define PVR_FW_INFO_H
+
+#include <linux/bits.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+/*
+ * Firmware binary block unit in bytes.
+ * Raw data stored in FW binary will be aligned to this size.
+ */
+#define FW_BLOCK_SIZE SZ_4K
+
+/* Maximum number of entries in firmware layout table. */
+#define PVR_FW_INFO_MAX_NUM_ENTRIES 8
+
+enum pvr_fw_section_id {
+	META_CODE = 0,
+	META_PRIVATE_DATA,
+	META_COREMEM_CODE,
+	META_COREMEM_DATA,
+	MIPS_CODE,
+	MIPS_EXCEPTIONS_CODE,
+	MIPS_BOOT_CODE,
+	MIPS_PRIVATE_DATA,
+	MIPS_BOOT_DATA,
+	MIPS_STACK,
+	RISCV_UNCACHED_CODE,
+	RISCV_CACHED_CODE,
+	RISCV_PRIVATE_DATA,
+	RISCV_COREMEM_CODE,
+	RISCV_COREMEM_DATA,
+};
+
+enum pvr_fw_section_type {
+	NONE = 0,
+	FW_CODE,
+	FW_DATA,
+	FW_COREMEM_CODE,
+	FW_COREMEM_DATA,
+};
+
+/*
+ * FW binary format with FW info attached:
+ *
+ *          Contents        Offset
+ *     +-----------------+
+ *     |                 | 0
+ *     |                 |
+ *     | Original binary |
+ *     | file            |
+ *     | (.ldr/.elf)     |
+ *     |                 |
+ *     |                 |
+ *     +-----------------+
+ *     | Device info     | FILE_SIZE - 4K - device_info_size
+ *     +-----------------+
+ *     | FW info header  | FILE_SIZE - 4K
+ *     +-----------------+
+ *     |                 |
+ *     | FW layout table |
+ *     |                 |
+ *     +-----------------+
+ *                         FILE_SIZE
+ */
+
+#define PVR_FW_INFO_VERSION 3
+
+#define PVR_FW_FLAGS_OPEN_SOURCE BIT(0)
+
+/** struct pvr_fw_info_header - Firmware header */
+struct pvr_fw_info_header {
+	/** @info_version: FW info header version. */
+	u32 info_version;
+	/** @header_len: Header length. */
+	u32 header_len;
+	/** @layout_entry_num: Number of entries in the layout table. */
+	u32 layout_entry_num;
+	/** @layout_entry_size: Size of an entry in the layout table. */
+	u32 layout_entry_size;
+	/** @bvnc: GPU ID supported by firmware. */
+	aligned_u64 bvnc;
+	/** @fw_page_size: Page size of processor on which firmware executes. */
+	u32 fw_page_size;
+	/** @flags: Compatibility flags. */
+	u32 flags;
+	/** @fw_version_major: Firmware major version number. */
+	u16 fw_version_major;
+	/** @fw_version_minor: Firmware minor version number. */
+	u16 fw_version_minor;
+	/** @fw_version_build: Firmware build number. */
+	u32 fw_version_build;
+	/** @device_info_size: Size of device info structure. */
+	u32 device_info_size;
+	/** @padding: Padding. */
+	u32 padding;
+};
+
+/**
+ * struct pvr_fw_layout_entry - Entry in firmware layout table, describing a
+ * section of the firmware image
+ */
+struct pvr_fw_layout_entry {
+	/** @id: Section ID. */
+	enum pvr_fw_section_id id;
+	/** @type: Section type. */
+	enum pvr_fw_section_type type;
+	/** @base_addr: Base address of section in FW address space. */
+	u32 base_addr;
+	/** @max_size: Maximum size of section, in bytes.
 */
+	u32 max_size;
+	/** @alloc_size: Allocation size of section, in bytes. */
+	u32 alloc_size;
+	/** @alloc_offset: Allocation offset of section. */
+	u32 alloc_offset;
+};
+
+/**
+ * struct pvr_fw_device_info_header - Device information header.
+ */
+struct pvr_fw_device_info_header {
+	/** @brn_mask_size: BRN mask size (in u64s). */
+	u64 brn_mask_size;
+	/** @ern_mask_size: ERN mask size (in u64s). */
+	u64 ern_mask_size;
+	/** @feature_mask_size: Feature mask size (in u64s). */
+	u64 feature_mask_size;
+	/** @feature_param_size: Feature parameter size (in u64s). */
+	u64 feature_param_size;
+};
+
+#endif /* PVR_FW_INFO_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
new file mode 100644
index 0000000000..c39beb70c3
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_info.h"
+#include "pvr_fw_meta.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_rogue_meta.h"
+#include "pvr_vm.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define ROGUE_FW_HEAP_META_SHIFT 25 /* 32 MB */
+
+#define POLL_TIMEOUT_USEC 1000000
+
+/**
+ * pvr_meta_cr_read32() - Read a META register via the Slave Port
+ * @pvr_dev: Device pointer.
+ * @reg_addr: Address of register to read.
+ * @reg_value_out: Pointer to location to store register value.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error returned by pvr_cr_poll_reg32().
+ */
+int
+pvr_meta_cr_read32(struct pvr_device *pvr_dev, u32 reg_addr, u32 *reg_value_out)
+{
+	int err;
+
+	/* Wait for Slave Port to be Ready. */
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL1,
+				ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+				ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+				ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+				ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+				POLL_TIMEOUT_USEC);
+	if (err)
+		return err;
+
+	/* Issue a Read. */
+	pvr_cr_write32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL0,
+		       reg_addr | ROGUE_CR_META_SP_MSLVCTRL0_RD_EN);
+	(void)pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL0); /* Fence write. */
+
+	/* Wait for Slave Port to be Ready. */
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL1,
+				ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+				ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+				ROGUE_CR_META_SP_MSLVCTRL1_READY_EN |
+				ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+				POLL_TIMEOUT_USEC);
+	if (err)
+		return err;
+
+	*reg_value_out = pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVDATAX);
+
+	return 0;
+}
+
+static int
+pvr_meta_wrapper_init(struct pvr_device *pvr_dev)
+{
+	u64 garten_config;
+
+	/* Configure META to Master boot. */
+	pvr_cr_write64(pvr_dev, ROGUE_CR_META_BOOT, ROGUE_CR_META_BOOT_MODE_EN);
+
+	/* Set Garten IDLE to META idle and Set the Garten Wrapper BIF Fence address. */
+
+	/* Garten IDLE bit controlled by META. */
+	garten_config = ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META;
+
+	/* The fence addr is set during the fw init sequence. */
+
+	/* Set PC = 0 for fences. */
+	garten_config &=
+		ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK;
+	garten_config |=
+		(u64)MMU_CONTEXT_MAPPING_FWPRIV
+		<< ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT;
+
+	/* Set SLC DM=META.
*/ + garten_config |= ((u64)ROGUE_FW_SEGMMU_META_BIFDM_ID) + << ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT; + + pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG, garten_config); + + return 0; +} + +static __always_inline void +add_boot_arg(u32 **boot_conf, u32 param, u32 data) +{ + *(*boot_conf)++ = param; + *(*boot_conf)++ = data; +} + +static int +meta_ldr_cmd_loadmem(struct drm_device *drm_dev, const u8 *fw, + struct rogue_meta_ldr_l1_data_blk *l1_data, u32 coremem_size, u8 *fw_code_ptr, + u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr, const u32 fw_size) +{ + struct rogue_meta_ldr_l2_data_blk *l2_block = + (struct rogue_meta_ldr_l2_data_blk *)(fw + + l1_data->cmd_data[1]); + struct pvr_device *pvr_dev = to_pvr_device(drm_dev); + u32 offset = l1_data->cmd_data[0]; + u32 data_size; + void *write_addr; + int err; + + /* Verify header is within bounds. */ + if (((u8 *)l2_block - fw) >= fw_size || ((u8 *)(l2_block + 1) - fw) >= fw_size) + return -EINVAL; + + data_size = l2_block->length - 6 /* L2 Tag length and checksum */; + + /* Verify data is within bounds. */ + if (((u8 *)l2_block->block_data - fw) >= fw_size || + ((((u8 *)l2_block->block_data) + data_size) - fw) >= fw_size) + return -EINVAL; + + if (!ROGUE_META_IS_COREMEM_CODE(offset, coremem_size) && + !ROGUE_META_IS_COREMEM_DATA(offset, coremem_size)) { + /* Global range is aliased to local range */ + offset &= ~META_MEM_GLOBAL_RANGE_BIT; + } + + err = pvr_fw_find_mmu_segment(pvr_dev, offset, data_size, fw_code_ptr, fw_data_ptr, + fw_core_code_ptr, fw_core_data_ptr, &write_addr); + if (err) { + drm_err(drm_dev, + "Addr 0x%x (size: %d) not found in any firmware segment", + offset, data_size); + return err; + } + + memcpy(write_addr, l2_block->block_data, data_size); + + return 0; +} + +static int +meta_ldr_cmd_zeromem(struct drm_device *drm_dev, + struct rogue_meta_ldr_l1_data_blk *l1_data, u32 coremem_size, + u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr) +{ + struct pvr_device *pvr_dev = to_pvr_device(drm_dev); + u32 offset = l1_data->cmd_data[0]; + u32 byte_count = l1_data->cmd_data[1]; + void *write_addr; + int err; + + if (ROGUE_META_IS_COREMEM_DATA(offset, coremem_size)) { + /* cannot zero coremem directly */ + return 0; + } + + /* Global range is aliased to local range */ + offset &= ~META_MEM_GLOBAL_RANGE_BIT; + + err = pvr_fw_find_mmu_segment(pvr_dev, offset, byte_count, fw_code_ptr, fw_data_ptr, + fw_core_code_ptr, fw_core_data_ptr, &write_addr); + if (err) { + drm_err(drm_dev, + "Addr 0x%x (size: %d) not found in any firmware segment", + offset, byte_count); + return err; + } + + memset(write_addr, 0, byte_count); + + return 0; +} + +static int +meta_ldr_cmd_config(struct drm_device *drm_dev, const u8 *fw, + struct rogue_meta_ldr_l1_data_blk *l1_data, + const u32 fw_size, u32 **boot_conf_ptr) +{ + struct rogue_meta_ldr_l2_data_blk *l2_block = + (struct rogue_meta_ldr_l2_data_blk *)(fw + + l1_data->cmd_data[0]); + struct rogue_meta_ldr_cfg_blk *config_command; + u32 l2_block_size; + u32 curr_block_size = 0; + u32 *boot_conf = boot_conf_ptr ? *boot_conf_ptr : NULL; + + /* Verify block header is within bounds. 
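+	 * Both the L2 block header and the config records it carries are
+	 * validated against the firmware image size before being dereferenced.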
 */
+	if (((u8 *)l2_block - fw) >= fw_size || ((u8 *)(l2_block + 1) - fw) >= fw_size)
+		return -EINVAL;
+
+	l2_block_size = l2_block->length - 6 /* L2 Tag length and checksum */;
+	config_command = (struct rogue_meta_ldr_cfg_blk *)l2_block->block_data;
+
+	if (((u8 *)config_command - fw) >= fw_size ||
+	    ((((u8 *)config_command) + l2_block_size) - fw) >= fw_size)
+		return -EINVAL;
+
+	while (l2_block_size >= 12) {
+		if (config_command->type != ROGUE_META_LDR_CFG_WRITE)
+			return -EINVAL;
+
+		/*
+		 * Only write to bootloader if we got a valid pointer to the FW
+		 * code allocation.
+		 */
+		if (boot_conf) {
+			u32 register_offset = config_command->block_data[0];
+			u32 register_value = config_command->block_data[1];
+
+			/* Do register write */
+			add_boot_arg(&boot_conf, register_offset,
+				     register_value);
+		}
+
+		curr_block_size = 12;
+		l2_block_size -= curr_block_size;
+		config_command = (struct rogue_meta_ldr_cfg_blk
+				  *)((uintptr_t)config_command +
+				     curr_block_size);
+	}
+
+	if (boot_conf_ptr)
+		*boot_conf_ptr = boot_conf;
+
+	return 0;
+}
+
+/**
+ * process_ldr_command_stream() - Process LDR firmware image and populate
+ * firmware sections
+ * @pvr_dev: Device pointer.
+ * @fw: Pointer to firmware image.
+ * @fw_code_ptr: Pointer to FW code section.
+ * @fw_data_ptr: Pointer to FW data section.
+ * @fw_core_code_ptr: Pointer to FW coremem code section.
+ * @fw_core_data_ptr: Pointer to FW coremem data section.
+ * @boot_conf_ptr: Pointer to boot config argument pointer.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -EINVAL on any error in LDR command stream.
+ */
+static int
+process_ldr_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr,
+			   u8 *fw_data_ptr, u8 *fw_core_code_ptr,
+			   u8 *fw_core_data_ptr, u32 **boot_conf_ptr)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	struct rogue_meta_ldr_block_hdr *ldr_header =
+		(struct rogue_meta_ldr_block_hdr *)fw;
+	struct rogue_meta_ldr_l1_data_blk *l1_data =
+		(struct rogue_meta_ldr_l1_data_blk *)(fw + ldr_header->sl_data);
+	const u32 fw_size = pvr_dev->fw_dev.firmware->size;
+	int err;
+
+	u32 *boot_conf = boot_conf_ptr ? *boot_conf_ptr : NULL;
+	u32 coremem_size;
+
+	err = PVR_FEATURE_VALUE(pvr_dev, meta_coremem_size, &coremem_size);
+	if (err)
+		return err;
+
+	coremem_size *= SZ_1K;
+
+	while (l1_data) {
+		/* Verify block header is within bounds.
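+		 * Each L1 block lives at an offset supplied by the previous
+		 * block, so it must be range-checked before it is used.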
 */
+		if (((u8 *)l1_data - fw) >= fw_size || ((u8 *)(l1_data + 1) - fw) >= fw_size)
+			return -EINVAL;
+
+		if (ROGUE_META_LDR_BLK_IS_COMMENT(l1_data->cmd)) {
+			/* Don't process comment blocks */
+			goto next_block;
+		}
+
+		switch (l1_data->cmd & ROGUE_META_LDR_CMD_MASK) {
+		case ROGUE_META_LDR_CMD_LOADMEM:
+			err = meta_ldr_cmd_loadmem(drm_dev, fw, l1_data,
+						   coremem_size,
+						   fw_code_ptr, fw_data_ptr,
+						   fw_core_code_ptr,
+						   fw_core_data_ptr, fw_size);
+			if (err)
+				return err;
+			break;
+
+		case ROGUE_META_LDR_CMD_START_THREADS:
+			/* Don't process this block */
+			break;
+
+		case ROGUE_META_LDR_CMD_ZEROMEM:
+			err = meta_ldr_cmd_zeromem(drm_dev, l1_data,
+						   coremem_size,
+						   fw_code_ptr, fw_data_ptr,
+						   fw_core_code_ptr,
+						   fw_core_data_ptr);
+			if (err)
+				return err;
+			break;
+
+		case ROGUE_META_LDR_CMD_CONFIG:
+			err = meta_ldr_cmd_config(drm_dev, fw, l1_data, fw_size,
+						  &boot_conf);
+			if (err)
+				return err;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+next_block:
+		if (l1_data->next == 0xFFFFFFFF)
+			break;
+
+		l1_data = (struct rogue_meta_ldr_l1_data_blk *)(fw +
+								l1_data->next);
+	}
+
+	if (boot_conf_ptr)
+		*boot_conf_ptr = boot_conf;
+
+	return 0;
+}
+
+static void
+configure_seg_id(u64 seg_out_addr, u32 seg_base, u32 seg_limit, u32 seg_id,
+		 u32 **boot_conf_ptr)
+{
+	u32 seg_out_addr0 = seg_out_addr & 0x00000000FFFFFFFFUL;
+	u32 seg_out_addr1 = (seg_out_addr >> 32) & 0x00000000FFFFFFFFUL;
+	u32 *boot_conf = *boot_conf_ptr;
+
+	/* META segments have a minimum size. */
+	u32 limit_off = max(seg_limit, ROGUE_FW_SEGMMU_ALIGN);
+
+	/* The limit is an offset, therefore off = size - 1. */
+	limit_off -= 1;
+
+	seg_base |= ROGUE_FW_SEGMMU_ALLTHRS_WRITEABLE;
+
+	add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_BASE(seg_id), seg_base);
+	add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_LIMIT(seg_id), limit_off);
+	add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_OUTA0(seg_id), seg_out_addr0);
+	add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_OUTA1(seg_id), seg_out_addr1);
+
+	*boot_conf_ptr = boot_conf;
+}
+
+static u64 get_fw_obj_gpu_addr(struct pvr_fw_object *fw_obj)
+{
+	struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev);
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+
+	return fw_obj->fw_addr_offset + fw_dev->fw_heap_info.gpu_addr;
+}
+
+static void
+configure_seg_mmu(struct pvr_device *pvr_dev, u32 **boot_conf_ptr)
+{
+	const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries;
+	u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num;
+	u64 seg_out_addr_top;
+	u32 i;
+
+	seg_out_addr_top =
+		ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV,
+						ROGUE_FW_SEGMMU_META_BIFDM_ID);
+
+	for (i = 0; i < num_layout_entries; i++) {
+		/*
+		 * FW code is using the bootloader segment which is already
+		 * configured on boot. FW coremem code and data don't use the
+		 * segment MMU. Only the FW data segment needs to be configured.
+		 */
+		if (layout_entries[i].type == FW_DATA) {
+			u32 seg_id = ROGUE_FW_SEGMMU_DATA_ID;
+			u64 seg_out_addr = get_fw_obj_gpu_addr(pvr_dev->fw_dev.mem.data_obj);
+
+			seg_out_addr += layout_entries[i].alloc_offset;
+			seg_out_addr |= seg_out_addr_top;
+
+			/* Write the sequence to the bootldr.
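+			 * configure_seg_id() emits four add_boot_arg() pairs:
+			 * segment base, limit, and both 32-bit halves of the
+			 * output address.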
*/ + configure_seg_id(seg_out_addr, + layout_entries[i].base_addr, + layout_entries[i].alloc_size, seg_id, + boot_conf_ptr); + + break; + } + } +} + +static void +configure_meta_caches(u32 **boot_conf_ptr) +{ + u32 *boot_conf = *boot_conf_ptr; + u32 d_cache_t0, i_cache_t0; + u32 d_cache_t1, i_cache_t1; + u32 d_cache_t2, i_cache_t2; + u32 d_cache_t3, i_cache_t3; + + /* Initialise I/Dcache settings */ + d_cache_t0 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; + d_cache_t1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; + d_cache_t2 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; + d_cache_t3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; + i_cache_t0 = 0; + i_cache_t1 = 0; + i_cache_t2 = 0; + i_cache_t3 = 0; + + d_cache_t0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE; + i_cache_t0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE; + + /* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */ + add_boot_arg(&boot_conf, META_CR_MMCU_LOCAL_EBCTRL, + META_CR_MMCU_LOCAL_EBCTRL_ICWIN | + META_CR_MMCU_LOCAL_EBCTRL_DCWIN); + + /* Data cache partitioning thread 0 to 3 */ + add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(0), d_cache_t0); + add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(1), d_cache_t1); + add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(2), d_cache_t2); + add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(3), d_cache_t3); + + /* Enable data cache hits */ + add_boot_arg(&boot_conf, META_CR_MMCU_DCACHE_CTRL, + META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN); + + /* Instruction cache partitioning thread 0 to 3 */ + add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(0), i_cache_t0); + add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(1), i_cache_t1); + add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(2), i_cache_t2); + add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(3), i_cache_t3); + + /* Enable instruction cache hits */ + add_boot_arg(&boot_conf, META_CR_MMCU_ICACHE_CTRL, + META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN); + + add_boot_arg(&boot_conf, 0x040000C0, 0); + + *boot_conf_ptr = boot_conf; +} + +static int +pvr_meta_fw_process(struct pvr_device *pvr_dev, const u8 *fw, + u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr, + u32 core_code_alloc_size) +{ + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + u32 *boot_conf; + int err; + + boot_conf = ((u32 *)fw_code_ptr) + ROGUE_FW_BOOTLDR_CONF_OFFSET; + + /* Slave port and JTAG accesses are privileged. */ + add_boot_arg(&boot_conf, META_CR_SYSC_JTAG_THREAD, + META_CR_SYSC_JTAG_THREAD_PRIV_EN); + + configure_seg_mmu(pvr_dev, &boot_conf); + + /* Populate FW sections from LDR image. */ + err = process_ldr_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr, + fw_core_data_ptr, &boot_conf); + if (err) + return err; + + configure_meta_caches(&boot_conf); + + /* End argument list. */ + add_boot_arg(&boot_conf, 0, 0); + + if (fw_dev->mem.core_code_obj) { + u32 core_code_fw_addr; + + pvr_fw_object_get_fw_addr(fw_dev->mem.core_code_obj, &core_code_fw_addr); + add_boot_arg(&boot_conf, core_code_fw_addr, core_code_alloc_size); + } else { + add_boot_arg(&boot_conf, 0, 0); + } + /* None of the cores supported by this driver have META DMA. 
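+	 * A zeroed argument pair is emitted in its place (see the
+	 * add_boot_arg() call below) so the boot config stream keeps its
+	 * expected shape.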
 */
+	add_boot_arg(&boot_conf, 0, 0);
+
+	return 0;
+}
+
+static int
+pvr_meta_init(struct pvr_device *pvr_dev)
+{
+	pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_META_SHIFT, 0);
+
+	return 0;
+}
+
+static u32
+pvr_meta_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset)
+{
+	u32 fw_addr = fw_obj->fw_addr_offset + offset + ROGUE_FW_SEGMMU_DATA_BASE_ADDRESS;
+
+	/* META cacheability is determined by address. */
+	if (fw_obj->gem->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
+		fw_addr |= ROGUE_FW_SEGMMU_DATA_META_UNCACHED |
+			   ROGUE_FW_SEGMMU_DATA_VIVT_SLC_UNCACHED;
+
+	return fw_addr;
+}
+
+static int
+pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+	struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+	return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start,
+			  pvr_gem_object_size(pvr_obj));
+}
+
+static void
+pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+	pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
+		     fw_obj->fw_mm_node.size);
+}
+
+static bool
+pvr_meta_has_fixed_data_addr(void)
+{
+	return false;
+}
+
+const struct pvr_fw_defs pvr_fw_defs_meta = {
+	.init = pvr_meta_init,
+	.fw_process = pvr_meta_fw_process,
+	.vm_map = pvr_meta_vm_map,
+	.vm_unmap = pvr_meta_vm_unmap,
+	.get_fw_addr_with_offset = pvr_meta_get_fw_addr_with_offset,
+	.wrapper_init = pvr_meta_wrapper_init,
+	.has_fixed_data_addr = pvr_meta_has_fixed_data_addr,
+	.irq = {
+		.enable_reg = ROGUE_CR_META_SP_MSLVIRQENABLE,
+		.status_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
+		.clear_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS,
+		.event_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN,
+		.clear_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK,
+	},
+};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.h b/drivers/gpu/drm/imagination/pvr_fw_meta.h
new file mode 100644
index 0000000000..911ad700cb
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_META_H
+#define PVR_FW_META_H
+
+#include <linux/types.h>
+
+/* Forward declaration from pvr_device.h */
+struct pvr_device;
+
+int pvr_meta_cr_read32(struct pvr_device *pvr_dev, u32 reg_addr, u32 *reg_value_out);
+
+#endif /* PVR_FW_META_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.c b/drivers/gpu/drm/imagination/pvr_fw_mips.c
new file mode 100644
index 0000000000..0bed0257e2
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_mips.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_mips.h"
+#include "pvr_vm_mips.h"
+
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+#define ROGUE_FW_HEAP_MIPS_BASE 0xC0000000
+#define ROGUE_FW_HEAP_MIPS_SHIFT 24 /* 16 MB */
+#define ROGUE_FW_HEAP_MIPS_RESERVED_SIZE SZ_1M
+
+/**
+ * process_elf_command_stream() - Process ELF firmware image and populate
+ * firmware sections
+ * @pvr_dev: Device pointer.
+ * @fw: Pointer to firmware image.
+ * @fw_code_ptr: Pointer to FW code section.
+ * @fw_data_ptr: Pointer to FW data section.
+ * @fw_core_code_ptr: Pointer to FW coremem code section.
+ * @fw_core_data_ptr: Pointer to FW coremem data section.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * -EINVAL on any error in ELF command stream.
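+ *
+ * Any error from pvr_fw_find_mmu_segment() is propagated as-is when a
+ * loadable segment does not fall inside a known firmware section.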
+ */ +static int +process_elf_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr, + u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr) +{ + struct elf32_hdr *header = (struct elf32_hdr *)fw; + struct elf32_phdr *program_header = (struct elf32_phdr *)(fw + header->e_phoff); + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + u32 entry; + int err; + + for (entry = 0; entry < header->e_phnum; entry++, program_header++) { + void *write_addr; + + /* Only consider loadable entries in the ELF segment table */ + if (program_header->p_type != PT_LOAD) + continue; + + err = pvr_fw_find_mmu_segment(pvr_dev, program_header->p_vaddr, + program_header->p_memsz, fw_code_ptr, fw_data_ptr, + fw_core_code_ptr, fw_core_data_ptr, &write_addr); + if (err) { + drm_err(drm_dev, + "Addr 0x%x (size: %d) not found in any firmware segment", + program_header->p_vaddr, program_header->p_memsz); + return err; + } + + /* Write to FW allocation only if available */ + if (write_addr) { + memcpy(write_addr, fw + program_header->p_offset, + program_header->p_filesz); + + memset((u8 *)write_addr + program_header->p_filesz, 0, + program_header->p_memsz - program_header->p_filesz); + } + } + + return 0; +} + +static int +pvr_mips_init(struct pvr_device *pvr_dev) +{ + pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_MIPS_SHIFT, ROGUE_FW_HEAP_MIPS_RESERVED_SIZE); + + return pvr_vm_mips_init(pvr_dev); +} + +static void +pvr_mips_fini(struct pvr_device *pvr_dev) +{ + pvr_vm_mips_fini(pvr_dev); +} + +static int +pvr_mips_fw_process(struct pvr_device *pvr_dev, const u8 *fw, + u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr, + u32 core_code_alloc_size) +{ + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data; + const struct pvr_fw_layout_entry *boot_code_entry; + const struct pvr_fw_layout_entry *boot_data_entry; + const struct pvr_fw_layout_entry *exception_code_entry; + const struct pvr_fw_layout_entry *stack_entry; + struct rogue_mipsfw_boot_data *boot_data; + dma_addr_t dma_addr; + u32 page_nr; + int err; + + err = process_elf_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr, + fw_core_data_ptr); + if (err) + return err; + + boot_code_entry = pvr_fw_find_layout_entry(pvr_dev, MIPS_BOOT_CODE); + boot_data_entry = pvr_fw_find_layout_entry(pvr_dev, MIPS_BOOT_DATA); + exception_code_entry = pvr_fw_find_layout_entry(pvr_dev, MIPS_EXCEPTIONS_CODE); + if (!boot_code_entry || !boot_data_entry || !exception_code_entry) + return -EINVAL; + + WARN_ON(pvr_gem_get_dma_addr(fw_dev->mem.code_obj->gem, boot_code_entry->alloc_offset, + &mips_data->boot_code_dma_addr)); + WARN_ON(pvr_gem_get_dma_addr(fw_dev->mem.data_obj->gem, boot_data_entry->alloc_offset, + &mips_data->boot_data_dma_addr)); + WARN_ON(pvr_gem_get_dma_addr(fw_dev->mem.code_obj->gem, + exception_code_entry->alloc_offset, + &mips_data->exception_code_dma_addr)); + + stack_entry = pvr_fw_find_layout_entry(pvr_dev, MIPS_STACK); + if (!stack_entry) + return -EINVAL; + + boot_data = (struct rogue_mipsfw_boot_data *)(fw_data_ptr + boot_data_entry->alloc_offset + + ROGUE_MIPSFW_BOOTLDR_CONF_OFFSET); + + WARN_ON(pvr_fw_object_get_dma_addr(fw_dev->mem.data_obj, stack_entry->alloc_offset, + &dma_addr)); + boot_data->stack_phys_addr = dma_addr; + + boot_data->reg_base = pvr_dev->regs_resource->start; + + for (page_nr = 0; page_nr < ARRAY_SIZE(boot_data->pt_phys_addr); page_nr++) { + /* Firmware expects 4k pages, but host page size might be 
different. */ + u32 src_page_nr = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) >> PAGE_SHIFT; + u32 page_offset = (page_nr * ROGUE_MIPSFW_PAGE_SIZE_4K) & ~PAGE_MASK; + + boot_data->pt_phys_addr[page_nr] = mips_data->pt_dma_addr[src_page_nr] + + page_offset; + } + + boot_data->pt_log2_page_size = ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K; + boot_data->pt_num_pages = ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES; + boot_data->reserved1 = 0; + boot_data->reserved2 = 0; + + return 0; +} + +static int +pvr_mips_wrapper_init(struct pvr_device *pvr_dev) +{ + struct pvr_fw_mips_data *mips_data = pvr_dev->fw_dev.processor_data.mips_data; + const u64 remap_settings = ROGUE_MIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE; + u32 phys_bus_width; + + int err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width); + + if (WARN_ON(err)) + return err; + + /* Currently MIPS FW only supported with physical bus width > 32 bits. */ + if (WARN_ON(phys_bus_width <= 32)) + return -EINVAL; + + pvr_cr_write32(pvr_dev, ROGUE_CR_MIPS_WRAPPER_CONFIG, + (ROGUE_MIPSFW_REGISTERS_VIRTUAL_BASE >> + ROGUE_MIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN) | + ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS); + + /* Configure remap for boot code, boot data and exceptions code areas. */ + pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1, + ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN | + ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN); + pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2, + (mips_data->boot_code_dma_addr & + ~ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK) | remap_settings); + + if (PVR_HAS_QUIRK(pvr_dev, 63553)) { + /* + * WA always required on 36 bit cores, to avoid continuous unmapped memory accesses + * to address 0x0. + */ + WARN_ON(phys_bus_width != 36); + + pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1, + ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN); + pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2, + (mips_data->boot_code_dma_addr & + ~ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK) | + remap_settings); + } + + pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1, + ROGUE_MIPSFW_DATA_REMAP_PHYS_ADDR_IN | + ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN); + pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2, + (mips_data->boot_data_dma_addr & + ~ROGUE_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK) | remap_settings); + + pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1, + ROGUE_MIPSFW_CODE_REMAP_PHYS_ADDR_IN | + ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN); + pvr_cr_write64(pvr_dev, ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2, + (mips_data->exception_code_dma_addr & + ~ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK) | remap_settings); + + /* Garten IDLE bit controlled by MIPS. */ + pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG, + ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META); + + /* Turn on the EJTAG probe. */ + pvr_cr_write32(pvr_dev, ROGUE_CR_MIPS_DEBUG_CONFIG, 0); + + return 0; +} + +static u32 +pvr_mips_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset) +{ + struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev); + + /* MIPS cacheability is determined by page table. 
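+	 * Unlike META, no cacheability bits are folded into the returned
+	 * address; only the heap offset mask and the MIPS heap base are
+	 * applied.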
 */
+	return ((fw_obj->fw_addr_offset + offset) & pvr_dev->fw_dev.fw_heap_info.offset_mask) |
+	       ROGUE_FW_HEAP_MIPS_BASE;
+}
+
+static bool
+pvr_mips_has_fixed_data_addr(void)
+{
+	return true;
+}
+
+const struct pvr_fw_defs pvr_fw_defs_mips = {
+	.init = pvr_mips_init,
+	.fini = pvr_mips_fini,
+	.fw_process = pvr_mips_fw_process,
+	.vm_map = pvr_vm_mips_map,
+	.vm_unmap = pvr_vm_mips_unmap,
+	.get_fw_addr_with_offset = pvr_mips_get_fw_addr_with_offset,
+	.wrapper_init = pvr_mips_wrapper_init,
+	.has_fixed_data_addr = pvr_mips_has_fixed_data_addr,
+	.irq = {
+		.enable_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE,
+		.status_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS,
+		.clear_reg = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR,
+		.event_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN,
+		.clear_mask = ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN,
+	},
+};
diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.h b/drivers/gpu/drm/imagination/pvr_fw_mips.h
new file mode 100644
index 0000000000..408dbe63a9
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_mips.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_MIPS_H
+#define PVR_FW_MIPS_H
+
+#include "pvr_rogue_mips.h"
+
+#include <asm/page.h>
+#include <linux/types.h>
+
+/* Forward declaration from pvr_gem.h. */
+struct pvr_gem_object;
+
+#define PVR_MIPS_PT_PAGE_COUNT ((ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K) \
+				>> PAGE_SHIFT)
+/**
+ * struct pvr_fw_mips_data - MIPS-specific data
+ */
+struct pvr_fw_mips_data {
+	/**
+	 * @pt_pages: Pages containing MIPS pagetable.
+	 */
+	struct page *pt_pages[PVR_MIPS_PT_PAGE_COUNT];
+
+	/** @pt: Pointer to CPU mapping of MIPS pagetable. */
+	u32 *pt;
+
+	/** @pt_dma_addr: DMA mappings of MIPS pagetable. */
+	dma_addr_t pt_dma_addr[PVR_MIPS_PT_PAGE_COUNT];
+
+	/** @boot_code_dma_addr: DMA address of MIPS boot code. */
+	dma_addr_t boot_code_dma_addr;
+
+	/** @boot_data_dma_addr: DMA address of MIPS boot data. */
+	dma_addr_t boot_data_dma_addr;
+
+	/** @exception_code_dma_addr: DMA address of MIPS exception code. */
+	dma_addr_t exception_code_dma_addr;
+
+	/** @cache_policy: Cache policy for this processor. */
+	u32 cache_policy;
+
+	/** @pfn_mask: PFN mask for MIPS pagetable. */
+	u32 pfn_mask;
+};
+
+#endif /* PVR_FW_MIPS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.c b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
new file mode 100644
index 0000000000..36cec227cf
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_meta.h"
+#include "pvr_fw_startstop.h"
+#include "pvr_rogue_cr_defs.h"
+#include "pvr_rogue_meta.h"
+#include "pvr_vm.h"
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#define POLL_TIMEOUT_USEC 1000000
+
+static void
+rogue_axi_ace_list_init(struct pvr_device *pvr_dev)
+{
+	/* Setup AXI-ACE config. Set everything to outer cache.
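+	 * The raw field values below follow the
+	 * ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_* shift definitions.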
*/ + u64 reg_val = + (3U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) | + (3U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) | + (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT) | + (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) | + (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) | + (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) | + (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) | + (2U << ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT); + + pvr_cr_write64(pvr_dev, ROGUE_CR_AXI_ACE_LITE_CONFIGURATION, reg_val); +} + +static void +rogue_bif_init(struct pvr_device *pvr_dev) +{ + dma_addr_t pc_dma_addr; + u64 pc_addr; + + /* Acquire the address of the Kernel Page Catalogue. */ + pc_dma_addr = pvr_vm_get_page_table_root_addr(pvr_dev->kernel_vm_ctx); + + /* Write the kernel catalogue base. */ + pc_addr = ((((u64)pc_dma_addr >> ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT) + << ROGUE_CR_BIF_CAT_BASE0_ADDR_SHIFT) & + ~ROGUE_CR_BIF_CAT_BASE0_ADDR_CLRMSK); + + pvr_cr_write64(pvr_dev, BIF_CAT_BASEX(MMU_CONTEXT_MAPPING_FWPRIV), + pc_addr); +} + +static int +rogue_slc_init(struct pvr_device *pvr_dev) +{ + u16 slc_cache_line_size_bits; + u32 reg_val; + int err; + + /* + * SLC Misc control. + * + * Note: This is a 64bit register and we set only the lower 32bits + * leaving the top 32bits (ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS) + * unchanged from the HW default. + */ + reg_val = (pvr_cr_read32(pvr_dev, ROGUE_CR_SLC_CTRL_MISC) & + ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN) | + ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1; + + err = PVR_FEATURE_VALUE(pvr_dev, slc_cache_line_size_bits, &slc_cache_line_size_bits); + if (err) + return err; + + /* Bypass burst combiner if SLC line size is smaller than 1024 bits. */ + if (slc_cache_line_size_bits < 1024) + reg_val |= ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN; + + if (PVR_HAS_QUIRK(pvr_dev, 71242) && !PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support)) + reg_val |= ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN; + + pvr_cr_write32(pvr_dev, ROGUE_CR_SLC_CTRL_MISC, reg_val); + + return 0; +} + +/** + * pvr_fw_start() - Start FW processor and boot firmware + * @pvr_dev: Target PowerVR device. + * + * Returns: + * * 0 on success, or + * * Any error returned by rogue_slc_init(). + */ +int +pvr_fw_start(struct pvr_device *pvr_dev) +{ + bool has_reset2 = PVR_HAS_FEATURE(pvr_dev, xe_tpu2); + u64 soft_reset_mask; + int err; + + if (PVR_HAS_FEATURE(pvr_dev, pbe2_in_xe)) + soft_reset_mask = ROGUE_CR_SOFT_RESET__PBE2_XE__MASKFULL; + else + soft_reset_mask = ROGUE_CR_SOFT_RESET_MASKFULL; + + if (PVR_HAS_FEATURE(pvr_dev, sys_bus_secure_reset)) { + /* + * Disable the default sys_bus_secure protection to perform + * minimal setup. + */ + pvr_cr_write32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE, 0); + (void)pvr_cr_read32(pvr_dev, ROGUE_CR_SYS_BUS_SECURE); /* Fence write */ + } + + /* Set Rogue in soft-reset. */ + pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask); + if (has_reset2) + pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET2, ROGUE_CR_SOFT_RESET2_MASKFULL); + + /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline. */ + (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET); + if (has_reset2) + (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET2); + + /* Take Rascal and Dust out of reset. 
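+	 * The XOR below flips only the RASCALDUSTS_EN bits, keeping every
+	 * other unit asserted in soft-reset.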
*/ + pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, + soft_reset_mask ^ ROGUE_CR_SOFT_RESET_RASCALDUSTS_EN); + if (has_reset2) + pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET2, 0); + + (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET); + if (has_reset2) + (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET2); + + /* Take everything out of reset but the FW processor. */ + pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, ROGUE_CR_SOFT_RESET_GARTEN_EN); + if (has_reset2) + pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET2, 0); + + (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET); + if (has_reset2) + (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET2); + + err = rogue_slc_init(pvr_dev); + if (err) + goto err_reset; + + /* Initialise Firmware wrapper. */ + pvr_dev->fw_dev.defs->wrapper_init(pvr_dev); + + /* We must init the AXI-ACE interface before first BIF transaction. */ + rogue_axi_ace_list_init(pvr_dev); + + if (pvr_dev->fw_dev.processor_type != PVR_FW_PROCESSOR_TYPE_MIPS) { + /* Initialise BIF. */ + rogue_bif_init(pvr_dev); + } + + /* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */ + udelay(3); + + pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, 0x0); + (void)pvr_cr_read64(pvr_dev, ROGUE_CR_SOFT_RESET); + + /* ... and afterwards. */ + udelay(3); + + return 0; + +err_reset: + /* Put everything back into soft-reset. */ + pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, soft_reset_mask); + + return err; +} + +/** + * pvr_fw_stop() - Stop FW processor + * @pvr_dev: Target PowerVR device. + * + * Returns: + * * 0 on success, or + * * Any error returned by pvr_cr_poll_reg32(). + */ +int +pvr_fw_stop(struct pvr_device *pvr_dev) +{ + const u32 sidekick_idle_mask = ROGUE_CR_SIDEKICK_IDLE_MASKFULL & + ~(ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN | + ROGUE_CR_SIDEKICK_IDLE_SOCIF_EN | + ROGUE_CR_SIDEKICK_IDLE_HOSTIF_EN); + bool skip_garten_idle = false; + u32 reg_value; + int err; + + /* + * Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper. + * For cores with the LAYOUT_MARS feature, SIDEKICK would have been + * powered down by the FW. + */ + err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE, sidekick_idle_mask, + sidekick_idle_mask, POLL_TIMEOUT_USEC); + if (err) + return err; + + /* Unset MTS DM association with threads. */ + pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC, + ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL & + ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK); + pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC, + ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL & + ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK); + pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC, + ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL & + ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK); + pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC, + ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL & + ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK); + + /* Extra Idle checks. 
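+	 * The BIF/BIFPM MMU and outstanding-read status registers are polled
+	 * until they all read zero.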
+	 */
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIF_STATUS_MMU, 0,
+				ROGUE_CR_BIF_STATUS_MMU_MASKFULL,
+				POLL_TIMEOUT_USEC);
+	if (err)
+		return err;
+
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIFPM_STATUS_MMU, 0,
+				ROGUE_CR_BIFPM_STATUS_MMU_MASKFULL,
+				POLL_TIMEOUT_USEC);
+	if (err)
+		return err;
+
+	if (!PVR_HAS_FEATURE(pvr_dev, xt_top_infrastructure)) {
+		err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIF_READS_EXT_STATUS, 0,
+					ROGUE_CR_BIF_READS_EXT_STATUS_MASKFULL,
+					POLL_TIMEOUT_USEC);
+		if (err)
+			return err;
+	}
+
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_BIFPM_READS_EXT_STATUS, 0,
+				ROGUE_CR_BIFPM_READS_EXT_STATUS_MASKFULL,
+				POLL_TIMEOUT_USEC);
+	if (err)
+		return err;
+
+	err = pvr_cr_poll_reg64(pvr_dev, ROGUE_CR_SLC_STATUS1, 0,
+				ROGUE_CR_SLC_STATUS1_MASKFULL,
+				POLL_TIMEOUT_USEC);
+	if (err)
+		return err;
+
+	/*
+	 * Wait for SLC to signal IDLE.
+	 * For cores with the LAYOUT_MARS feature, SLC would have been powered
+	 * down by the FW.
+	 */
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SLC_IDLE,
+				ROGUE_CR_SLC_IDLE_MASKFULL,
+				ROGUE_CR_SLC_IDLE_MASKFULL, POLL_TIMEOUT_USEC);
+	if (err)
+		return err;
+
+	/*
+	 * Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper.
+	 * For cores with the LAYOUT_MARS feature, SIDEKICK would have been powered
+	 * down by the FW.
+	 */
+	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE, sidekick_idle_mask,
+				sidekick_idle_mask, POLL_TIMEOUT_USEC);
+	if (err)
+		return err;
+
+	if (pvr_dev->fw_dev.processor_type == PVR_FW_PROCESSOR_TYPE_META) {
+		err = pvr_meta_cr_read32(pvr_dev, META_CR_TxVECINT_BHALT, &reg_value);
+		if (err)
+			return err;
+
+		/*
+		 * Wait for Sidekick/Jones to signal IDLE including the Garten
+		 * Wrapper if there is no debugger attached (TxVECINT_BHALT =
+		 * 0x0).
+		 */
+		if (reg_value)
+			skip_garten_idle = true;
+	}
+
+	if (!skip_garten_idle) {
+		err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_SIDEKICK_IDLE,
+					ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN,
+					ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN,
+					POLL_TIMEOUT_USEC);
+		if (err)
+			return err;
+	}
+
+	if (PVR_HAS_FEATURE(pvr_dev, pbe2_in_xe))
+		pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET,
+			       ROGUE_CR_SOFT_RESET__PBE2_XE__MASKFULL);
+	else
+		pvr_cr_write64(pvr_dev, ROGUE_CR_SOFT_RESET, ROGUE_CR_SOFT_RESET_MASKFULL);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_fw_startstop.h b/drivers/gpu/drm/imagination/pvr_fw_startstop.h
new file mode 100644
index 0000000000..a3cef061bd
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_startstop.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_FW_STARTSTOP_H
+#define PVR_FW_STARTSTOP_H
+
+/* Forward declaration from pvr_device.h. */
+struct pvr_device;
+
+int pvr_fw_start(struct pvr_device *pvr_dev);
+int pvr_fw_stop(struct pvr_device *pvr_dev);
+
+#endif /* PVR_FW_STARTSTOP_H */
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
new file mode 100644
index 0000000000..31199e45b7
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_gem.h"
+#include "pvr_rogue_fwif.h"
+#include "pvr_rogue_fwif_sf.h"
+#include "pvr_fw_trace.h"
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+
+#include <linux/build_bug.h>
+#include <linux/dcache.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+static void
+tracebuf_ctrl_init(void *cpu_ptr, void *priv)
+{
+	struct rogue_fwif_tracebuf *tracebuf_ctrl = cpu_ptr;
+	struct pvr_fw_trace *fw_trace = priv;
+	u32 thread_nr;
+
+	tracebuf_ctrl->tracebuf_size_in_dwords = ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS;
+	tracebuf_ctrl->tracebuf_flags = 0;
+
+	if (fw_trace->group_mask)
+		tracebuf_ctrl->log_type = fw_trace->group_mask | ROGUE_FWIF_LOG_TYPE_TRACE;
+	else
+		tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE;
+
+	for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+		struct rogue_fwif_tracebuf_space *tracebuf_space =
+			&tracebuf_ctrl->tracebuf[thread_nr];
+		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+		pvr_fw_object_get_fw_addr(trace_buffer->buf_obj,
+					  &tracebuf_space->trace_buffer_fw_addr);
+
+		tracebuf_space->trace_buffer = trace_buffer->buf;
+		tracebuf_space->trace_pointer = 0;
+	}
+}
+
+int pvr_fw_trace_init(struct pvr_device *pvr_dev)
+{
+	struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace;
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+	u32 thread_nr;
+	int err;
+
+	for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) {
+		struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr];
+
+		trace_buffer->buf =
+			pvr_fw_object_create_and_map(pvr_dev,
+						     ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS *
+						     sizeof(*trace_buffer->buf),
+						     PVR_BO_FW_FLAGS_DEVICE_UNCACHED |
+						     PVR_BO_FW_NO_CLEAR_ON_RESET,
+						     NULL, NULL, &trace_buffer->buf_obj);
+		if (IS_ERR(trace_buffer->buf)) {
+			drm_err(drm_dev, "Unable to allocate trace buffer\n");
+			err = PTR_ERR(trace_buffer->buf);
+			trace_buffer->buf = NULL;
+			goto err_free_buf;
+		}
+	}
+
+	/* TODO: Provide control of group mask.
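+	 * A zero mask leaves firmware tracing disabled at init time; the
+	 * debugfs path below appears to be the only place the mask is
+	 * changed at runtime, via pvr_fw_trace_mask_update() and
+	 * update_logtype().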
*/ + fw_trace->group_mask = 0; + + fw_trace->tracebuf_ctrl = + pvr_fw_object_create_and_map(pvr_dev, + sizeof(*fw_trace->tracebuf_ctrl), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED | + PVR_BO_FW_NO_CLEAR_ON_RESET, + tracebuf_ctrl_init, fw_trace, + &fw_trace->tracebuf_ctrl_obj); + if (IS_ERR(fw_trace->tracebuf_ctrl)) { + drm_err(drm_dev, "Unable to allocate trace buffer control structure\n"); + err = PTR_ERR(fw_trace->tracebuf_ctrl); + goto err_free_buf; + } + + BUILD_BUG_ON(ARRAY_SIZE(fw_trace->tracebuf_ctrl->tracebuf) != + ARRAY_SIZE(fw_trace->buffers)); + + for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { + struct rogue_fwif_tracebuf_space *tracebuf_space = + &fw_trace->tracebuf_ctrl->tracebuf[thread_nr]; + struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr]; + + trace_buffer->tracebuf_space = tracebuf_space; + } + + return 0; + +err_free_buf: + for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { + struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr]; + + if (trace_buffer->buf) + pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj); + } + + return err; +} + +void pvr_fw_trace_fini(struct pvr_device *pvr_dev) +{ + struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace; + u32 thread_nr; + + for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); thread_nr++) { + struct pvr_fw_trace_buffer *trace_buffer = &fw_trace->buffers[thread_nr]; + + pvr_fw_object_unmap_and_destroy(trace_buffer->buf_obj); + } + pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj); +} + +#if defined(CONFIG_DEBUG_FS) + +/** + * update_logtype() - Send KCCB command to trigger FW to update logtype + * @pvr_dev: Target PowerVR device + * @group_mask: New log group mask. + * + * Returns: + * * 0 on success, + * * Any error returned by pvr_kccb_send_cmd(), or + * * -%EIO if the device is lost. + */ +static int +update_logtype(struct pvr_device *pvr_dev, u32 group_mask) +{ + struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace; + struct rogue_fwif_kccb_cmd cmd; + int idx; + int err; + + if (group_mask) + fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_TRACE | group_mask; + else + fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_NONE; + + fw_trace->group_mask = group_mask; + + down_read(&pvr_dev->reset_sem); + if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) { + err = -EIO; + goto err_up_read; + } + + cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE; + cmd.kccb_flags = 0; + + err = pvr_kccb_send_cmd(pvr_dev, &cmd, NULL); + + drm_dev_exit(idx); + +err_up_read: + up_read(&pvr_dev->reset_sem); + + return err; +} + +struct pvr_fw_trace_seq_data { + /** @buffer: Pointer to copy of trace data. */ + u32 *buffer; + + /** @start_offset: Starting offset in trace data, as reported by FW. */ + u32 start_offset; + + /** @idx: Current index into trace data. */ + u32 idx; + + /** @assert_buf: Trace assert buffer, as reported by FW. 
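+	 * An ASSERT_FAILED entry terminates the trace: fw_trace_get_next()
+	 * stops there, and fw_trace_seq_show() prints this buffer instead of
+	 * a normal format string.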
*/ + struct rogue_fwif_file_info_buf assert_buf; +}; + +static u32 find_sfid(u32 id) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(stid_fmts); i++) { + if (stid_fmts[i].id == id) + return i; + } + + return ROGUE_FW_SF_LAST; +} + +static u32 read_fw_trace(struct pvr_fw_trace_seq_data *trace_seq_data, u32 offset) +{ + u32 idx; + + idx = trace_seq_data->idx + offset; + if (idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) + return 0; + + idx = (idx + trace_seq_data->start_offset) % ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS; + return trace_seq_data->buffer[idx]; +} + +/** + * fw_trace_get_next() - Advance trace index to next entry + * @trace_seq_data: Trace sequence data. + * + * Returns: + * * %true if trace index is now pointing to a valid entry, or + * * %false if trace index is pointing to an invalid entry, or has hit the end + * of the trace. + */ +static bool fw_trace_get_next(struct pvr_fw_trace_seq_data *trace_seq_data) +{ + u32 id, sf_id; + + while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) { + id = read_fw_trace(trace_seq_data, 0); + trace_seq_data->idx++; + if (!ROGUE_FW_LOG_VALIDID(id)) + continue; + if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) { + /* Assertion failure marks the end of the trace. */ + return false; + } + + sf_id = find_sfid(id); + if (sf_id == ROGUE_FW_SF_FIRST) + continue; + if (sf_id == ROGUE_FW_SF_LAST) { + /* + * Could not match with an ID in the SF table, trace is + * most likely corrupt from this point. + */ + return false; + } + + /* Skip over the timestamp, and any parameters. */ + trace_seq_data->idx += 2 + ROGUE_FW_SF_PARAMNUM(id); + + /* Ensure index is now pointing to a valid trace entry. */ + id = read_fw_trace(trace_seq_data, 0); + if (!ROGUE_FW_LOG_VALIDID(id)) + continue; + + return true; + } + + /* Hit end of trace data. */ + return false; +} + +/** + * fw_trace_get_first() - Find first valid entry in trace + * @trace_seq_data: Trace sequence data. + * + * Skips over invalid (usually zero) and ROGUE_FW_SF_FIRST entries. + * + * If the trace has no valid entries, this function will exit with the trace + * index pointing to the end of the trace. trace_seq_show() will return an error + * in this state. + */ +static void fw_trace_get_first(struct pvr_fw_trace_seq_data *trace_seq_data) +{ + trace_seq_data->idx = 0; + + while (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) { + u32 id = read_fw_trace(trace_seq_data, 0); + + if (ROGUE_FW_LOG_VALIDID(id)) { + u32 sf_id = find_sfid(id); + + if (sf_id != ROGUE_FW_SF_FIRST) + break; + } + trace_seq_data->idx++; + } +} + +static void *fw_trace_seq_start(struct seq_file *s, loff_t *pos) +{ + struct pvr_fw_trace_seq_data *trace_seq_data = s->private; + u32 i; + + /* Reset trace index, then advance to *pos. */ + fw_trace_get_first(trace_seq_data); + + for (i = 0; i < *pos; i++) { + if (!fw_trace_get_next(trace_seq_data)) + return NULL; + } + + return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? pos : NULL; +} + +static void *fw_trace_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct pvr_fw_trace_seq_data *trace_seq_data = s->private; + + (*pos)++; + if (!fw_trace_get_next(trace_seq_data)) + return NULL; + + return (trace_seq_data->idx < ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) ? 
pos : NULL; +} + +static void fw_trace_seq_stop(struct seq_file *s, void *v) +{ +} + +static int fw_trace_seq_show(struct seq_file *s, void *v) +{ + struct pvr_fw_trace_seq_data *trace_seq_data = s->private; + u64 timestamp; + u32 id; + u32 sf_id; + + if (trace_seq_data->idx >= ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS) + return -EINVAL; + + id = read_fw_trace(trace_seq_data, 0); + /* Index is not pointing at a valid entry. */ + if (!ROGUE_FW_LOG_VALIDID(id)) + return -EINVAL; + + sf_id = find_sfid(id); + /* Index is not pointing at a valid entry. */ + if (sf_id == ROGUE_FW_SF_LAST) + return -EINVAL; + + timestamp = read_fw_trace(trace_seq_data, 1) | + ((u64)read_fw_trace(trace_seq_data, 2) << 32); + timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >> + ROGUE_FWT_TIMESTAMP_TIME_SHIFT; + + seq_printf(s, "[%llu] : ", timestamp); + if (id == ROGUE_FW_SF_MAIN_ASSERT_FAILED) { + seq_printf(s, "ASSERTION %s failed at %s:%u", + trace_seq_data->assert_buf.info, + trace_seq_data->assert_buf.path, + trace_seq_data->assert_buf.line_num); + } else { + seq_printf(s, stid_fmts[sf_id].name, + read_fw_trace(trace_seq_data, 3), + read_fw_trace(trace_seq_data, 4), + read_fw_trace(trace_seq_data, 5), + read_fw_trace(trace_seq_data, 6), + read_fw_trace(trace_seq_data, 7), + read_fw_trace(trace_seq_data, 8), + read_fw_trace(trace_seq_data, 9), + read_fw_trace(trace_seq_data, 10), + read_fw_trace(trace_seq_data, 11), + read_fw_trace(trace_seq_data, 12), + read_fw_trace(trace_seq_data, 13), + read_fw_trace(trace_seq_data, 14), + read_fw_trace(trace_seq_data, 15), + read_fw_trace(trace_seq_data, 16), + read_fw_trace(trace_seq_data, 17), + read_fw_trace(trace_seq_data, 18), + read_fw_trace(trace_seq_data, 19), + read_fw_trace(trace_seq_data, 20), + read_fw_trace(trace_seq_data, 21), + read_fw_trace(trace_seq_data, 22)); + } + seq_puts(s, "\n"); + return 0; +} + +static const struct seq_operations pvr_fw_trace_seq_ops = { + .start = fw_trace_seq_start, + .next = fw_trace_seq_next, + .stop = fw_trace_seq_stop, + .show = fw_trace_seq_show +}; + +static int fw_trace_open(struct inode *inode, struct file *file) +{ + struct pvr_fw_trace_buffer *trace_buffer = inode->i_private; + struct rogue_fwif_tracebuf_space *tracebuf_space = + trace_buffer->tracebuf_space; + struct pvr_fw_trace_seq_data *trace_seq_data; + int err; + + trace_seq_data = kzalloc(sizeof(*trace_seq_data), GFP_KERNEL); + if (!trace_seq_data) + return -ENOMEM; + + trace_seq_data->buffer = kcalloc(ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS, + sizeof(*trace_seq_data->buffer), GFP_KERNEL); + if (!trace_seq_data->buffer) { + err = -ENOMEM; + goto err_free_data; + } + + /* + * Take a local copy of the trace buffer, as firmware may still be + * writing to it. This will exist as long as this file is open. 
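+	 * The copy is not synchronized against the firmware: READ_ONCE() on
+	 * trace_pointer below presumably only guarantees a torn-free snapshot
+	 * of the ring write offset, while any half-written entries in the
+	 * copied data are later rejected by the ROGUE_FW_LOG_VALIDID() checks.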
+ */ + memcpy(trace_seq_data->buffer, trace_buffer->buf, + ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS * sizeof(u32)); + trace_seq_data->start_offset = READ_ONCE(tracebuf_space->trace_pointer); + trace_seq_data->assert_buf = tracebuf_space->assert_buf; + fw_trace_get_first(trace_seq_data); + + err = seq_open(file, &pvr_fw_trace_seq_ops); + if (err) + goto err_free_buffer; + + ((struct seq_file *)file->private_data)->private = trace_seq_data; + + return 0; + +err_free_buffer: + kfree(trace_seq_data->buffer); + +err_free_data: + kfree(trace_seq_data); + + return err; +} + +static int fw_trace_release(struct inode *inode, struct file *file) +{ + struct pvr_fw_trace_seq_data *trace_seq_data = + ((struct seq_file *)file->private_data)->private; + + seq_release(inode, file); + kfree(trace_seq_data->buffer); + kfree(trace_seq_data); + + return 0; +} + +static const struct file_operations pvr_fw_trace_fops = { + .owner = THIS_MODULE, + .open = fw_trace_open, + .read = seq_read, + .llseek = seq_lseek, + .release = fw_trace_release, +}; + +void +pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, u32 new_mask) +{ + if (old_mask != new_mask) + update_logtype(pvr_dev, new_mask); +} + +void +pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir) +{ + struct pvr_fw_trace *fw_trace = &pvr_dev->fw_dev.fw_trace; + u32 thread_nr; + + static_assert(ARRAY_SIZE(fw_trace->buffers) <= 10, + "The filename buffer is only large enough for a single-digit thread count"); + + for (thread_nr = 0; thread_nr < ARRAY_SIZE(fw_trace->buffers); ++thread_nr) { + char filename[8]; + + snprintf(filename, ARRAY_SIZE(filename), "trace_%u", thread_nr); + debugfs_create_file(filename, 0400, dir, + &fw_trace->buffers[thread_nr], + &pvr_fw_trace_fops); + } +} +#endif diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.h b/drivers/gpu/drm/imagination/pvr_fw_trace.h new file mode 100644 index 0000000000..0074d2b18d --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_fw_trace.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_FW_TRACE_H +#define PVR_FW_TRACE_H + +#include +#include + +#include "pvr_rogue_fwif.h" + +/* Forward declaration from pvr_device.h. */ +struct pvr_device; + +/* Forward declaration from pvr_gem.h. */ +struct pvr_fw_object; + +/* Forward declarations from pvr_rogue_fwif.h */ +struct rogue_fwif_tracebuf; +struct rogue_fwif_tracebuf_space; + +/** + * struct pvr_fw_trace_buffer - Structure representing a trace buffer + */ +struct pvr_fw_trace_buffer { + /** @buf_obj: FW buffer object representing trace buffer. */ + struct pvr_fw_object *buf_obj; + + /** @buf: Pointer to CPU mapping of trace buffer. */ + u32 *buf; + + /** + * @tracebuf_space: Pointer to FW tracebuf_space structure for this + * trace buffer. + */ + struct rogue_fwif_tracebuf_space *tracebuf_space; +}; + +/** + * struct pvr_fw_trace - Device firmware trace data + */ +struct pvr_fw_trace { + /** + * @tracebuf_ctrl_obj: Object representing FW trace buffer control + * structure. + */ + struct pvr_fw_object *tracebuf_ctrl_obj; + + /** + * @tracebuf_ctrl: Pointer to CPU mapping of FW trace buffer control + * structure. + */ + struct rogue_fwif_tracebuf *tracebuf_ctrl; + + /** + * @buffers: Array representing the actual trace buffers owned by this + * device. + */ + struct pvr_fw_trace_buffer buffers[ROGUE_FW_THREAD_MAX]; + + /** @group_mask: Mask of enabled trace groups. 
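+	 * Zero means tracing is disabled; a non-zero mask is OR'd with
+	 * ROGUE_FWIF_LOG_TYPE_TRACE when the firmware log type is programmed
+	 * (see tracebuf_ctrl_init() and update_logtype()).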
*/ + u32 group_mask; +}; + +int pvr_fw_trace_init(struct pvr_device *pvr_dev); +void pvr_fw_trace_fini(struct pvr_device *pvr_dev); + +#if defined(CONFIG_DEBUG_FS) +/* Forward declaration from . */ +struct dentry; + +void pvr_fw_trace_mask_update(struct pvr_device *pvr_dev, u32 old_mask, + u32 new_mask); + +void pvr_fw_trace_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir); +#endif /* defined(CONFIG_DEBUG_FS) */ + +#endif /* PVR_FW_TRACE_H */ diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c new file mode 100644 index 0000000000..6a8c81fe8c --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_gem.c @@ -0,0 +1,414 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#include "pvr_device.h" +#include "pvr_gem.h" +#include "pvr_vm.h" + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void pvr_gem_object_free(struct drm_gem_object *obj) +{ + drm_gem_shmem_object_free(obj); +} + +static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma) +{ + struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj); + struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj); + + if (!(pvr_obj->flags & DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS)) + return -EINVAL; + + return drm_gem_shmem_mmap(shmem_obj, vma); +} + +static const struct drm_gem_object_funcs pvr_gem_object_funcs = { + .free = pvr_gem_object_free, + .print_info = drm_gem_shmem_object_print_info, + .pin = drm_gem_shmem_object_pin, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = pvr_gem_mmap, + .vm_ops = &drm_gem_shmem_vm_ops, +}; + +/** + * pvr_gem_object_flags_validate() - Verify that a collection of PowerVR GEM + * mapping and/or creation flags form a valid combination. + * @flags: PowerVR GEM mapping/creation flags to validate. + * + * This function explicitly allows kernel-only flags. All ioctl entrypoints + * should do their own validation as well as relying on this function. + * + * Return: + * * %true if @flags contains valid mapping and/or creation flags, or + * * %false otherwise. + */ +static bool +pvr_gem_object_flags_validate(u64 flags) +{ + static const u64 invalid_combinations[] = { + /* + * Memory flagged as PM/FW-protected cannot be mapped to + * userspace. To make this explicit, we require that the two + * flags allowing each of these respective features are never + * specified together. + */ + (DRM_PVR_BO_PM_FW_PROTECT | + DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS), + }; + + int i; + + /* + * Check for bits set in undefined regions. Reserved regions refer to + * options that can only be set by the kernel. These are explicitly + * allowed in most cases, and must be checked specifically in IOCTL + * callback code. + */ + if ((flags & PVR_BO_UNDEFINED_MASK) != 0) + return false; + + /* + * Check for all combinations of flags marked as invalid in the array + * above. + */ + for (i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) { + u64 combo = invalid_combinations[i]; + + if ((flags & combo) == combo) + return false; + } + + return true; +} + +/** + * pvr_gem_object_into_handle() - Convert a reference to an object into a + * userspace-accessible handle. + * @pvr_obj: [IN] Target PowerVR-specific object. 
+ * @pvr_file: [IN] File to associate the handle with. + * @handle: [OUT] Pointer to store the created handle in. Remains unmodified if + * an error is encountered. + * + * If an error is encountered, ownership of @pvr_obj will not have been + * transferred. If this function succeeds, however, further use of @pvr_obj is + * considered undefined behaviour unless another reference to it is explicitly + * held. + * + * Return: + * * 0 on success, or + * * Any error encountered while attempting to allocate a handle on @pvr_file. + */ +int +pvr_gem_object_into_handle(struct pvr_gem_object *pvr_obj, + struct pvr_file *pvr_file, u32 *handle) +{ + struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj); + struct drm_file *file = from_pvr_file(pvr_file); + + u32 new_handle; + int err; + + err = drm_gem_handle_create(file, gem_obj, &new_handle); + if (err) + return err; + + /* + * Release our reference to @pvr_obj, effectively transferring + * ownership to the handle. + */ + pvr_gem_object_put(pvr_obj); + + /* + * Do not store the new handle in @handle until no more errors can + * occur. + */ + *handle = new_handle; + + return 0; +} + +/** + * pvr_gem_object_from_handle() - Obtain a reference to an object from a + * userspace handle. + * @pvr_file: PowerVR-specific file to which @handle is associated. + * @handle: Userspace handle referencing the target object. + * + * On return, @handle always maintains its reference to the requested object + * (if it had one in the first place). If this function succeeds, the returned + * object will hold an additional reference. When the caller is finished with + * the returned object, they should call pvr_gem_object_put() on it to release + * this reference. + * + * Return: + * * A pointer to the requested PowerVR-specific object on success, or + * * %NULL otherwise. + */ +struct pvr_gem_object * +pvr_gem_object_from_handle(struct pvr_file *pvr_file, u32 handle) +{ + struct drm_file *file = from_pvr_file(pvr_file); + struct drm_gem_object *gem_obj; + + gem_obj = drm_gem_object_lookup(file, handle); + if (!gem_obj) + return NULL; + + return gem_to_pvr_gem(gem_obj); +} + +/** + * pvr_gem_object_vmap() - Map a PowerVR GEM object into CPU virtual address + * space. + * @pvr_obj: Target PowerVR GEM object. + * + * Once the caller is finished with the CPU mapping, they must call + * pvr_gem_object_vunmap() on @pvr_obj. + * + * If @pvr_obj is CPU-cached, dma_sync_sgtable_for_cpu() is called to make + * sure the CPU mapping is consistent. + * + * Return: + * * A pointer to the CPU mapping on success, + * * -%ENOMEM if the mapping fails, or + * * Any error encountered while attempting to acquire a reference to the + * backing pages for @pvr_obj. + */ +void * +pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj) +{ + struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj); + struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj); + struct iosys_map map; + int err; + + dma_resv_lock(obj->resv, NULL); + + err = drm_gem_shmem_vmap(shmem_obj, &map); + if (err) + goto err_unlock; + + if (pvr_obj->flags & PVR_BO_CPU_CACHED) { + struct device *dev = shmem_obj->base.dev->dev; + + /* If shmem_obj->sgt is NULL, that means the buffer hasn't been mapped + * in GPU space yet. 
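+		 * Once an sgt exists, the sync below hands ownership of the
+		 * pages back to the CPU so that cached CPU accesses observe
+		 * any device writes.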
+		 */
+		if (shmem_obj->sgt)
+			dma_sync_sgtable_for_cpu(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+	}
+
+	dma_resv_unlock(obj->resv);
+
+	return map.vaddr;
+
+err_unlock:
+	dma_resv_unlock(obj->resv);
+
+	return ERR_PTR(err);
+}
+
+/**
+ * pvr_gem_object_vunmap() - Unmap a PowerVR memory object from CPU virtual
+ * address space.
+ * @pvr_obj: Target PowerVR GEM object.
+ *
+ * If @pvr_obj is CPU-cached, dma_sync_sgtable_for_device() is called to make
+ * sure the GPU mapping is consistent.
+ */
+void
+pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
+{
+	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
+	struct iosys_map map = IOSYS_MAP_INIT_VADDR(shmem_obj->vaddr);
+	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
+
+	if (WARN_ON(!map.vaddr))
+		return;
+
+	dma_resv_lock(obj->resv, NULL);
+
+	if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
+		struct device *dev = shmem_obj->base.dev->dev;
+
+		/* If shmem_obj->sgt is NULL, that means the buffer hasn't been mapped
+		 * in GPU space yet.
+		 */
+		if (shmem_obj->sgt)
+			dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+	}
+
+	drm_gem_shmem_vunmap(shmem_obj, &map);
+
+	dma_resv_unlock(obj->resv);
+}
+
+/**
+ * pvr_gem_object_zero() - Zeroes the physical memory behind an object.
+ * @pvr_obj: Target PowerVR GEM object.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error encountered while attempting to map @pvr_obj to the CPU (see
+ *    pvr_gem_object_vmap()).
+ */
+static int
+pvr_gem_object_zero(struct pvr_gem_object *pvr_obj)
+{
+	void *cpu_ptr;
+
+	cpu_ptr = pvr_gem_object_vmap(pvr_obj);
+	if (IS_ERR(cpu_ptr))
+		return PTR_ERR(cpu_ptr);
+
+	memset(cpu_ptr, 0, pvr_gem_object_size(pvr_obj));
+
+	/* Make sure the zero-ing is done before vunmap-ing the object. */
+	wmb();
+
+	pvr_gem_object_vunmap(pvr_obj);
+
+	return 0;
+}
+
+/**
+ * pvr_gem_create_object() - Allocate and pre-initialize a pvr_gem_object
+ * @drm_dev: DRM device creating this object.
+ * @size: Size of the object to allocate in bytes.
+ *
+ * Return:
+ *  * The new pre-initialized GEM object on success, or
+ *  * -%ENOMEM if the allocation failed.
+ */
+struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t size)
+{
+	struct drm_gem_object *gem_obj;
+	struct pvr_gem_object *pvr_obj;
+
+	pvr_obj = kzalloc(sizeof(*pvr_obj), GFP_KERNEL);
+	if (!pvr_obj)
+		return ERR_PTR(-ENOMEM);
+
+	gem_obj = gem_from_pvr_gem(pvr_obj);
+	gem_obj->funcs = &pvr_gem_object_funcs;
+
+	return gem_obj;
+}
+
+/**
+ * pvr_gem_object_create() - Creates a PowerVR-specific buffer object.
+ * @pvr_dev: Target PowerVR device.
+ * @size: Size of the object to allocate in bytes. Must be greater than zero.
+ * Any value which is not an exact multiple of the system page size will be
+ * rounded up to satisfy this condition.
+ * @flags: Options which affect both this operation and future mapping
+ * operations performed on the returned object. Must be a combination of
+ * DRM_PVR_BO_* and/or PVR_BO_* flags.
+ *
+ * The created object may be larger than @size, but can never be smaller. To
+ * get the exact size, call pvr_gem_object_size() on the returned pointer.
+ *
+ * Return:
+ *  * The newly-minted PowerVR-specific buffer object on success,
+ *  * -%EINVAL if @size is zero or @flags is not valid,
+ *  * -%ENOMEM if sufficient physical memory cannot be allocated, or
+ *  * Any other error returned by drm_gem_create_mmap_offset().
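+ *
+ * A minimal usage sketch (hypothetical in-kernel caller; the size and flags
+ * here are illustrative only):
+ *
+ *    struct pvr_gem_object *pvr_obj =
+ *        pvr_gem_object_create(pvr_dev, SZ_4K,
+ *                              DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS);
+ *
+ *    if (IS_ERR(pvr_obj))
+ *        return PTR_ERR(pvr_obj);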
+ */ +struct pvr_gem_object * +pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags) +{ + struct drm_gem_shmem_object *shmem_obj; + struct pvr_gem_object *pvr_obj; + struct sg_table *sgt; + int err; + + /* Verify @size and @flags before continuing. */ + if (size == 0 || !pvr_gem_object_flags_validate(flags)) + return ERR_PTR(-EINVAL); + + shmem_obj = drm_gem_shmem_create(from_pvr_device(pvr_dev), size); + if (IS_ERR(shmem_obj)) + return ERR_CAST(shmem_obj); + + shmem_obj->pages_mark_dirty_on_put = true; + shmem_obj->map_wc = !(flags & PVR_BO_CPU_CACHED); + pvr_obj = shmem_gem_to_pvr_gem(shmem_obj); + pvr_obj->flags = flags; + + sgt = drm_gem_shmem_get_pages_sgt(shmem_obj); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto err_shmem_object_free; + } + + dma_sync_sgtable_for_device(shmem_obj->base.dev->dev, sgt, + DMA_BIDIRECTIONAL); + + /* + * Do this last because pvr_gem_object_zero() requires a fully + * configured instance of struct pvr_gem_object. + */ + pvr_gem_object_zero(pvr_obj); + + return pvr_obj; + +err_shmem_object_free: + drm_gem_shmem_free(shmem_obj); + + return ERR_PTR(err); +} + +/** + * pvr_gem_get_dma_addr() - Get DMA address for given offset in object + * @pvr_obj: Pointer to object to lookup address in. + * @offset: Offset within object to lookup address at. + * @dma_addr_out: Pointer to location to store DMA address. + * + * Returns: + * * 0 on success, or + * * -%EINVAL if object is not currently backed, or if @offset is out of valid + * range for this object. + */ +int +pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset, + dma_addr_t *dma_addr_out) +{ + struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj); + u32 accumulated_offset = 0; + struct scatterlist *sgl; + unsigned int sgt_idx; + + WARN_ON(!shmem_obj->sgt); + for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, sgt_idx) { + u32 new_offset = accumulated_offset + sg_dma_len(sgl); + + if (offset >= accumulated_offset && offset < new_offset) { + *dma_addr_out = sg_dma_address(sgl) + + (offset - accumulated_offset); + return 0; + } + + accumulated_offset = new_offset; + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/imagination/pvr_gem.h b/drivers/gpu/drm/imagination/pvr_gem.h new file mode 100644 index 0000000000..e0e5ea509a --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_gem.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_GEM_H +#define PVR_GEM_H + +#include "pvr_rogue_heap_config.h" +#include "pvr_rogue_meta.h" + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Forward declaration from "pvr_device.h". */ +struct pvr_device; +struct pvr_file; + +/** + * DOC: Flags for DRM_IOCTL_PVR_CREATE_BO (kernel-only) + * + * Kernel-only values allowed in &pvr_gem_object->flags. The majority of options + * for this field are specified in the UAPI header "pvr_drm.h" with a + * DRM_PVR_BO_ prefix. To distinguish these internal options (which must exist + * in ranges marked as "reserved" in the UAPI header), we drop the DRM prefix. + * The public options should be used directly, DRM prefix and all. + * + * To avoid potentially confusing gaps in the UAPI options, these kernel-only + * options are specified "in reverse", starting at bit 63. + * + * We use "reserved" to refer to bits defined here and not exposed in the UAPI. + * Bits not defined anywhere are "undefined". 
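+ *
+ * For example: bit 63 (PVR_BO_CPU_CACHED below) and bit 62
+ * (PVR_BO_FW_NO_CLEAR_ON_RESET below) are reserved, bits 61..3 are
+ * undefined, and bits 2..0 carry the DRM_PVR_BO_* UAPI flags.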
+ *
+ * CPU mapping options
+ *    :PVR_BO_CPU_CACHED: By default, all GEM objects are mapped write-combined
+ *       on the CPU. Set this flag to override this behaviour and map the
+ *       object cached.
+ *
+ * Firmware options
+ *    :PVR_BO_FW_NO_CLEAR_ON_RESET: By default, all FW objects are cleared and
+ *       reinitialised on hard reset. Set this flag to override this behaviour
+ *       and preserve buffer contents on reset.
+ */
+#define PVR_BO_CPU_CACHED BIT_ULL(63)
+
+#define PVR_BO_FW_NO_CLEAR_ON_RESET BIT_ULL(62)
+
+#define PVR_BO_KERNEL_FLAGS_MASK (PVR_BO_CPU_CACHED | PVR_BO_FW_NO_CLEAR_ON_RESET)
+
+/* Bits 61..3 are undefined. */
+/* Bits 2..0 are defined in the UAPI. */
+
+/* Other utilities. */
+#define PVR_BO_UNDEFINED_MASK ~(PVR_BO_KERNEL_FLAGS_MASK | DRM_PVR_BO_FLAGS_MASK)
+
+/*
+ * All firmware-mapped memory uses (mostly) the same flags. Specifically,
+ * firmware-mapped memory should be:
+ *  * Read/write on the device,
+ *  * Read/write on the CPU, and
+ *  * Write-combined on the CPU.
+ *
+ * The only variation is in caching on the device.
+ */
+#define PVR_BO_FW_FLAGS_DEVICE_CACHED (ULL(0))
+#define PVR_BO_FW_FLAGS_DEVICE_UNCACHED DRM_PVR_BO_BYPASS_DEVICE_CACHE
+
+/**
+ * struct pvr_gem_object - powervr-specific wrapper for &struct drm_gem_object
+ */
+struct pvr_gem_object {
+	/**
+	 * @base: The underlying &struct drm_gem_shmem_object.
+	 *
+	 * Do not access this member directly, instead call
+	 * shmem_gem_from_pvr_gem().
+	 */
+	struct drm_gem_shmem_object base;
+
+	/**
+	 * @flags: Options set at creation-time. Some of these options apply to
+	 * the creation operation itself (which are stored here for reference)
+	 * with the remainder used for mapping options to both the device and
+	 * CPU. These are used every time this object is mapped, and must not
+	 * be changed after creation.
+	 *
+	 * Must be a combination of DRM_PVR_BO_* and/or PVR_BO_* flags.
+	 *
+	 * .. note::
+	 *
+	 *    Although this member is not declared const, it must be treated
+	 *    as immutable: none of these options may change or be changed
+	 *    throughout the object's lifetime.
+	 */
+	u64 flags;
+
+};
+
+static_assert(offsetof(struct pvr_gem_object, base) == 0,
+	      "offsetof(struct pvr_gem_object, base) not zero");
+
+#define shmem_gem_from_pvr_gem(pvr_obj) (&(pvr_obj)->base)
+
+#define shmem_gem_to_pvr_gem(shmem_obj) container_of_const(shmem_obj, struct pvr_gem_object, base)
+
+#define gem_from_pvr_gem(pvr_obj) (&(pvr_obj)->base.base)
+
+#define gem_to_pvr_gem(gem_obj) container_of_const(gem_obj, struct pvr_gem_object, base.base)
+
+/* Functions defined in pvr_gem.c */
+
+struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t size);
+
+struct pvr_gem_object *pvr_gem_object_create(struct pvr_device *pvr_dev,
+					     size_t size, u64 flags);
+
+int pvr_gem_object_into_handle(struct pvr_gem_object *pvr_obj,
+			       struct pvr_file *pvr_file, u32 *handle);
+struct pvr_gem_object *pvr_gem_object_from_handle(struct pvr_file *pvr_file,
+						  u32 handle);
+
+static __always_inline struct sg_table *
+pvr_gem_object_get_pages_sgt(struct pvr_gem_object *pvr_obj)
+{
+	return drm_gem_shmem_get_pages_sgt(shmem_gem_from_pvr_gem(pvr_obj));
+}
+
+void *pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj);
+void pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj);
+
+int pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset,
+			 dma_addr_t *dma_addr_out);
+
+/**
+ * pvr_gem_object_get() - Acquire reference on pvr_gem_object
+ * @pvr_obj: Pointer to object to acquire reference on.
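+ *
+ * Each reference taken here must be balanced by a matching
+ * pvr_gem_object_put() call once the caller is done with the object.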
+ */ +static __always_inline void +pvr_gem_object_get(struct pvr_gem_object *pvr_obj) +{ + drm_gem_object_get(gem_from_pvr_gem(pvr_obj)); +} + +/** + * pvr_gem_object_put() - Release reference on pvr_gem_object + * @pvr_obj: Pointer to object to release reference on. + */ +static __always_inline void +pvr_gem_object_put(struct pvr_gem_object *pvr_obj) +{ + drm_gem_object_put(gem_from_pvr_gem(pvr_obj)); +} + +static __always_inline size_t +pvr_gem_object_size(struct pvr_gem_object *pvr_obj) +{ + return gem_from_pvr_gem(pvr_obj)->size; +} + +#endif /* PVR_GEM_H */ diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.c b/drivers/gpu/drm/imagination/pvr_hwrt.c new file mode 100644 index 0000000000..54f88d6c01 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_hwrt.c @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#include "pvr_free_list.h" +#include "pvr_hwrt.h" +#include "pvr_gem.h" +#include "pvr_rogue_cr_defs_client.h" +#include "pvr_rogue_fwif.h" + +#include +#include +#include +#include +#include +#include + +static_assert(ROGUE_FWIF_NUM_RTDATAS == 2); +static_assert(ROGUE_FWIF_NUM_GEOMDATAS == 1); +static_assert(ROGUE_FWIF_NUM_RTDATA_FREELISTS == 2); + +/* + * struct pvr_rt_mtile_info - Render target macrotile information + */ +struct pvr_rt_mtile_info { + u32 mtile_x[3]; + u32 mtile_y[3]; + u32 tile_max_x; + u32 tile_max_y; + u32 tile_size_x; + u32 tile_size_y; + u32 num_tiles_x; + u32 num_tiles_y; +}; + +/* Size of Shadow Render Target Cache entry */ +#define SRTC_ENTRY_SIZE sizeof(u32) +/* Size of Renders Accumulation Array entry */ +#define RAA_ENTRY_SIZE sizeof(u32) + +static int +hwrt_init_kernel_structure(struct pvr_file *pvr_file, + struct drm_pvr_ioctl_create_hwrt_dataset_args *args, + struct pvr_hwrt_dataset *hwrt) +{ + struct pvr_device *pvr_dev = pvr_file->pvr_dev; + int err; + int i; + + hwrt->pvr_dev = pvr_dev; + hwrt->max_rts = args->layers; + + /* Get pointers to the free lists */ + for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) { + hwrt->free_lists[i] = pvr_free_list_lookup(pvr_file, args->free_list_handles[i]); + if (!hwrt->free_lists[i]) { + err = -EINVAL; + goto err_put_free_lists; + } + } + + if (hwrt->free_lists[ROGUE_FW_LOCAL_FREELIST]->current_pages < + pvr_get_free_list_min_pages(pvr_dev)) { + err = -EINVAL; + goto err_put_free_lists; + } + + return 0; + +err_put_free_lists: + for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) { + pvr_free_list_put(hwrt->free_lists[i]); + hwrt->free_lists[i] = NULL; + } + + return err; +} + +static void +hwrt_fini_kernel_structure(struct pvr_hwrt_dataset *hwrt) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hwrt->free_lists); i++) { + pvr_free_list_put(hwrt->free_lists[i]); + hwrt->free_lists[i] = NULL; + } +} + +static void +hwrt_fini_common_fw_structure(struct pvr_hwrt_dataset *hwrt) +{ + pvr_fw_object_destroy(hwrt->common_fw_obj); +} + +static int +get_cr_isp_mtile_size_val(struct pvr_device *pvr_dev, u32 samples, + struct pvr_rt_mtile_info *info, u32 *value_out) +{ + u32 x = info->mtile_x[0]; + u32 y = info->mtile_y[0]; + u32 samples_per_pixel; + int err; + + err = PVR_FEATURE_VALUE(pvr_dev, isp_samples_per_pixel, &samples_per_pixel); + if (err) + return err; + + if (samples_per_pixel == 1) { + if (samples >= 4) + x <<= 1; + if (samples >= 2) + y <<= 1; + } else if (samples_per_pixel == 2) { + if (samples >= 8) + x <<= 1; + if (samples >= 4) + y <<= 1; + } else if (samples_per_pixel == 4) { + if (samples >= 8) + y <<= 1; + } else { + WARN(true, 
"Unsupported ISP samples per pixel value"); + return -EINVAL; + } + + *value_out = ((x << ROGUE_CR_ISP_MTILE_SIZE_X_SHIFT) & ~ROGUE_CR_ISP_MTILE_SIZE_X_CLRMSK) | + ((y << ROGUE_CR_ISP_MTILE_SIZE_Y_SHIFT) & ~ROGUE_CR_ISP_MTILE_SIZE_Y_CLRMSK); + + return 0; +} + +static int +get_cr_multisamplectl_val(u32 samples, bool y_flip, u64 *value_out) +{ + static const struct { + u8 x[8]; + u8 y[8]; + } sample_positions[4] = { + /* 1 sample */ + { + .x = { 8 }, + .y = { 8 }, + }, + /* 2 samples */ + { + .x = { 12, 4 }, + .y = { 12, 4 }, + }, + /* 4 samples */ + { + .x = { 6, 14, 2, 10 }, + .y = { 2, 6, 10, 14 }, + }, + /* 8 samples */ + { + .x = { 9, 7, 13, 5, 3, 1, 11, 15 }, + .y = { 5, 11, 9, 3, 13, 7, 15, 1 }, + }, + }; + const int idx = fls(samples) - 1; + u64 value = 0; + + if (idx < 0 || idx > 3) + return -EINVAL; + + for (u32 i = 0; i < 8; i++) { + value |= ((u64)sample_positions[idx].x[i]) << (i * 8); + if (y_flip) + value |= (((u64)(16 - sample_positions[idx].y[i]) & 0xf)) << (i * 8 + 4); + else + value |= ((u64)sample_positions[idx].y[i]) << (i * 8 + 4); + } + + *value_out = value; + + return 0; +} + +static int +get_cr_te_aa_val(struct pvr_device *pvr_dev, u32 samples, u32 *value_out) +{ + u32 samples_per_pixel; + u32 value = 0; + int err = 0; + + err = PVR_FEATURE_VALUE(pvr_dev, isp_samples_per_pixel, &samples_per_pixel); + if (err) + return err; + + switch (samples_per_pixel) { + case 1: + if (samples >= 2) + value |= ROGUE_CR_TE_AA_Y_EN; + if (samples >= 4) + value |= ROGUE_CR_TE_AA_X_EN; + break; + case 2: + if (samples >= 2) + value |= ROGUE_CR_TE_AA_X2_EN; + if (samples >= 4) + value |= ROGUE_CR_TE_AA_Y_EN; + if (samples >= 8) + value |= ROGUE_CR_TE_AA_X_EN; + break; + case 4: + if (samples >= 2) + value |= ROGUE_CR_TE_AA_X2_EN; + if (samples >= 4) + value |= ROGUE_CR_TE_AA_Y2_EN; + if (samples >= 8) + value |= ROGUE_CR_TE_AA_Y_EN; + break; + default: + WARN(true, "Unsupported ISP samples per pixel value"); + return -EINVAL; + } + + *value_out = value; + + return 0; +} + +static void +hwrtdata_common_init(void *cpu_ptr, void *priv) +{ + struct pvr_hwrt_dataset *hwrt = priv; + + memcpy(cpu_ptr, &hwrt->common, sizeof(hwrt->common)); +} + +static int +hwrt_init_common_fw_structure(struct pvr_file *pvr_file, + struct drm_pvr_ioctl_create_hwrt_dataset_args *args, + struct pvr_hwrt_dataset *hwrt) +{ + struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args; + struct pvr_device *pvr_dev = pvr_file->pvr_dev; + struct pvr_rt_mtile_info info; + int err; + + err = PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &info.tile_size_x); + if (WARN_ON(err)) + return err; + + err = PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &info.tile_size_y); + if (WARN_ON(err)) + return err; + + info.num_tiles_x = DIV_ROUND_UP(args->width, info.tile_size_x); + info.num_tiles_y = DIV_ROUND_UP(args->height, info.tile_size_y); + + if (PVR_HAS_FEATURE(pvr_dev, simple_parameter_format_version)) { + u32 parameter_format; + + err = PVR_FEATURE_VALUE(pvr_dev, simple_parameter_format_version, + ¶meter_format); + if (WARN_ON(err)) + return err; + + WARN_ON(parameter_format != 2); + + /* + * Set up 16 macrotiles with a multiple of 2x2 tiles per macrotile, which is + * aligned to a tile group. 
+ */ + info.mtile_x[0] = DIV_ROUND_UP(info.num_tiles_x, 8) * 2; + info.mtile_y[0] = DIV_ROUND_UP(info.num_tiles_y, 8) * 2; + info.mtile_x[1] = 0; + info.mtile_y[1] = 0; + info.mtile_x[2] = 0; + info.mtile_y[2] = 0; + info.tile_max_x = round_up(info.num_tiles_x, 2) - 1; + info.tile_max_y = round_up(info.num_tiles_y, 2) - 1; + } else { + /* Set up 16 macrotiles with a multiple of 4x4 tiles per macrotile. */ + info.mtile_x[0] = round_up(DIV_ROUND_UP(info.num_tiles_x, 4), 4); + info.mtile_y[0] = round_up(DIV_ROUND_UP(info.num_tiles_y, 4), 4); + info.mtile_x[1] = info.mtile_x[0] * 2; + info.mtile_y[1] = info.mtile_y[0] * 2; + info.mtile_x[2] = info.mtile_x[0] * 3; + info.mtile_y[2] = info.mtile_y[0] * 3; + info.tile_max_x = info.num_tiles_x - 1; + info.tile_max_y = info.num_tiles_y - 1; + } + + hwrt->common.geom_caches_need_zeroing = false; + + hwrt->common.isp_merge_lower_x = args->isp_merge_lower_x; + hwrt->common.isp_merge_lower_y = args->isp_merge_lower_y; + hwrt->common.isp_merge_upper_x = args->isp_merge_upper_x; + hwrt->common.isp_merge_upper_y = args->isp_merge_upper_y; + hwrt->common.isp_merge_scale_x = args->isp_merge_scale_x; + hwrt->common.isp_merge_scale_y = args->isp_merge_scale_y; + + err = get_cr_multisamplectl_val(args->samples, false, + &hwrt->common.multi_sample_ctl); + if (err) + return err; + + err = get_cr_multisamplectl_val(args->samples, true, + &hwrt->common.flipped_multi_sample_ctl); + if (err) + return err; + + hwrt->common.mtile_stride = info.mtile_x[0] * info.mtile_y[0]; + + err = get_cr_te_aa_val(pvr_dev, args->samples, &hwrt->common.teaa); + if (err) + return err; + + hwrt->common.screen_pixel_max = + (((args->width - 1) << ROGUE_CR_PPP_SCREEN_PIXXMAX_SHIFT) & + ~ROGUE_CR_PPP_SCREEN_PIXXMAX_CLRMSK) | + (((args->height - 1) << ROGUE_CR_PPP_SCREEN_PIXYMAX_SHIFT) & + ~ROGUE_CR_PPP_SCREEN_PIXYMAX_CLRMSK); + + hwrt->common.te_screen = + ((info.tile_max_x << ROGUE_CR_TE_SCREEN_XMAX_SHIFT) & + ~ROGUE_CR_TE_SCREEN_XMAX_CLRMSK) | + ((info.tile_max_y << ROGUE_CR_TE_SCREEN_YMAX_SHIFT) & + ~ROGUE_CR_TE_SCREEN_YMAX_CLRMSK); + hwrt->common.te_mtile1 = + ((info.mtile_x[0] << ROGUE_CR_TE_MTILE1_X1_SHIFT) & ~ROGUE_CR_TE_MTILE1_X1_CLRMSK) | + ((info.mtile_x[1] << ROGUE_CR_TE_MTILE1_X2_SHIFT) & ~ROGUE_CR_TE_MTILE1_X2_CLRMSK) | + ((info.mtile_x[2] << ROGUE_CR_TE_MTILE1_X3_SHIFT) & ~ROGUE_CR_TE_MTILE1_X3_CLRMSK); + hwrt->common.te_mtile2 = + ((info.mtile_y[0] << ROGUE_CR_TE_MTILE2_Y1_SHIFT) & ~ROGUE_CR_TE_MTILE2_Y1_CLRMSK) | + ((info.mtile_y[1] << ROGUE_CR_TE_MTILE2_Y2_SHIFT) & ~ROGUE_CR_TE_MTILE2_Y2_CLRMSK) | + ((info.mtile_y[2] << ROGUE_CR_TE_MTILE2_Y3_SHIFT) & ~ROGUE_CR_TE_MTILE2_Y3_CLRMSK); + + err = get_cr_isp_mtile_size_val(pvr_dev, args->samples, &info, + &hwrt->common.isp_mtile_size); + if (err) + return err; + + hwrt->common.tpc_stride = geom_data_args->tpc_stride; + hwrt->common.tpc_size = geom_data_args->tpc_size; + + hwrt->common.rgn_header_size = args->region_header_size; + + err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_hwrtdata_common), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, hwrtdata_common_init, hwrt, + &hwrt->common_fw_obj); + + return err; +} + +static void +hwrt_fw_data_init(void *cpu_ptr, void *priv) +{ + struct pvr_hwrt_data *hwrt_data = priv; + + memcpy(cpu_ptr, &hwrt_data->data, sizeof(hwrt_data->data)); +} + +static int +hwrt_data_init_fw_structure(struct pvr_file *pvr_file, + struct pvr_hwrt_dataset *hwrt, + struct drm_pvr_ioctl_create_hwrt_dataset_args *args, + struct drm_pvr_create_hwrt_rt_data_args *rt_data_args, + struct pvr_hwrt_data 
*hwrt_data) +{ + struct drm_pvr_create_hwrt_geom_data_args *geom_data_args = &args->geom_data_args; + struct pvr_device *pvr_dev = pvr_file->pvr_dev; + struct rogue_fwif_rta_ctl *rta_ctl; + int free_list_i; + int err; + + pvr_fw_object_get_fw_addr(hwrt->common_fw_obj, + &hwrt_data->data.hwrt_data_common_fw_addr); + + for (free_list_i = 0; free_list_i < ARRAY_SIZE(hwrt->free_lists); free_list_i++) { + pvr_fw_object_get_fw_addr(hwrt->free_lists[free_list_i]->fw_obj, + &hwrt_data->data.freelists_fw_addr[free_list_i]); + } + + hwrt_data->data.tail_ptrs_dev_addr = geom_data_args->tpc_dev_addr; + hwrt_data->data.vheap_table_dev_addr = geom_data_args->vheap_table_dev_addr; + hwrt_data->data.rtc_dev_addr = geom_data_args->rtc_dev_addr; + + hwrt_data->data.pm_mlist_dev_addr = rt_data_args->pm_mlist_dev_addr; + hwrt_data->data.macrotile_array_dev_addr = rt_data_args->macrotile_array_dev_addr; + hwrt_data->data.rgn_header_dev_addr = rt_data_args->region_header_dev_addr; + + rta_ctl = &hwrt_data->data.rta_ctl; + + rta_ctl->render_target_index = 0; + rta_ctl->active_render_targets = 0; + rta_ctl->valid_render_targets_fw_addr = 0; + rta_ctl->rta_num_partial_renders_fw_addr = 0; + rta_ctl->max_rts = args->layers; + + if (args->layers > 1) { + err = pvr_fw_object_create(pvr_dev, args->layers * SRTC_ENTRY_SIZE, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &hwrt_data->srtc_obj); + if (err) + return err; + pvr_fw_object_get_fw_addr(hwrt_data->srtc_obj, + &rta_ctl->valid_render_targets_fw_addr); + + err = pvr_fw_object_create(pvr_dev, args->layers * RAA_ENTRY_SIZE, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &hwrt_data->raa_obj); + if (err) + goto err_put_shadow_rt_cache; + pvr_fw_object_get_fw_addr(hwrt_data->raa_obj, + &rta_ctl->rta_num_partial_renders_fw_addr); + } + + err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_hwrtdata), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + hwrt_fw_data_init, hwrt_data, &hwrt_data->fw_obj); + if (err) + goto err_put_raa_obj; + + pvr_free_list_add_hwrt(hwrt->free_lists[0], hwrt_data); + + return 0; + +err_put_raa_obj: + if (args->layers > 1) + pvr_fw_object_destroy(hwrt_data->raa_obj); + +err_put_shadow_rt_cache: + if (args->layers > 1) + pvr_fw_object_destroy(hwrt_data->srtc_obj); + + return err; +} + +static void +hwrt_data_fini_fw_structure(struct pvr_hwrt_dataset *hwrt, int hwrt_nr) +{ + struct pvr_hwrt_data *hwrt_data = &hwrt->data[hwrt_nr]; + + pvr_free_list_remove_hwrt(hwrt->free_lists[0], hwrt_data); + + if (hwrt->max_rts > 1) { + pvr_fw_object_destroy(hwrt_data->raa_obj); + pvr_fw_object_destroy(hwrt_data->srtc_obj); + } + + pvr_fw_object_destroy(hwrt_data->fw_obj); +} + +/** + * pvr_hwrt_dataset_create() - Create a new HWRT dataset + * @pvr_file: Pointer to pvr_file structure. + * @args: Creation arguments from userspace. + * + * Return: + * * Pointer to new HWRT, or + * * ERR_PTR(-%ENOMEM) on out of memory. 
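+ *
+ * On success the caller owns the initial reference on the dataset (see
+ * kref_init() below) and is expected to drop it with pvr_hwrt_dataset_put()
+ * when done.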
+ */ +struct pvr_hwrt_dataset * +pvr_hwrt_dataset_create(struct pvr_file *pvr_file, + struct drm_pvr_ioctl_create_hwrt_dataset_args *args) +{ + struct pvr_hwrt_dataset *hwrt; + int err, i = 0; + + /* Create and fill out the kernel structure */ + hwrt = kzalloc(sizeof(*hwrt), GFP_KERNEL); + + if (!hwrt) + return ERR_PTR(-ENOMEM); + + err = hwrt_init_kernel_structure(pvr_file, args, hwrt); + if (err < 0) + goto err_free; + + err = hwrt_init_common_fw_structure(pvr_file, args, hwrt); + if (err < 0) + goto err_fini_kernel_structure; + + for (; i < ARRAY_SIZE(hwrt->data); i++) { + err = hwrt_data_init_fw_structure(pvr_file, hwrt, args, + &args->rt_data_args[i], + &hwrt->data[i]); + if (err < 0) + goto err_fini_data_structures; + + hwrt->data[i].hwrt_dataset = hwrt; + } + + kref_init(&hwrt->ref_count); + return hwrt; + +err_fini_data_structures: + while (--i >= 0) + hwrt_data_fini_fw_structure(hwrt, i); + +err_fini_kernel_structure: + hwrt_fini_kernel_structure(hwrt); + +err_free: + kfree(hwrt); + + return ERR_PTR(err); +} + +static void +pvr_hwrt_dataset_release(struct kref *ref_count) +{ + struct pvr_hwrt_dataset *hwrt = + container_of(ref_count, struct pvr_hwrt_dataset, ref_count); + + for (int i = ARRAY_SIZE(hwrt->data) - 1; i >= 0; i--) { + WARN_ON(pvr_fw_structure_cleanup(hwrt->pvr_dev, ROGUE_FWIF_CLEANUP_HWRTDATA, + hwrt->data[i].fw_obj, 0)); + hwrt_data_fini_fw_structure(hwrt, i); + } + + hwrt_fini_common_fw_structure(hwrt); + hwrt_fini_kernel_structure(hwrt); + + kfree(hwrt); +} + +/** + * pvr_destroy_hwrt_datasets_for_file: Destroy any HWRT datasets associated + * with the given file. + * @pvr_file: Pointer to pvr_file structure. + * + * Removes all HWRT datasets associated with @pvr_file from the device + * hwrt_dataset list and drops initial references. HWRT datasets will then be + * destroyed once all outstanding references are dropped. + */ +void pvr_destroy_hwrt_datasets_for_file(struct pvr_file *pvr_file) +{ + struct pvr_hwrt_dataset *hwrt; + unsigned long handle; + + xa_for_each(&pvr_file->hwrt_handles, handle, hwrt) { + (void)hwrt; + pvr_hwrt_dataset_put(xa_erase(&pvr_file->hwrt_handles, handle)); + } +} + +/** + * pvr_hwrt_dataset_put() - Release reference on HWRT dataset + * @hwrt: Pointer to HWRT dataset to release reference on + */ +void +pvr_hwrt_dataset_put(struct pvr_hwrt_dataset *hwrt) +{ + if (hwrt) + kref_put(&hwrt->ref_count, pvr_hwrt_dataset_release); +} diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.h b/drivers/gpu/drm/imagination/pvr_hwrt.h new file mode 100644 index 0000000000..676070b20c --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_hwrt.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_HWRT_H +#define PVR_HWRT_H + +#include +#include +#include +#include +#include +#include + +#include "pvr_device.h" +#include "pvr_rogue_fwif_shared.h" + +/* Forward declaration from pvr_free_list.h. */ +struct pvr_free_list; + +/* Forward declaration from pvr_gem.h. */ +struct pvr_fw_object; + +/** + * struct pvr_hwrt_data - structure representing HWRT data + */ +struct pvr_hwrt_data { + /** @fw_obj: FW object representing the FW-side structure. */ + struct pvr_fw_object *fw_obj; + + /** @data: Local copy of FW-side structure. */ + struct rogue_fwif_hwrtdata data; + + /** @freelist_node: List node connecting this HWRT to the local freelist. */ + struct list_head freelist_node; + + /** + * @srtc_obj: FW object representing shadow render target cache. 
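+	 * The cache holds one u32 entry per render target (see
+	 * SRTC_ENTRY_SIZE in pvr_hwrt.c).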
+ * + * Only valid if @max_rts > 1. + */ + struct pvr_fw_object *srtc_obj; + + /** + * @raa_obj: FW object representing renders accumulation array. + * + * Only valid if @max_rts > 1. + */ + struct pvr_fw_object *raa_obj; + + /** @hwrt_dataset: Back pointer to owning HWRT dataset. */ + struct pvr_hwrt_dataset *hwrt_dataset; +}; + +/** + * struct pvr_hwrt_dataset - structure representing a HWRT data set. + */ +struct pvr_hwrt_dataset { + /** @ref_count: Reference count of object. */ + struct kref ref_count; + + /** @pvr_dev: Pointer to device that owns this object. */ + struct pvr_device *pvr_dev; + + /** @common_fw_obj: FW object representing common FW-side structure. */ + struct pvr_fw_object *common_fw_obj; + + /** @common: Common HWRT data. */ + struct rogue_fwif_hwrtdata_common common; + + /** @data: HWRT data structures belonging to this set. */ + struct pvr_hwrt_data data[ROGUE_FWIF_NUM_RTDATAS]; + + /** @free_lists: Free lists used by HWRT data set. */ + struct pvr_free_list *free_lists[ROGUE_FWIF_NUM_RTDATA_FREELISTS]; + + /** @max_rts: Maximum render targets for this HWRT data set. */ + u16 max_rts; +}; + +struct pvr_hwrt_dataset * +pvr_hwrt_dataset_create(struct pvr_file *pvr_file, + struct drm_pvr_ioctl_create_hwrt_dataset_args *args); + +void +pvr_destroy_hwrt_datasets_for_file(struct pvr_file *pvr_file); + +/** + * pvr_hwrt_dataset_lookup() - Lookup HWRT dataset pointer from handle + * @pvr_file: Pointer to pvr_file structure. + * @handle: Object handle. + * + * Takes reference on dataset object. Call pvr_hwrt_dataset_put() to release. + * + * Returns: + * * The requested object on success, or + * * %NULL on failure (object does not exist in list, or is not a HWRT + * dataset) + */ +static __always_inline struct pvr_hwrt_dataset * +pvr_hwrt_dataset_lookup(struct pvr_file *pvr_file, u32 handle) +{ + struct pvr_hwrt_dataset *hwrt; + + xa_lock(&pvr_file->hwrt_handles); + hwrt = xa_load(&pvr_file->hwrt_handles, handle); + + if (hwrt) + kref_get(&hwrt->ref_count); + + xa_unlock(&pvr_file->hwrt_handles); + + return hwrt; +} + +void +pvr_hwrt_dataset_put(struct pvr_hwrt_dataset *hwrt); + +/** + * pvr_hwrt_data_lookup() - Lookup HWRT data pointer from handle and index + * @pvr_file: Pointer to pvr_file structure. + * @handle: Object handle. + * @index: Index of RT data within dataset. + * + * Takes reference on dataset object. Call pvr_hwrt_data_put() to release. 
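+ *
+ * A minimal usage sketch (hypothetical caller, mirroring create_job() in
+ * pvr_job.c):
+ *
+ *    struct pvr_hwrt_data *hwrt;
+ *
+ *    hwrt = pvr_hwrt_data_lookup(pvr_file, handle, index);
+ *    if (!hwrt)
+ *        return -EINVAL;
+ *    ... use hwrt->fw_obj ...
+ *    pvr_hwrt_data_put(hwrt);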
+ * + * Returns: + * * The requested object on success, or + * * %NULL on failure (object does not exist in list, or is not a HWRT + * dataset, or index is out of range) + */ +static __always_inline struct pvr_hwrt_data * +pvr_hwrt_data_lookup(struct pvr_file *pvr_file, u32 handle, u32 index) +{ + struct pvr_hwrt_dataset *hwrt_dataset = pvr_hwrt_dataset_lookup(pvr_file, handle); + + if (hwrt_dataset) { + if (index < ARRAY_SIZE(hwrt_dataset->data)) + return &hwrt_dataset->data[index]; + + pvr_hwrt_dataset_put(hwrt_dataset); + } + + return NULL; +} + +/** + * pvr_hwrt_data_put() - Release reference on HWRT data + * @hwrt: Pointer to HWRT data to release reference on + */ +static __always_inline void +pvr_hwrt_data_put(struct pvr_hwrt_data *hwrt) +{ + if (hwrt) + pvr_hwrt_dataset_put(hwrt->hwrt_dataset); +} + +static __always_inline struct pvr_hwrt_data * +pvr_hwrt_data_get(struct pvr_hwrt_data *hwrt) +{ + if (hwrt) + kref_get(&hwrt->hwrt_dataset->ref_count); + + return hwrt; +} + +#endif /* PVR_HWRT_H */ diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c new file mode 100644 index 0000000000..78c2f3c6dc --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_job.c @@ -0,0 +1,786 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#include "pvr_context.h" +#include "pvr_device.h" +#include "pvr_drv.h" +#include "pvr_gem.h" +#include "pvr_hwrt.h" +#include "pvr_job.h" +#include "pvr_mmu.h" +#include "pvr_power.h" +#include "pvr_rogue_fwif.h" +#include "pvr_rogue_fwif_client.h" +#include "pvr_stream.h" +#include "pvr_stream_defs.h" +#include "pvr_sync.h" + +#include +#include +#include +#include + +static void pvr_job_release(struct kref *kref) +{ + struct pvr_job *job = container_of(kref, struct pvr_job, ref_count); + + xa_erase(&job->pvr_dev->job_ids, job->id); + + pvr_hwrt_data_put(job->hwrt); + pvr_context_put(job->ctx); + + WARN_ON(job->paired_job); + + pvr_queue_job_cleanup(job); + pvr_job_release_pm_ref(job); + + kfree(job->cmd); + kfree(job); +} + +/** + * pvr_job_put() - Release reference on job + * @job: Target job. + */ +void +pvr_job_put(struct pvr_job *job) +{ + if (job) + kref_put(&job->ref_count, pvr_job_release); +} + +/** + * pvr_job_process_stream() - Build job FW structure from stream + * @pvr_dev: Device pointer. + * @cmd_defs: Stream definition. + * @stream: Pointer to command stream. + * @stream_size: Size of command stream, in bytes. + * @job: Pointer to job. + * + * Caller is responsible for freeing the output structure. + * + * Returns: + * * 0 on success, + * * -%ENOMEM on out of memory, or + * * -%EINVAL on malformed stream. 
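+ *
+ * The output buffer is sized from @cmd_defs->dest_size and stored in
+ * @job->cmd; if stream processing fails, it is freed again before returning.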
+ */ +static int +pvr_job_process_stream(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs, + void *stream, u32 stream_size, struct pvr_job *job) +{ + int err; + + job->cmd = kzalloc(cmd_defs->dest_size, GFP_KERNEL); + if (!job->cmd) + return -ENOMEM; + + job->cmd_len = cmd_defs->dest_size; + + err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, job->cmd); + if (err) + kfree(job->cmd); + + return err; +} + +static int pvr_fw_cmd_init(struct pvr_device *pvr_dev, struct pvr_job *job, + const struct pvr_stream_cmd_defs *stream_def, + u64 stream_userptr, u32 stream_len) +{ + void *stream; + int err; + + stream = kzalloc(stream_len, GFP_KERNEL); + if (!stream) + return -ENOMEM; + + if (copy_from_user(stream, u64_to_user_ptr(stream_userptr), stream_len)) { + err = -EFAULT; + goto err_free_stream; + } + + err = pvr_job_process_stream(pvr_dev, stream_def, stream, stream_len, job); + +err_free_stream: + kfree(stream); + + return err; +} + +static u32 +convert_geom_flags(u32 in_flags) +{ + u32 out_flags = 0; + + if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST) + out_flags |= ROGUE_GEOM_FLAGS_FIRSTKICK; + if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST) + out_flags |= ROGUE_GEOM_FLAGS_LASTKICK; + if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE) + out_flags |= ROGUE_GEOM_FLAGS_SINGLE_CORE; + + return out_flags; +} + +static u32 +convert_frag_flags(u32 in_flags) +{ + u32 out_flags = 0; + + if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE) + out_flags |= ROGUE_FRAG_FLAGS_SINGLE_CORE; + if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER) + out_flags |= ROGUE_FRAG_FLAGS_DEPTHBUFFER; + if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER) + out_flags |= ROGUE_FRAG_FLAGS_STENCILBUFFER; + if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP) + out_flags |= ROGUE_FRAG_FLAGS_PREVENT_CDM_OVERLAP; + if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER) + out_flags |= ROGUE_FRAG_FLAGS_SCRATCHBUFFER; + if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS) + out_flags |= ROGUE_FRAG_FLAGS_GET_VIS_RESULTS; + if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE) + out_flags |= ROGUE_FRAG_FLAGS_DISABLE_PIXELMERGE; + + return out_flags; +} + +static int +pvr_geom_job_fw_cmd_init(struct pvr_job *job, + struct drm_pvr_job *args) +{ + struct rogue_fwif_cmd_geom *cmd; + int err; + + if (args->flags & ~DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK) + return -EINVAL; + + if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER) + return -EINVAL; + + if (!job->hwrt) + return -EINVAL; + + job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_GEOM; + err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_geom_stream, + args->cmd_stream, args->cmd_stream_len); + if (err) + return err; + + cmd = job->cmd; + cmd->cmd_shared.cmn.frame_num = 0; + cmd->flags = convert_geom_flags(args->flags); + pvr_fw_object_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr); + return 0; +} + +static int +pvr_frag_job_fw_cmd_init(struct pvr_job *job, + struct drm_pvr_job *args) +{ + struct rogue_fwif_cmd_frag *cmd; + int err; + + if (args->flags & ~DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK) + return -EINVAL; + + if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER) + return -EINVAL; + + if (!job->hwrt) + return -EINVAL; + + job->fw_ccb_cmd_type = (args->flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER) ? 
+ ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR : + ROGUE_FWIF_CCB_CMD_TYPE_FRAG; + err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_frag_stream, + args->cmd_stream, args->cmd_stream_len); + if (err) + return err; + + cmd = job->cmd; + cmd->cmd_shared.cmn.frame_num = 0; + cmd->flags = convert_frag_flags(args->flags); + pvr_fw_object_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr); + return 0; +} + +static u32 +convert_compute_flags(u32 in_flags) +{ + u32 out_flags = 0; + + if (in_flags & DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP) + out_flags |= ROGUE_COMPUTE_FLAG_PREVENT_ALL_OVERLAP; + if (in_flags & DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE) + out_flags |= ROGUE_COMPUTE_FLAG_SINGLE_CORE; + + return out_flags; +} + +static int +pvr_compute_job_fw_cmd_init(struct pvr_job *job, + struct drm_pvr_job *args) +{ + struct rogue_fwif_cmd_compute *cmd; + int err; + + if (args->flags & ~DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK) + return -EINVAL; + + if (job->ctx->type != DRM_PVR_CTX_TYPE_COMPUTE) + return -EINVAL; + + job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_CDM; + err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_compute_stream, + args->cmd_stream, args->cmd_stream_len); + if (err) + return err; + + cmd = job->cmd; + cmd->common.frame_num = 0; + cmd->flags = convert_compute_flags(args->flags); + return 0; +} + +static u32 +convert_transfer_flags(u32 in_flags) +{ + u32 out_flags = 0; + + if (in_flags & DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE) + out_flags |= ROGUE_TRANSFER_FLAGS_SINGLE_CORE; + + return out_flags; +} + +static int +pvr_transfer_job_fw_cmd_init(struct pvr_job *job, + struct drm_pvr_job *args) +{ + struct rogue_fwif_cmd_transfer *cmd; + int err; + + if (args->flags & ~DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK) + return -EINVAL; + + if (job->ctx->type != DRM_PVR_CTX_TYPE_TRANSFER_FRAG) + return -EINVAL; + + job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_TQ_3D; + err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_transfer_stream, + args->cmd_stream, args->cmd_stream_len); + if (err) + return err; + + cmd = job->cmd; + cmd->common.frame_num = 0; + cmd->flags = convert_transfer_flags(args->flags); + return 0; +} + +static int +pvr_job_fw_cmd_init(struct pvr_job *job, + struct drm_pvr_job *args) +{ + switch (args->type) { + case DRM_PVR_JOB_TYPE_GEOMETRY: + return pvr_geom_job_fw_cmd_init(job, args); + + case DRM_PVR_JOB_TYPE_FRAGMENT: + return pvr_frag_job_fw_cmd_init(job, args); + + case DRM_PVR_JOB_TYPE_COMPUTE: + return pvr_compute_job_fw_cmd_init(job, args); + + case DRM_PVR_JOB_TYPE_TRANSFER_FRAG: + return pvr_transfer_job_fw_cmd_init(job, args); + + default: + return -EINVAL; + } +} + +/** + * struct pvr_job_data - Helper container for pairing jobs with the + * sync_ops supplied for them by the user. + */ +struct pvr_job_data { + /** @job: Pointer to the job. */ + struct pvr_job *job; + + /** @sync_ops: Pointer to the sync_ops associated with @job. */ + struct drm_pvr_sync_op *sync_ops; + + /** @sync_op_count: Number of members of @sync_ops. */ + u32 sync_op_count; +}; + +/** + * prepare_job_syncs() - Prepare all sync objects for a single job. + * @pvr_file: PowerVR file. + * @job_data: Precreated job and sync_ops array. + * @signal_array: xarray to receive signal sync objects. + * + * Returns: + * * 0 on success, or + * * Any error code returned by pvr_sync_signal_array_collect_ops(), + * pvr_sync_add_deps_to_job(), drm_sched_job_add_resv_dependencies() or + * pvr_sync_signal_array_update_fences(). 
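All four pvr_*_job_fw_cmd_init() helpers open with the same shape of validation: reject unknown flags, then reject a job submitted on the wrong context type (geometry and fragment jobs additionally require an HWRT). A standalone sketch of the job-type to context-type requirement written as a table; the enum values are stand-ins for the DRM_PVR_* constants:

#include <assert.h>
#include <stdbool.h>

enum job_type { JOB_GEOMETRY, JOB_FRAGMENT, JOB_COMPUTE, JOB_TRANSFER_FRAG };
enum ctx_type { CTX_RENDER, CTX_COMPUTE, CTX_TRANSFER_FRAG };

/* Context type each job type must be submitted on, mirroring the checks
 * in pvr_*_job_fw_cmd_init().
 */
static const enum ctx_type required_ctx[] = {
	[JOB_GEOMETRY]      = CTX_RENDER,
	[JOB_FRAGMENT]      = CTX_RENDER,
	[JOB_COMPUTE]       = CTX_COMPUTE,
	[JOB_TRANSFER_FRAG] = CTX_TRANSFER_FRAG,
};

static bool job_ctx_ok(enum job_type j, enum ctx_type c)
{
	return required_ctx[j] == c;
}

int main(void)
{
	assert(job_ctx_ok(JOB_FRAGMENT, CTX_RENDER));
	assert(!job_ctx_ok(JOB_COMPUTE, CTX_RENDER));
	return 0;
}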
+ */ +static int +prepare_job_syncs(struct pvr_file *pvr_file, + struct pvr_job_data *job_data, + struct xarray *signal_array) +{ + struct dma_fence *done_fence; + int err = pvr_sync_signal_array_collect_ops(signal_array, + from_pvr_file(pvr_file), + job_data->sync_op_count, + job_data->sync_ops); + + if (err) + return err; + + err = pvr_sync_add_deps_to_job(pvr_file, &job_data->job->base, + job_data->sync_op_count, + job_data->sync_ops, signal_array); + if (err) + return err; + + if (job_data->job->hwrt) { + /* The geometry job writes the HWRT region headers, which are + * then read by the fragment job. + */ + struct drm_gem_object *obj = + gem_from_pvr_gem(job_data->job->hwrt->fw_obj->gem); + enum dma_resv_usage usage = + dma_resv_usage_rw(job_data->job->type == + DRM_PVR_JOB_TYPE_GEOMETRY); + + dma_resv_lock(obj->resv, NULL); + err = drm_sched_job_add_resv_dependencies(&job_data->job->base, + obj->resv, usage); + dma_resv_unlock(obj->resv); + if (err) + return err; + } + + /* We need to arm the job to get the job done fence. */ + done_fence = pvr_queue_job_arm(job_data->job); + + err = pvr_sync_signal_array_update_fences(signal_array, + job_data->sync_op_count, + job_data->sync_ops, + done_fence); + return err; +} + +/** + * prepare_job_syncs_for_each() - Prepare all sync objects for an array of jobs. + * @pvr_file: PowerVR file. + * @job_data: Array of precreated jobs and their sync_ops. + * @job_count: Number of jobs. + * @signal_array: xarray to receive signal sync objects. + * + * Returns: + * * 0 on success, or + * * Any error code returned by pvr_vm_bind_job_prepare_syncs(). + */ +static int +prepare_job_syncs_for_each(struct pvr_file *pvr_file, + struct pvr_job_data *job_data, + u32 *job_count, + struct xarray *signal_array) +{ + for (u32 i = 0; i < *job_count; i++) { + int err = prepare_job_syncs(pvr_file, &job_data[i], + signal_array); + + if (err) { + *job_count = i; + return err; + } + } + + return 0; +} + +static struct pvr_job * +create_job(struct pvr_device *pvr_dev, + struct pvr_file *pvr_file, + struct drm_pvr_job *args) +{ + struct pvr_job *job = NULL; + int err; + + if (!args->cmd_stream || !args->cmd_stream_len) + return ERR_PTR(-EINVAL); + + if (args->type != DRM_PVR_JOB_TYPE_GEOMETRY && + args->type != DRM_PVR_JOB_TYPE_FRAGMENT && + (args->hwrt.set_handle || args->hwrt.data_index)) + return ERR_PTR(-EINVAL); + + job = kzalloc(sizeof(*job), GFP_KERNEL); + if (!job) + return ERR_PTR(-ENOMEM); + + kref_init(&job->ref_count); + job->type = args->type; + job->pvr_dev = pvr_dev; + + err = xa_alloc(&pvr_dev->job_ids, &job->id, job, xa_limit_32b, GFP_KERNEL); + if (err) + goto err_put_job; + + job->ctx = pvr_context_lookup(pvr_file, args->context_handle); + if (!job->ctx) { + err = -EINVAL; + goto err_put_job; + } + + if (args->hwrt.set_handle) { + job->hwrt = pvr_hwrt_data_lookup(pvr_file, args->hwrt.set_handle, + args->hwrt.data_index); + if (!job->hwrt) { + err = -EINVAL; + goto err_put_job; + } + } + + err = pvr_job_fw_cmd_init(job, args); + if (err) + goto err_put_job; + + err = pvr_queue_job_init(job); + if (err) + goto err_put_job; + + return job; + +err_put_job: + pvr_job_put(job); + return ERR_PTR(err); +} + +/** + * pvr_job_data_fini() - Cleanup all allocs used to set up job submission. + * @job_data: Job data array. + * @job_count: Number of members of @job_data. 
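The dma_resv_usage_rw() call in prepare_job_syncs() reads inverted at first glance, but this is how the helper is defined in <linux/dma-resv.h>: a writer must wait for existing readers and writers, so it collects fences up to DMA_RESV_USAGE_READ, while a reader only needs to wait for writers (DMA_RESV_USAGE_WRITE). The rule in isolation, as a kernel-style sketch:

#include <linux/dma-resv.h>
#include <uapi/drm/pvr_drm.h>

/* Geometry writes the HWRT region headers; fragment only reads them. */
static enum dma_resv_usage hwrt_dep_usage(enum drm_pvr_job_type type)
{
	/* true (writer) -> DMA_RESV_USAGE_READ: wait on readers + writers.
	 * false (reader) -> DMA_RESV_USAGE_WRITE: wait on writers only.
	 */
	return dma_resv_usage_rw(type == DRM_PVR_JOB_TYPE_GEOMETRY);
}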
+ */ +static void +pvr_job_data_fini(struct pvr_job_data *job_data, u32 job_count) +{ + for (u32 i = 0; i < job_count; i++) { + pvr_job_put(job_data[i].job); + kvfree(job_data[i].sync_ops); + } +} + +/** + * pvr_job_data_init() - Init an array of created jobs, associating them with + * the appropriate sync_ops args, which will be copied in. + * @pvr_dev: Target PowerVR device. + * @pvr_file: Pointer to PowerVR file structure. + * @job_args: Job args array copied from user. + * @job_count: Number of members of @job_args. + * @job_data_out: Job data array. + */ +static int pvr_job_data_init(struct pvr_device *pvr_dev, + struct pvr_file *pvr_file, + struct drm_pvr_job *job_args, + u32 *job_count, + struct pvr_job_data *job_data_out) +{ + int err = 0, i = 0; + + for (; i < *job_count; i++) { + job_data_out[i].job = + create_job(pvr_dev, pvr_file, &job_args[i]); + err = PTR_ERR_OR_ZERO(job_data_out[i].job); + + if (err) { + *job_count = i; + job_data_out[i].job = NULL; + goto err_cleanup; + } + + err = PVR_UOBJ_GET_ARRAY(job_data_out[i].sync_ops, + &job_args[i].sync_ops); + if (err) { + *job_count = i; + + /* Ensure the job created above is also cleaned up. */ + i++; + goto err_cleanup; + } + + job_data_out[i].sync_op_count = job_args[i].sync_ops.count; + } + + return 0; + +err_cleanup: + pvr_job_data_fini(job_data_out, i); + + return err; +} + +static void +push_jobs(struct pvr_job_data *job_data, u32 job_count) +{ + for (u32 i = 0; i < job_count; i++) + pvr_queue_job_push(job_data[i].job); +} + +static int +prepare_fw_obj_resv(struct drm_exec *exec, struct pvr_fw_object *fw_obj) +{ + return drm_exec_prepare_obj(exec, gem_from_pvr_gem(fw_obj->gem), 1); +} + +static int +jobs_lock_all_objs(struct drm_exec *exec, struct pvr_job_data *job_data, + u32 job_count) +{ + for (u32 i = 0; i < job_count; i++) { + struct pvr_job *job = job_data[i].job; + + /* Grab a lock on a the context, to guard against + * concurrent submission to the same queue. + */ + int err = drm_exec_lock_obj(exec, + gem_from_pvr_gem(job->ctx->fw_obj->gem)); + + if (err) + return err; + + if (job->hwrt) { + err = prepare_fw_obj_resv(exec, + job->hwrt->fw_obj); + if (err) + return err; + } + } + + return 0; +} + +static int +prepare_job_resvs_for_each(struct drm_exec *exec, struct pvr_job_data *job_data, + u32 job_count) +{ + drm_exec_until_all_locked(exec) { + int err = jobs_lock_all_objs(exec, job_data, job_count); + + drm_exec_retry_on_contention(exec); + if (err) + return err; + } + + return 0; +} + +static void +update_job_resvs(struct pvr_job *job) +{ + if (job->hwrt) { + enum dma_resv_usage usage = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ? + DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ; + struct drm_gem_object *obj = gem_from_pvr_gem(job->hwrt->fw_obj->gem); + + dma_resv_add_fence(obj->resv, &job->base.s_fence->finished, usage); + } +} + +static void +update_job_resvs_for_each(struct pvr_job_data *job_data, u32 job_count) +{ + for (u32 i = 0; i < job_count; i++) + update_job_resvs(job_data[i].job); +} + +static bool can_combine_jobs(struct pvr_job *a, struct pvr_job *b) +{ + struct pvr_job *geom_job = a, *frag_job = b; + struct dma_fence *fence; + unsigned long index; + + /* Geometry and fragment jobs can be combined if they are queued to the + * same context and targeting the same HWRT. 
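prepare_job_resvs_for_each() above is an instance of the standard drm_exec idiom: drm_exec_until_all_locked() re-runs its body after dropping every lock whenever drm_exec_retry_on_contention() detects contention, so the body must be safe to execute repeatedly. A minimal sketch over a caller-supplied array of GEM objects, which stands in for the per-job context and HWRT firmware objects:

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

static int lock_job_objects(struct drm_gem_object **objs, unsigned int count)
{
	struct drm_exec exec;
	int err = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);

	drm_exec_until_all_locked(&exec) {
		for (unsigned int i = 0; i < count; i++) {
			/* Reserve one fence slot per object, as
			 * prepare_fw_obj_resv() does above.
			 */
			err = drm_exec_prepare_obj(&exec, objs[i], 1);
			drm_exec_retry_on_contention(&exec);
			if (err)
				goto out_fini;
		}
	}

	/* All reservations held: safe to add fences, then publish. */

out_fini:
	drm_exec_fini(&exec);
	return err;
}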
+ */ + if (a->type != DRM_PVR_JOB_TYPE_GEOMETRY || + b->type != DRM_PVR_JOB_TYPE_FRAGMENT || + a->ctx != b->ctx || + a->hwrt != b->hwrt) + return false; + + xa_for_each(&frag_job->base.dependencies, index, fence) { + /* We combine when we see an explicit geom -> frag dep. */ + if (&geom_job->base.s_fence->scheduled == fence) + return true; + } + + return false; +} + +static struct dma_fence * +get_last_queued_job_scheduled_fence(struct pvr_queue *queue, + struct pvr_job_data *job_data, + u32 cur_job_pos) +{ + /* We iterate over the current job array in reverse order to grab the + * last to-be-queued job targeting the same queue. + */ + for (u32 i = cur_job_pos; i > 0; i--) { + struct pvr_job *job = job_data[i - 1].job; + + if (job->ctx == queue->ctx && job->type == queue->type) + return dma_fence_get(&job->base.s_fence->scheduled); + } + + /* If we didn't find any, we just return the last queued job scheduled + * fence attached to the queue. + */ + return dma_fence_get(queue->last_queued_job_scheduled_fence); +} + +static int +pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count) +{ + for (u32 i = 0; i < *job_count - 1; i++) { + struct pvr_job *geom_job = job_data[i].job; + struct pvr_job *frag_job = job_data[i + 1].job; + struct pvr_queue *frag_queue; + struct dma_fence *f; + + if (!can_combine_jobs(job_data[i].job, job_data[i + 1].job)) + continue; + + /* The fragment job will be submitted by the geometry queue. We + * need to make sure it comes after all the other fragment jobs + * queued before it. + */ + frag_queue = pvr_context_get_queue_for_job(frag_job->ctx, + frag_job->type); + f = get_last_queued_job_scheduled_fence(frag_queue, job_data, + i); + if (f) { + int err = drm_sched_job_add_dependency(&geom_job->base, + f); + if (err) { + *job_count = i; + return err; + } + } + + /* The KCCB slot will be reserved by the geometry job, so we can + * drop the KCCB fence on the fragment job. + */ + pvr_kccb_fence_put(frag_job->kccb_fence); + frag_job->kccb_fence = NULL; + + geom_job->paired_job = frag_job; + frag_job->paired_job = geom_job; + + /* Skip the fragment job we just paired to the geometry job. */ + i++; + } + + return 0; +} + +/** + * pvr_submit_jobs() - Submit jobs to the GPU + * @pvr_dev: Target PowerVR device. + * @pvr_file: Pointer to PowerVR file structure. + * @args: Ioctl args. + * + * This initial implementation is entirely synchronous; on return the GPU will + * be idle. This will not be the case for future implementations. + * + * Returns: + * * 0 on success, + * * -%EFAULT if arguments can not be copied from user space, or + * * -%EINVAL on invalid arguments, or + * * Any other error. + */ +int +pvr_submit_jobs(struct pvr_device *pvr_dev, struct pvr_file *pvr_file, + struct drm_pvr_ioctl_submit_jobs_args *args) +{ + struct pvr_job_data *job_data = NULL; + struct drm_pvr_job *job_args; + struct xarray signal_array; + u32 jobs_alloced = 0; + struct drm_exec exec; + int err; + + if (!args->jobs.count) + return -EINVAL; + + err = PVR_UOBJ_GET_ARRAY(job_args, &args->jobs); + if (err) + return err; + + job_data = kvmalloc_array(args->jobs.count, sizeof(*job_data), + GFP_KERNEL | __GFP_ZERO); + if (!job_data) { + err = -ENOMEM; + goto out_free; + } + + err = pvr_job_data_init(pvr_dev, pvr_file, job_args, &args->jobs.count, + job_data); + if (err) + goto out_free; + + jobs_alloced = args->jobs.count; + + /* + * Flush MMU if needed - this has been deferred until now to avoid + * overuse of this expensive operation. 
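Putting the pairing rules together: pvr_jobs_link_geom_frag() only fuses a fragment job with the geometry job immediately preceding it in the submitted array, on the same context and the same HWRT, and only when the sync ops express an explicit geometry-to-fragment dependency. A hedged userspace-side sketch of such a pair; the header name follows the usual DRM UAPI convention, the handles are hypothetical, and the command streams and sync-op wiring are elided:

#include <stdint.h>
#include <string.h>
#include <drm/pvr_drm.h>

static void fill_render_pair(struct drm_pvr_job jobs[2], uint32_t ctx_handle,
			     uint32_t hwrt_handle)
{
	memset(jobs, 0, 2 * sizeof(jobs[0]));

	/* Geometry first, fragment directly after it in the array. */
	jobs[0].type = DRM_PVR_JOB_TYPE_GEOMETRY;
	jobs[1].type = DRM_PVR_JOB_TYPE_FRAGMENT;

	for (int i = 0; i < 2; i++) {
		/* Same render context and same HWRT data... */
		jobs[i].context_handle = ctx_handle;
		jobs[i].hwrt.set_handle = hwrt_handle;
		jobs[i].hwrt.data_index = 0;
		/* ...cmd_stream/cmd_stream_len and the geom -> frag sync
		 * op still need to be filled in for pairing to trigger.
		 */
	}
}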
+ */ + err = pvr_mmu_flush_exec(pvr_dev, false); + if (err) + goto out_job_data_cleanup; + + drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES, 0); + + xa_init_flags(&signal_array, XA_FLAGS_ALLOC); + + err = prepare_job_syncs_for_each(pvr_file, job_data, &args->jobs.count, + &signal_array); + if (err) + goto out_exec_fini; + + err = prepare_job_resvs_for_each(&exec, job_data, args->jobs.count); + if (err) + goto out_exec_fini; + + err = pvr_jobs_link_geom_frag(job_data, &args->jobs.count); + if (err) + goto out_exec_fini; + + /* Anything after that point must succeed because we start exposing job + * finished fences to the outside world. + */ + update_job_resvs_for_each(job_data, args->jobs.count); + push_jobs(job_data, args->jobs.count); + pvr_sync_signal_array_push_fences(&signal_array); + err = 0; + +out_exec_fini: + drm_exec_fini(&exec); + pvr_sync_signal_array_cleanup(&signal_array); + +out_job_data_cleanup: + pvr_job_data_fini(job_data, jobs_alloced); + +out_free: + kvfree(job_data); + kvfree(job_args); + + return err; +} diff --git a/drivers/gpu/drm/imagination/pvr_job.h b/drivers/gpu/drm/imagination/pvr_job.h new file mode 100644 index 0000000000..0ca003c5c4 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_job.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_JOB_H +#define PVR_JOB_H + +#include + +#include +#include + +#include +#include + +#include "pvr_power.h" + +/* Forward declaration from "pvr_context.h". */ +struct pvr_context; + +/* Forward declarations from "pvr_device.h". */ +struct pvr_device; +struct pvr_file; + +/* Forward declarations from "pvr_hwrt.h". */ +struct pvr_hwrt_data; + +/* Forward declaration from "pvr_queue.h". */ +struct pvr_queue; + +struct pvr_job { + /** @base: drm_sched_job object. */ + struct drm_sched_job base; + + /** @ref_count: Refcount for job. */ + struct kref ref_count; + + /** @type: Type of job. */ + enum drm_pvr_job_type type; + + /** @id: Job ID number. */ + u32 id; + + /** + * @paired_job: Job paired to this job. + * + * This field is only meaningful for geometry and fragment jobs. + * + * Paired jobs are executed on the same context, and need to be submitted + * atomically to the FW, to make sure the partial render logic has a + * fragment job to execute when the Parameter Manager runs out of memory. + * + * The geometry job should point to the fragment job it's paired with, + * and the fragment job should point to the geometry job it's paired with. + */ + struct pvr_job *paired_job; + + /** @cccb_fence: Fence used to wait for CCCB space. */ + struct dma_fence *cccb_fence; + + /** @kccb_fence: Fence used to wait for KCCB space. */ + struct dma_fence *kccb_fence; + + /** @done_fence: Fence to signal when the job is done. */ + struct dma_fence *done_fence; + + /** @pvr_dev: Device pointer. */ + struct pvr_device *pvr_dev; + + /** @ctx: Pointer to owning context. */ + struct pvr_context *ctx; + + /** @cmd: Command data. Format depends on @type. */ + void *cmd; + + /** @cmd_len: Length of command data, in bytes. */ + u32 cmd_len; + + /** + * @fw_ccb_cmd_type: Firmware CCB command type. Must be one of %ROGUE_FWIF_CCB_CMD_TYPE_*. + */ + u32 fw_ccb_cmd_type; + + /** @hwrt: HWRT object. Will be NULL for compute and transfer jobs. */ + struct pvr_hwrt_data *hwrt; + + /** + * @has_pm_ref: True if the job has a power ref, thus forcing the GPU to stay on until + * the job is done. 
+ */ + bool has_pm_ref; +}; + +/** + * pvr_job_get() - Take additional reference on job. + * @job: Job pointer. + * + * Call pvr_job_put() to release. + * + * Returns: + * * The requested job on success, or + * * %NULL if no job pointer passed. + */ +static __always_inline struct pvr_job * +pvr_job_get(struct pvr_job *job) +{ + if (job) + kref_get(&job->ref_count); + + return job; +} + +void pvr_job_put(struct pvr_job *job); + +/** + * pvr_job_release_pm_ref() - Release the PM ref if the job acquired it. + * @job: The job to release the PM ref on. + */ +static __always_inline void +pvr_job_release_pm_ref(struct pvr_job *job) +{ + if (job->has_pm_ref) { + pvr_power_put(job->pvr_dev); + job->has_pm_ref = false; + } +} + +/** + * pvr_job_get_pm_ref() - Get a PM ref and attach it to the job. + * @job: The job to attach the PM ref to. + * + * Return: + * * 0 on success, or + * * Any error returned by pvr_power_get() otherwise. + */ +static __always_inline int +pvr_job_get_pm_ref(struct pvr_job *job) +{ + int err; + + if (job->has_pm_ref) + return 0; + + err = pvr_power_get(job->pvr_dev); + if (!err) + job->has_pm_ref = true; + + return err; +} + +int pvr_job_wait_first_non_signaled_native_dep(struct pvr_job *job); + +bool pvr_job_non_native_deps_done(struct pvr_job *job); + +int pvr_job_fits_in_cccb(struct pvr_job *job, unsigned long native_dep_count); + +void pvr_job_submit(struct pvr_job *job); + +int pvr_submit_jobs(struct pvr_device *pvr_dev, struct pvr_file *pvr_file, + struct drm_pvr_ioctl_submit_jobs_args *args); + +#endif /* PVR_JOB_H */ diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c new file mode 100644 index 0000000000..4fe70610ed --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_mmu.c @@ -0,0 +1,2640 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#include "pvr_mmu.h" + +#include "pvr_ccb.h" +#include "pvr_device.h" +#include "pvr_fw.h" +#include "pvr_gem.h" +#include "pvr_power.h" +#include "pvr_rogue_fwif.h" +#include "pvr_rogue_mmu_defs.h" + +#include +#include +#include +#include +#include +#include +#include + +#define PVR_SHIFT_FROM_SIZE(size_) (__builtin_ctzll(size_)) +#define PVR_MASK_FROM_SIZE(size_) (~((size_) - U64_C(1))) + +/* + * The value of the device page size (%PVR_DEVICE_PAGE_SIZE) is currently + * pegged to the host page size (%PAGE_SIZE). This chunk of macro goodness both + * ensures that the selected host page size corresponds to a valid device page + * size and sets up values needed by the MMU code below. 
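A quick standalone check of the two helper macros above: PVR_SHIFT_FROM_SIZE() of a power-of-two size is its bit index, and PVR_MASK_FROM_SIZE() clears offsets within one such size. SZ_16K and the ULL suffix are spelled out here because linux/sizes.h and U64_C() are not available in userspace:

#include <assert.h>
#include <stdint.h>

#define SZ_16K 0x4000ULL
#define PVR_SHIFT_FROM_SIZE(size_) (__builtin_ctzll(size_))
#define PVR_MASK_FROM_SIZE(size_)  (~((size_) - 1ULL))

int main(void)
{
	assert(PVR_SHIFT_FROM_SIZE(SZ_16K) == 14);
	assert(PVR_MASK_FROM_SIZE(SZ_16K) == ~0x3fffULL);
	assert((0x12345678ULL & PVR_MASK_FROM_SIZE(SZ_16K)) == 0x12344000ULL);
	return 0;
}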
+ */ +#if (PVR_DEVICE_PAGE_SIZE == SZ_4K) +# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_4KB +# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_4KB_RANGE_SHIFT +# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_4KB_RANGE_CLRMSK +#elif (PVR_DEVICE_PAGE_SIZE == SZ_16K) +# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_16KB +# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_16KB_RANGE_SHIFT +# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_16KB_RANGE_CLRMSK +#elif (PVR_DEVICE_PAGE_SIZE == SZ_64K) +# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_64KB +# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_64KB_RANGE_SHIFT +# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_64KB_RANGE_CLRMSK +#elif (PVR_DEVICE_PAGE_SIZE == SZ_256K) +# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_256KB +# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_256KB_RANGE_SHIFT +# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_256KB_RANGE_CLRMSK +#elif (PVR_DEVICE_PAGE_SIZE == SZ_1M) +# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_1MB +# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_1MB_RANGE_SHIFT +# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_1MB_RANGE_CLRMSK +#elif (PVR_DEVICE_PAGE_SIZE == SZ_2M) +# define ROGUE_MMUCTRL_PAGE_SIZE_X ROGUE_MMUCTRL_PAGE_SIZE_2MB +# define ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT ROGUE_MMUCTRL_PAGE_2MB_RANGE_SHIFT +# define ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK ROGUE_MMUCTRL_PAGE_2MB_RANGE_CLRMSK +#else +# error Unsupported device page size PVR_DEVICE_PAGE_SIZE +#endif + +#define ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X \ + (ROGUE_MMUCTRL_ENTRIES_PT_VALUE >> \ + (PVR_DEVICE_PAGE_SHIFT - PVR_SHIFT_FROM_SIZE(SZ_4K))) + +enum pvr_mmu_sync_level { + PVR_MMU_SYNC_LEVEL_NONE = -1, + PVR_MMU_SYNC_LEVEL_0 = 0, + PVR_MMU_SYNC_LEVEL_1 = 1, + PVR_MMU_SYNC_LEVEL_2 = 2, +}; + +#define PVR_MMU_SYNC_LEVEL_0_FLAGS (ROGUE_FWIF_MMUCACHEDATA_FLAGS_PT | \ + ROGUE_FWIF_MMUCACHEDATA_FLAGS_INTERRUPT | \ + ROGUE_FWIF_MMUCACHEDATA_FLAGS_TLB) +#define PVR_MMU_SYNC_LEVEL_1_FLAGS (PVR_MMU_SYNC_LEVEL_0_FLAGS | ROGUE_FWIF_MMUCACHEDATA_FLAGS_PD) +#define PVR_MMU_SYNC_LEVEL_2_FLAGS (PVR_MMU_SYNC_LEVEL_1_FLAGS | ROGUE_FWIF_MMUCACHEDATA_FLAGS_PC) + +/** + * pvr_mmu_set_flush_flags() - Set MMU cache flush flags for next call to + * pvr_mmu_flush_exec(). + * @pvr_dev: Target PowerVR device. + * @flags: MMU flush flags. Must be one of %PVR_MMU_SYNC_LEVEL_*_FLAGS. + * + * This function must be called following any possible change to the MMU page + * tables. + */ +static void pvr_mmu_set_flush_flags(struct pvr_device *pvr_dev, u32 flags) +{ + atomic_fetch_or(flags, &pvr_dev->mmu_flush_cache_flags); +} + +/** + * pvr_mmu_flush_request_all() - Request flush of all MMU caches when + * subsequently calling pvr_mmu_flush_exec(). + * @pvr_dev: Target PowerVR device. + * + * This function must be called following any possible change to the MMU page + * tables. + */ +void pvr_mmu_flush_request_all(struct pvr_device *pvr_dev) +{ + pvr_mmu_set_flush_flags(pvr_dev, PVR_MMU_SYNC_LEVEL_2_FLAGS); +} + +/** + * pvr_mmu_flush_exec() - Execute a flush of all MMU caches previously + * requested. + * @pvr_dev: Target PowerVR device. + * @wait: Do not return until the flush is completed. + * + * This function must be called prior to submitting any new GPU job. The flush + * will complete before the jobs are scheduled, so this can be called once after + * a series of maps. 
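The flush-flag plumbing above is an accumulate-and-consume scheme: pvr_mmu_set_flush_flags() ORs sync-level flags into an atomic, and pvr_mmu_flush_exec() later claims the whole accumulated set with an atomic exchange, so concurrent requests cannot be lost. A standalone model using C11 atomics in place of the kernel's atomic_t API:

#include <assert.h>
#include <stdatomic.h>

#define FLAG_PT (1u << 0)   /* stand-ins for the sync-level flag sets */
#define FLAG_PD (1u << 1)
#define FLAG_PC (1u << 2)

static atomic_uint flush_flags;

static void request_flush(unsigned int flags)
{
	atomic_fetch_or(&flush_flags, flags);
}

static unsigned int consume_flush_flags(void)
{
	/* Returns everything requested so far and resets the accumulator. */
	return atomic_exchange(&flush_flags, 0);
}

int main(void)
{
	request_flush(FLAG_PT);
	request_flush(FLAG_PT | FLAG_PD);
	assert(consume_flush_flags() == (FLAG_PT | FLAG_PD));
	assert(consume_flush_flags() == 0);   /* nothing pending anymore */
	return 0;
}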
However, a single unmap should always be immediately + * followed by a flush and it should be explicitly waited by setting @wait. + * + * As a failure to flush the MMU caches could risk memory corruption, if the + * flush fails (implying the firmware is not responding) then the GPU device is + * marked as lost. + * + * Returns: + * * 0 on success when @wait is true, or + * * -%EIO if the device is unavailable, or + * * Any error encountered while submitting the flush command via the KCCB. + */ +int pvr_mmu_flush_exec(struct pvr_device *pvr_dev, bool wait) +{ + struct rogue_fwif_kccb_cmd cmd_mmu_cache = {}; + struct rogue_fwif_mmucachedata *cmd_mmu_cache_data = + &cmd_mmu_cache.cmd_data.mmu_cache_data; + int err = 0; + u32 slot; + int idx; + + if (!drm_dev_enter(from_pvr_device(pvr_dev), &idx)) + return -EIO; + + /* Can't flush MMU if the firmware hasn't booted yet. */ + if (!pvr_dev->fw_dev.booted) + goto err_drm_dev_exit; + + cmd_mmu_cache_data->cache_flags = + atomic_xchg(&pvr_dev->mmu_flush_cache_flags, 0); + + if (!cmd_mmu_cache_data->cache_flags) + goto err_drm_dev_exit; + + cmd_mmu_cache.cmd_type = ROGUE_FWIF_KCCB_CMD_MMUCACHE; + + pvr_fw_object_get_fw_addr(pvr_dev->fw_dev.mem.mmucache_sync_obj, + &cmd_mmu_cache_data->mmu_cache_sync_fw_addr); + cmd_mmu_cache_data->mmu_cache_sync_update_value = 0; + + err = pvr_kccb_send_cmd(pvr_dev, &cmd_mmu_cache, &slot); + if (err) + goto err_reset_and_retry; + + err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, NULL); + if (err) + goto err_reset_and_retry; + + drm_dev_exit(idx); + + return 0; + +err_reset_and_retry: + /* + * Flush command failure is most likely the result of a firmware lockup. Hard + * reset the GPU and retry. + */ + err = pvr_power_reset(pvr_dev, true); + if (err) + goto err_drm_dev_exit; /* Device is lost. */ + + /* Retry sending flush request. */ + err = pvr_kccb_send_cmd(pvr_dev, &cmd_mmu_cache, &slot); + if (err) { + pvr_device_lost(pvr_dev); + goto err_drm_dev_exit; + } + + if (wait) { + err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, NULL); + if (err) + pvr_device_lost(pvr_dev); + } + +err_drm_dev_exit: + drm_dev_exit(idx); + + return err; +} + +/** + * DOC: PowerVR Virtual Memory Handling + */ +/** + * DOC: PowerVR Virtual Memory Handling (constants) + * + * .. c:macro:: PVR_IDX_INVALID + * + * Default value for a u16-based index. + * + * This value cannot be zero, since zero is a valid index value. + */ +#define PVR_IDX_INVALID ((u16)(-1)) + +/** + * DOC: MMU backing pages + */ +/** + * DOC: MMU backing pages (constants) + * + * .. c:macro:: PVR_MMU_BACKING_PAGE_SIZE + * + * Page size of a PowerVR device's integrated MMU. The CPU page size must be + * at least as large as this value for the current implementation; this is + * checked at compile-time. + */ +#define PVR_MMU_BACKING_PAGE_SIZE SZ_4K +static_assert(PAGE_SIZE >= PVR_MMU_BACKING_PAGE_SIZE); + +/** + * struct pvr_mmu_backing_page - Represents a single page used to back a page + * table of any level. + * @dma_addr: DMA address of this page. + * @host_ptr: CPU address of this page. + * @pvr_dev: The PowerVR device to which this page is associated. **For + * internal use only.** + */ +struct pvr_mmu_backing_page { + dma_addr_t dma_addr; + void *host_ptr; +/* private: internal use only */ + struct page *raw_page; + struct pvr_device *pvr_dev; +}; + +/** + * pvr_mmu_backing_page_init() - Initialize a MMU backing page. + * @page: Target backing page. + * @pvr_dev: Target PowerVR device. + * + * This function performs three distinct operations: + * + * 1. 
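The contract spelled out in the pvr_mmu_flush_exec() kernel-doc, as a short sketch: after tearing down mappings, request a full flush and execute it synchronously so no stale translation can outlive the unmap. Both calls are the functions defined in this file; the surrounding helper is hypothetical:

static int example_unmap_flush(struct pvr_device *pvr_dev)
{
	/* ... page table entries have just been cleared ... */
	pvr_mmu_flush_request_all(pvr_dev);

	/* wait=true: do not return before the MMU caches are clean. */
	return pvr_mmu_flush_exec(pvr_dev, true);
}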
Allocate a single page, + * 2. Map the page to the CPU, and + * 3. Map the page to DMA-space. + * + * It is expected that @page be zeroed (e.g. from kzalloc()) before calling + * this function. + * + * Return: + * * 0 on success, or + * * -%ENOMEM if allocation of the backing page or mapping of the backing + * page to DMA fails. + */ +static int +pvr_mmu_backing_page_init(struct pvr_mmu_backing_page *page, + struct pvr_device *pvr_dev) +{ + struct device *dev = from_pvr_device(pvr_dev)->dev; + + struct page *raw_page; + int err; + + dma_addr_t dma_addr; + void *host_ptr; + + raw_page = alloc_page(__GFP_ZERO | GFP_KERNEL); + if (!raw_page) + return -ENOMEM; + + host_ptr = vmap(&raw_page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + if (!host_ptr) { + err = -ENOMEM; + goto err_free_page; + } + + dma_addr = dma_map_page(dev, raw_page, 0, PVR_MMU_BACKING_PAGE_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + err = -ENOMEM; + goto err_unmap_page; + } + + page->dma_addr = dma_addr; + page->host_ptr = host_ptr; + page->pvr_dev = pvr_dev; + page->raw_page = raw_page; + kmemleak_alloc(page->host_ptr, PAGE_SIZE, 1, GFP_KERNEL); + + return 0; + +err_unmap_page: + vunmap(host_ptr); + +err_free_page: + __free_page(raw_page); + + return err; +} + +/** + * pvr_mmu_backing_page_fini() - Teardown a MMU backing page. + * @page: Target backing page. + * + * This function performs the mirror operations to pvr_mmu_backing_page_init(), + * in reverse order: + * + * 1. Unmap the page from DMA-space, + * 2. Unmap the page from the CPU, and + * 3. Free the page. + * + * It also zeros @page. + * + * It is a no-op to call this function a second (or further) time on any @page. + */ +static void +pvr_mmu_backing_page_fini(struct pvr_mmu_backing_page *page) +{ + struct device *dev; + + /* Do nothing if no allocation is present. */ + if (!page->pvr_dev) + return; + + dev = from_pvr_device(page->pvr_dev)->dev; + + dma_unmap_page(dev, page->dma_addr, PVR_MMU_BACKING_PAGE_SIZE, + DMA_TO_DEVICE); + + kmemleak_free(page->host_ptr); + vunmap(page->host_ptr); + + __free_page(page->raw_page); + + memset(page, 0, sizeof(*page)); +} + +/** + * pvr_mmu_backing_page_sync() - Flush a MMU backing page from the CPU to the + * device. + * @page: Target backing page. + * @flags: MMU flush flags. Must be one of %PVR_MMU_SYNC_LEVEL_*_FLAGS. + * + * .. caution:: + * + * **This is potentially an expensive function call.** Only call + * pvr_mmu_backing_page_sync() once you're sure you have no more changes to + * make to the backing page in the immediate future. + */ +static void +pvr_mmu_backing_page_sync(struct pvr_mmu_backing_page *page, u32 flags) +{ + struct pvr_device *pvr_dev = page->pvr_dev; + struct device *dev; + + /* + * Do nothing if no allocation is present. This may be the case if + * we are unmapping pages. 
+ */ + if (!pvr_dev) + return; + + dev = from_pvr_device(pvr_dev)->dev; + + dma_sync_single_for_device(dev, page->dma_addr, + PVR_MMU_BACKING_PAGE_SIZE, DMA_TO_DEVICE); + + pvr_mmu_set_flush_flags(pvr_dev, flags); +} + +/** + * DOC: Raw page tables + */ + +#define PVR_PAGE_TABLE_TYPEOF_ENTRY(level_) \ + typeof_member(struct pvr_page_table_l##level_##_entry_raw, val) + +#define PVR_PAGE_TABLE_FIELD_GET(level_, name_, field_, entry_) \ + (((entry_).val & \ + ~ROGUE_MMUCTRL_##name_##_DATA_##field_##_CLRMSK) >> \ + ROGUE_MMUCTRL_##name_##_DATA_##field_##_SHIFT) + +#define PVR_PAGE_TABLE_FIELD_PREP(level_, name_, field_, val_) \ + ((((PVR_PAGE_TABLE_TYPEOF_ENTRY(level_))(val_)) \ + << ROGUE_MMUCTRL_##name_##_DATA_##field_##_SHIFT) & \ + ~ROGUE_MMUCTRL_##name_##_DATA_##field_##_CLRMSK) + +/** + * struct pvr_page_table_l2_entry_raw - A single entry in a level 2 page table. + * @val: The raw value of this entry. + * + * This type is a structure for type-checking purposes. At compile-time, its + * size is checked against %ROGUE_MMUCTRL_ENTRY_SIZE_PC_VALUE. + * + * The value stored in this structure can be decoded using the following bitmap: + * + * .. flat-table:: + * :widths: 1 5 + * :stub-columns: 1 + * + * * - 31..4 + * - **Level 1 Page Table Base Address:** Bits 39..12 of the L1 + * page table base address, which is 4KiB aligned. + * + * * - 3..2 + * - *(reserved)* + * + * * - 1 + * - **Pending:** When valid bit is not set, indicates that a valid + * entry is pending and the MMU should wait for the driver to map + * the entry. This is used to support page demand mapping of + * memory. + * + * * - 0 + * - **Valid:** Indicates that the entry contains a valid L1 page + * table. If the valid bit is not set, then an attempted use of + * the page would result in a page fault. + */ +struct pvr_page_table_l2_entry_raw { + u32 val; +} __packed; +static_assert(sizeof(struct pvr_page_table_l2_entry_raw) * 8 == + ROGUE_MMUCTRL_ENTRY_SIZE_PC_VALUE); + +static bool +pvr_page_table_l2_entry_raw_is_valid(struct pvr_page_table_l2_entry_raw entry) +{ + return PVR_PAGE_TABLE_FIELD_GET(2, PC, VALID, entry); +} + +/** + * pvr_page_table_l2_entry_raw_set() - Write a valid entry into a raw level 2 + * page table. + * @entry: Target raw level 2 page table entry. + * @child_table_dma_addr: DMA address of the level 1 page table to be + * associated with @entry. + * + * When calling this function, @child_table_dma_addr must be a valid DMA + * address and a multiple of %ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE. + */ +static void +pvr_page_table_l2_entry_raw_set(struct pvr_page_table_l2_entry_raw *entry, + dma_addr_t child_table_dma_addr) +{ + child_table_dma_addr >>= ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT; + + WRITE_ONCE(entry->val, + PVR_PAGE_TABLE_FIELD_PREP(2, PC, VALID, true) | + PVR_PAGE_TABLE_FIELD_PREP(2, PC, ENTRY_PENDING, false) | + PVR_PAGE_TABLE_FIELD_PREP(2, PC, PD_BASE, child_table_dma_addr)); +} + +static void +pvr_page_table_l2_entry_raw_clear(struct pvr_page_table_l2_entry_raw *entry) +{ + WRITE_ONCE(entry->val, 0); +} + +/** + * struct pvr_page_table_l1_entry_raw - A single entry in a level 1 page table. + * @val: The raw value of this entry. + * + * This type is a structure for type-checking purposes. At compile-time, its + * size is checked against %ROGUE_MMUCTRL_ENTRY_SIZE_PD_VALUE. + * + * The value stored in this structure can be decoded using the following bitmap: + * + * .. 
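The ROGUE_MMUCTRL_*_CLRMSK constants used by these macros are the inverse of the field mask, i.e. they clear the field, which is why both PVR_PAGE_TABLE_FIELD_GET() and PVR_PAGE_TABLE_FIELD_PREP() apply "& ~CLRMSK". A standalone illustration with a made-up field occupying bits 11..4:

#include <assert.h>
#include <stdint.h>

#define EX_FIELD_SHIFT  4
#define EX_FIELD_CLRMSK (~(uint64_t)(0xff << 4))   /* clears bits 11..4 */

#define EX_FIELD_PREP(v) \
	((((uint64_t)(v)) << EX_FIELD_SHIFT) & ~EX_FIELD_CLRMSK)
#define EX_FIELD_GET(e) \
	(((e) & ~EX_FIELD_CLRMSK) >> EX_FIELD_SHIFT)

int main(void)
{
	uint64_t entry = EX_FIELD_PREP(0x5a) | 1;  /* field plus a valid bit */

	assert(EX_FIELD_GET(entry) == 0x5a);
	assert(EX_FIELD_PREP(0x123) == ((uint64_t)0x23 << 4)); /* truncated */
	return 0;
}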
flat-table:: + * :widths: 1 5 + * :stub-columns: 1 + * + * * - 63..41 + * - *(reserved)* + * + * * - 40 + * - **Pending:** When valid bit is not set, indicates that a valid entry + * is pending and the MMU should wait for the driver to map the entry. + * This is used to support page demand mapping of memory. + * + * * - 39..5 + * - **Level 0 Page Table Base Address:** The way this value is + * interpreted depends on the page size. Bits not specified in the + * table below (e.g. bits 11..5 for page size 4KiB) should be + * considered reserved. + * + * This table shows the bits used in an L1 page table entry to + * represent the Physical Table Base Address for a given Page Size. + * Since each L1 page table entry covers 2MiB of address space, the + * maximum page size is 2MiB. + * + * .. flat-table:: + * :widths: 1 1 1 1 + * :header-rows: 1 + * :stub-columns: 1 + * + * * - Page size + * - L0 page table base address bits + * - Number of L0 page table entries + * - Size of L0 page table + * + * * - 4KiB + * - 39..12 + * - 512 + * - 4KiB + * + * * - 16KiB + * - 39..10 + * - 128 + * - 1KiB + * + * * - 64KiB + * - 39..8 + * - 32 + * - 256B + * + * * - 256KiB + * - 39..6 + * - 8 + * - 64B + * + * * - 1MiB + * - 39..5 (4 = '0') + * - 2 + * - 16B + * + * * - 2MiB + * - 39..5 (4..3 = '00') + * - 1 + * - 8B + * + * * - 4 + * - *(reserved)* + * + * * - 3..1 + * - **Page Size:** Sets the page size, from 4KiB to 2MiB. + * + * * - 0 + * - **Valid:** Indicates that the entry contains a valid L0 page table. + * If the valid bit is not set, then an attempted use of the page would + * result in a page fault. + */ +struct pvr_page_table_l1_entry_raw { + u64 val; +} __packed; +static_assert(sizeof(struct pvr_page_table_l1_entry_raw) * 8 == + ROGUE_MMUCTRL_ENTRY_SIZE_PD_VALUE); + +static bool +pvr_page_table_l1_entry_raw_is_valid(struct pvr_page_table_l1_entry_raw entry) +{ + return PVR_PAGE_TABLE_FIELD_GET(1, PD, VALID, entry); +} + +/** + * pvr_page_table_l1_entry_raw_set() - Write a valid entry into a raw level 1 + * page table. + * @entry: Target raw level 1 page table entry. + * @child_table_dma_addr: DMA address of the level 0 page table to be + * associated with @entry. + * + * When calling this function, @child_table_dma_addr must be a valid DMA + * address and a multiple of 4 KiB. + */ +static void +pvr_page_table_l1_entry_raw_set(struct pvr_page_table_l1_entry_raw *entry, + dma_addr_t child_table_dma_addr) +{ + WRITE_ONCE(entry->val, + PVR_PAGE_TABLE_FIELD_PREP(1, PD, VALID, true) | + PVR_PAGE_TABLE_FIELD_PREP(1, PD, ENTRY_PENDING, false) | + PVR_PAGE_TABLE_FIELD_PREP(1, PD, PAGE_SIZE, ROGUE_MMUCTRL_PAGE_SIZE_X) | + /* + * The use of a 4K-specific macro here is correct. It is + * a future optimization to allocate sub-host-page-sized + * blocks for individual tables, so the condition that any + * page table address is aligned to the size of the + * largest (a 4KB) table currently holds. + */ + (child_table_dma_addr & ~ROGUE_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK)); +} + +static void +pvr_page_table_l1_entry_raw_clear(struct pvr_page_table_l1_entry_raw *entry) +{ + WRITE_ONCE(entry->val, 0); +} + +/** + * struct pvr_page_table_l0_entry_raw - A single entry in a level 0 page table. + * @val: The raw value of this entry. + * + * This type is a structure for type-checking purposes. At compile-time, its + * size is checked against %ROGUE_MMUCTRL_ENTRY_SIZE_PT_VALUE. + * + * The value stored in this structure can be decoded using the following bitmap: + * + * .. 
flat-table:: + * :widths: 1 5 + * :stub-columns: 1 + * + * * - 63 + * - *(reserved)* + * + * * - 62 + * - **PM/FW Protect:** Indicates a protected region which only the + * Parameter Manager (PM) or firmware processor can write to. + * + * * - 61..40 + * - **VP Page (High):** Virtual-physical page used for Parameter Manager + * (PM) memory. This field is only used if the additional level of PB + * virtualization is enabled. The VP Page field is needed by the PM in + * order to correctly reconstitute the free lists after render + * completion. This (High) field holds bits 39..18 of the value; the + * Low field holds bits 17..12. Bits 11..0 are always zero because the + * value is always aligned to the 4KiB page size. + * + * * - 39..12 + * - **Physical Page Address:** The way this value is interpreted depends + * on the page size. Bits not specified in the table below (e.g. bits + * 20..12 for page size 2MiB) should be considered reserved. + * + * This table shows the bits used in an L0 page table entry to represent + * the Physical Page Address for a given page size (as defined in the + * associated L1 page table entry). + * + * .. flat-table:: + * :widths: 1 1 + * :header-rows: 1 + * :stub-columns: 1 + * + * * - Page size + * - Physical address bits + * + * * - 4KiB + * - 39..12 + * + * * - 16KiB + * - 39..14 + * + * * - 64KiB + * - 39..16 + * + * * - 256KiB + * - 39..18 + * + * * - 1MiB + * - 39..20 + * + * * - 2MiB + * - 39..21 + * + * * - 11..6 + * - **VP Page (Low):** Continuation of VP Page (High). + * + * * - 5 + * - **Pending:** When valid bit is not set, indicates that a valid entry + * is pending and the MMU should wait for the driver to map the entry. + * This is used to support page demand mapping of memory. + * + * * - 4 + * - **PM Src:** Set on Parameter Manager (PM) allocated page table + * entries when indicated by the PM. Note that this bit will only be set + * by the PM, not by the device driver. + * + * * - 3 + * - **SLC Bypass Control:** Specifies requests to this page should bypass + * the System Level Cache (SLC), if enabled in SLC configuration. + * + * * - 2 + * - **Cache Coherency:** Indicates that the page is coherent (i.e. it + * does not require a cache flush between operations on the CPU and the + * device). + * + * * - 1 + * - **Read Only:** If set, this bit indicates that the page is read only. + * An attempted write to this page would result in a write-protection + * fault. + * + * * - 0 + * - **Valid:** Indicates that the entry contains a valid page. If the + * valid bit is not set, then an attempted use of the page would result + * in a page fault. + */ +struct pvr_page_table_l0_entry_raw { + u64 val; +} __packed; +static_assert(sizeof(struct pvr_page_table_l0_entry_raw) * 8 == + ROGUE_MMUCTRL_ENTRY_SIZE_PT_VALUE); + +/** + * struct pvr_page_flags_raw - The configurable flags from a single entry in a + * level 0 page table. + * @val: The raw value of these flags. Since these are a strict subset of + * &struct pvr_page_table_l0_entry_raw; use that type for our member here. + * + * The flags stored in this type are: PM/FW Protect; SLC Bypass Control; Cache + * Coherency, and Read Only (bits 62, 3, 2 and 1 respectively). + * + * This type should never be instantiated directly; instead use + * pvr_page_flags_raw_create() to ensure only valid bits of @val are set. 
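A worked standalone encoding of a level 0 entry for 4KiB pages, using the bit positions from the table above (valid at bit 0, read-only at bit 1, cache coherency at bit 2, physical address in bits 39..12). The driver composes entries through pvr_page_table_l0_entry_raw_set(); this just makes the layout concrete:

#include <assert.h>
#include <stdint.h>

#define L0_VALID     (1ULL << 0)
#define L0_READ_ONLY (1ULL << 1)
#define L0_CC        (1ULL << 2)
#define L0_ADDR_MASK 0x000000fffffff000ULL   /* bits 39..12 */

static uint64_t l0_entry_encode(uint64_t dma_addr, int read_only, int coherent)
{
	return L0_VALID |
	       (read_only ? L0_READ_ONLY : 0) |
	       (coherent ? L0_CC : 0) |
	       (dma_addr & L0_ADDR_MASK);
}

int main(void)
{
	/* A read-only, coherent mapping of the 4KiB page at 0x80004000. */
	uint64_t entry = l0_entry_encode(0x80004000ULL, 1, 1);

	assert(entry == (0x80004000ULL | L0_VALID | L0_READ_ONLY | L0_CC));
	return 0;
}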
+ */ +struct pvr_page_flags_raw { + struct pvr_page_table_l0_entry_raw val; +} __packed; +static_assert(sizeof(struct pvr_page_flags_raw) == + sizeof(struct pvr_page_table_l0_entry_raw)); + +static bool +pvr_page_table_l0_entry_raw_is_valid(struct pvr_page_table_l0_entry_raw entry) +{ + return PVR_PAGE_TABLE_FIELD_GET(0, PT, VALID, entry); +} + +/** + * pvr_page_table_l0_entry_raw_set() - Write a valid entry into a raw level 0 + * page table. + * @entry: Target raw level 0 page table entry. + * @dma_addr: DMA address of the physical page to be associated with @entry. + * @flags: Options to be set on @entry. + * + * When calling this function, @child_table_dma_addr must be a valid DMA + * address and a multiple of %PVR_DEVICE_PAGE_SIZE. + * + * The @flags parameter is directly assigned into @entry. It is the callers + * responsibility to ensure that only bits specified in + * &struct pvr_page_flags_raw are set in @flags. + */ +static void +pvr_page_table_l0_entry_raw_set(struct pvr_page_table_l0_entry_raw *entry, + dma_addr_t dma_addr, + struct pvr_page_flags_raw flags) +{ + WRITE_ONCE(entry->val, PVR_PAGE_TABLE_FIELD_PREP(0, PT, VALID, true) | + PVR_PAGE_TABLE_FIELD_PREP(0, PT, ENTRY_PENDING, false) | + (dma_addr & ~ROGUE_MMUCTRL_PAGE_X_RANGE_CLRMSK) | + flags.val.val); +} + +static void +pvr_page_table_l0_entry_raw_clear(struct pvr_page_table_l0_entry_raw *entry) +{ + WRITE_ONCE(entry->val, 0); +} + +/** + * pvr_page_flags_raw_create() - Initialize the flag bits of a raw level 0 page + * table entry. + * @read_only: This page is read-only (see: Read Only). + * @cache_coherent: This page does not require cache flushes (see: Cache + * Coherency). + * @slc_bypass: This page bypasses the device cache (see: SLC Bypass Control). + * @pm_fw_protect: This page is only for use by the firmware or Parameter + * Manager (see PM/FW Protect). + * + * For more details on the use of these four options, see their respective + * entries in the table under &struct pvr_page_table_l0_entry_raw. + * + * Return: + * A new &struct pvr_page_flags_raw instance which can be passed directly to + * pvr_page_table_l0_entry_raw_set() or pvr_page_table_l0_insert(). + */ +static struct pvr_page_flags_raw +pvr_page_flags_raw_create(bool read_only, bool cache_coherent, bool slc_bypass, + bool pm_fw_protect) +{ + struct pvr_page_flags_raw flags; + + flags.val.val = + PVR_PAGE_TABLE_FIELD_PREP(0, PT, READ_ONLY, read_only) | + PVR_PAGE_TABLE_FIELD_PREP(0, PT, CC, cache_coherent) | + PVR_PAGE_TABLE_FIELD_PREP(0, PT, SLC_BYPASS_CTRL, slc_bypass) | + PVR_PAGE_TABLE_FIELD_PREP(0, PT, PM_META_PROTECT, pm_fw_protect); + + return flags; +} + +/** + * struct pvr_page_table_l2_raw - The raw data of a level 2 page table. + * + * This type is a structure for type-checking purposes. At compile-time, its + * size is checked against %PVR_MMU_BACKING_PAGE_SIZE. + */ +struct pvr_page_table_l2_raw { + /** @entries: The raw values of this table. */ + struct pvr_page_table_l2_entry_raw + entries[ROGUE_MMUCTRL_ENTRIES_PC_VALUE]; +} __packed; +static_assert(sizeof(struct pvr_page_table_l2_raw) == PVR_MMU_BACKING_PAGE_SIZE); + +/** + * struct pvr_page_table_l1_raw - The raw data of a level 1 page table. + * + * This type is a structure for type-checking purposes. At compile-time, its + * size is checked against %PVR_MMU_BACKING_PAGE_SIZE. + */ +struct pvr_page_table_l1_raw { + /** @entries: The raw values of this table. 
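Taken together, composing and installing a read-only, cache-coherent page mapping with the two helpers above looks like the following fragment; entry and dma_addr are assumed to come from a page table walk:

static void example_set_ro_coherent(struct pvr_page_table_l0_entry_raw *entry,
				    dma_addr_t dma_addr)
{
	/* read_only=true, cache_coherent=true; slc_bypass and
	 * pm_fw_protect left clear.
	 */
	struct pvr_page_flags_raw flags =
		pvr_page_flags_raw_create(true, true, false, false);

	pvr_page_table_l0_entry_raw_set(entry, dma_addr, flags);
}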
*/ + struct pvr_page_table_l1_entry_raw + entries[ROGUE_MMUCTRL_ENTRIES_PD_VALUE]; +} __packed; +static_assert(sizeof(struct pvr_page_table_l1_raw) == PVR_MMU_BACKING_PAGE_SIZE); + +/** + * struct pvr_page_table_l0_raw - The raw data of a level 0 page table. + * + * This type is a structure for type-checking purposes. At compile-time, its + * size is checked against %PVR_MMU_BACKING_PAGE_SIZE. + * + * .. caution:: + * + * The size of level 0 page tables is variable depending on the page size + * specified in the associated level 1 page table entry. Since the device + * page size in use is pegged to the host page size, it cannot vary at + * runtime. This structure is therefore only defined to contain the required + * number of entries for the current device page size. **You should never + * read or write beyond the last supported entry.** + */ +struct pvr_page_table_l0_raw { + /** @entries: The raw values of this table. */ + struct pvr_page_table_l0_entry_raw + entries[ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X]; +} __packed; +static_assert(sizeof(struct pvr_page_table_l0_raw) <= PVR_MMU_BACKING_PAGE_SIZE); + +/** + * DOC: Mirror page tables + */ + +/* + * We pre-declare these types because they cross-depend on pointers to each + * other. + */ +struct pvr_page_table_l1; +struct pvr_page_table_l0; + +/** + * struct pvr_page_table_l2 - A wrapped level 2 page table. + * + * To access the raw part of this table, use pvr_page_table_l2_get_raw(). + * Alternatively to access a raw entry directly, use + * pvr_page_table_l2_get_entry_raw(). + * + * A level 2 page table forms the root of the page table tree structure, so + * this type has no &parent or &parent_idx members. + */ +struct pvr_page_table_l2 { + /** + * @entries: The children of this node in the page table tree + * structure. These are also mirror tables. The indexing of this array + * is identical to that of the raw equivalent + * (&pvr_page_table_l1_raw.entries). + */ + struct pvr_page_table_l1 *entries[ROGUE_MMUCTRL_ENTRIES_PC_VALUE]; + + /** + * @backing_page: A handle to the memory which holds the raw + * equivalent of this table. **For internal use only.** + */ + struct pvr_mmu_backing_page backing_page; + + /** + * @entry_count: The current number of valid entries (that we know of) + * in this table. This value is essentially a refcount - the table is + * destroyed when this value is decremented to zero by + * pvr_page_table_l2_remove(). + */ + u16 entry_count; +}; + +/** + * pvr_page_table_l2_init() - Initialize a level 2 page table. + * @table: Target level 2 page table. + * @pvr_dev: Target PowerVR device + * + * It is expected that @table be zeroed (e.g. from kzalloc()) before calling + * this function. + * + * Return: + * * 0 on success, or + * * Any error encountered while intializing &table->backing_page using + * pvr_mmu_backing_page_init(). + */ +static int +pvr_page_table_l2_init(struct pvr_page_table_l2 *table, + struct pvr_device *pvr_dev) +{ + return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev); +} + +/** + * pvr_page_table_l2_fini() - Teardown a level 2 page table. + * @table: Target level 2 page table. + * + * It is an error to attempt to use @table after calling this function. + */ +static void +pvr_page_table_l2_fini(struct pvr_page_table_l2 *table) +{ + pvr_mmu_backing_page_fini(&table->backing_page); +} + +/** + * pvr_page_table_l2_sync() - Flush a level 2 page table from the CPU to the + * device. + * @table: Target level 2 page table. 
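The @entry_count fields documented above behave as refcounts on the mirror tables: inserting a child bumps the count, removing one decrements it, and the table is torn down exactly when the count reaches zero. A simplified standalone model of that rule, with parent detachment reduced to a flag:

#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

struct mirror_table {
	unsigned int entry_count;
	bool detached;
};

static void table_insert(struct mirror_table *t)
{
	/* The raw entry would be written and synced here. */
	t->entry_count++;
}

static bool table_remove(struct mirror_table *t)
{
	/* The raw entry would be cleared here. */
	if (--t->entry_count == 0) {
		t->detached = true;   /* parent entry cleared */
		return true;          /* caller frees the table */
	}
	return false;
}

int main(void)
{
	struct mirror_table *t = calloc(1, sizeof(*t));

	table_insert(t);
	table_insert(t);
	assert(!table_remove(t));  /* one entry left, table survives */
	assert(table_remove(t));   /* last entry gone, table is dead */
	free(t);
	return 0;
}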
+ * + * This is just a thin wrapper around pvr_mmu_backing_page_sync(), so the + * warning there applies here too: **Only call pvr_page_table_l2_sync() once + * you're sure you have no more changes to make to** @table **in the immediate + * future.** + * + * If child level 1 page tables of @table also need to be flushed, this should + * be done first using pvr_page_table_l1_sync() *before* calling this function. + */ +static void +pvr_page_table_l2_sync(struct pvr_page_table_l2 *table) +{ + pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_2_FLAGS); +} + +/** + * pvr_page_table_l2_get_raw() - Access the raw equivalent of a mirror level 2 + * page table. + * @table: Target level 2 page table. + * + * Essentially returns the CPU address of the raw equivalent of @table, cast to + * a &struct pvr_page_table_l2_raw pointer. + * + * You probably want to call pvr_page_table_l2_get_entry_raw() instead. + * + * Return: + * The raw equivalent of @table. + */ +static struct pvr_page_table_l2_raw * +pvr_page_table_l2_get_raw(struct pvr_page_table_l2 *table) +{ + return table->backing_page.host_ptr; +} + +/** + * pvr_page_table_l2_get_entry_raw() - Access an entry from the raw equivalent + * of a mirror level 2 page table. + * @table: Target level 2 page table. + * @idx: Index of the entry to access. + * + * Technically this function returns a pointer to a slot in a raw level 2 page + * table, since the returned "entry" is not guaranteed to be valid. The caller + * must verify the validity of the entry at the returned address (perhaps using + * pvr_page_table_l2_entry_raw_is_valid()) before reading or overwriting it. + * + * The value of @idx is not checked here; it is the callers responsibility to + * ensure @idx refers to a valid index within @table before dereferencing the + * returned pointer. + * + * Return: + * A pointer to the requested raw level 2 page table entry. + */ +static struct pvr_page_table_l2_entry_raw * +pvr_page_table_l2_get_entry_raw(struct pvr_page_table_l2 *table, u16 idx) +{ + return &pvr_page_table_l2_get_raw(table)->entries[idx]; +} + +/** + * pvr_page_table_l2_entry_is_valid() - Check if a level 2 page table entry is + * marked as valid. + * @table: Target level 2 page table. + * @idx: Index of the entry to check. + * + * The value of @idx is not checked here; it is the callers responsibility to + * ensure @idx refers to a valid index within @table before calling this + * function. + */ +static bool +pvr_page_table_l2_entry_is_valid(struct pvr_page_table_l2 *table, u16 idx) +{ + struct pvr_page_table_l2_entry_raw entry_raw = + *pvr_page_table_l2_get_entry_raw(table, idx); + + return pvr_page_table_l2_entry_raw_is_valid(entry_raw); +} + +/** + * struct pvr_page_table_l1 - A wrapped level 1 page table. + * + * To access the raw part of this table, use pvr_page_table_l1_get_raw(). + * Alternatively to access a raw entry directly, use + * pvr_page_table_l1_get_entry_raw(). + */ +struct pvr_page_table_l1 { + /** + * @entries: The children of this node in the page table tree + * structure. These are also mirror tables. The indexing of this array + * is identical to that of the raw equivalent + * (&pvr_page_table_l0_raw.entries). + */ + struct pvr_page_table_l0 *entries[ROGUE_MMUCTRL_ENTRIES_PD_VALUE]; + + /** + * @backing_page: A handle to the memory which holds the raw + * equivalent of this table. **For internal use only.** + */ + struct pvr_mmu_backing_page backing_page; + + union { + /** + * @parent: The parent of this node in the page table tree structure. 
+ * + * This is also a mirror table. + * + * Only valid when the L1 page table is active. When the L1 page table + * has been removed and queued for destruction, the next_free field + * should be used instead. + */ + struct pvr_page_table_l2 *parent; + + /** + * @next_free: Pointer to the next L1 page table to take/free. + * + * Used to form a linked list of L1 page tables. This is used + * when preallocating tables and when the page table has been + * removed and queued for destruction. + */ + struct pvr_page_table_l1 *next_free; + }; + + /** + * @parent_idx: The index of the entry in the parent table (see + * @parent) which corresponds to this table. + */ + u16 parent_idx; + + /** + * @entry_count: The current number of valid entries (that we know of) + * in this table. This value is essentially a refcount - the table is + * destroyed when this value is decremented to zero by + * pvr_page_table_l1_remove(). + */ + u16 entry_count; +}; + +/** + * pvr_page_table_l1_init() - Initialize a level 1 page table. + * @table: Target level 1 page table. + * @pvr_dev: Target PowerVR device + * + * When this function returns successfully, @table is still not considered + * valid. It must be inserted into the page table tree structure with + * pvr_page_table_l2_insert() before it is ready for use. + * + * It is expected that @table be zeroed (e.g. from kzalloc()) before calling + * this function. + * + * Return: + * * 0 on success, or + * * Any error encountered while intializing &table->backing_page using + * pvr_mmu_backing_page_init(). + */ +static int +pvr_page_table_l1_init(struct pvr_page_table_l1 *table, + struct pvr_device *pvr_dev) +{ + table->parent_idx = PVR_IDX_INVALID; + + return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev); +} + +/** + * pvr_page_table_l1_free() - Teardown a level 1 page table. + * @table: Target level 1 page table. + * + * It is an error to attempt to use @table after calling this function, even + * indirectly. This includes calling pvr_page_table_l2_remove(), which must + * be called *before* pvr_page_table_l1_free(). + */ +static void +pvr_page_table_l1_free(struct pvr_page_table_l1 *table) +{ + pvr_mmu_backing_page_fini(&table->backing_page); + kfree(table); +} + +/** + * pvr_page_table_l1_sync() - Flush a level 1 page table from the CPU to the + * device. + * @table: Target level 1 page table. + * + * This is just a thin wrapper around pvr_mmu_backing_page_sync(), so the + * warning there applies here too: **Only call pvr_page_table_l1_sync() once + * you're sure you have no more changes to make to** @table **in the immediate + * future.** + * + * If child level 0 page tables of @table also need to be flushed, this should + * be done first using pvr_page_table_l0_sync() *before* calling this function. + */ +static void +pvr_page_table_l1_sync(struct pvr_page_table_l1 *table) +{ + pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_1_FLAGS); +} + +/** + * pvr_page_table_l1_get_raw() - Access the raw equivalent of a mirror level 1 + * page table. + * @table: Target level 1 page table. + * + * Essentially returns the CPU address of the raw equivalent of @table, cast to + * a &struct pvr_page_table_l1_raw pointer. + * + * You probably want to call pvr_page_table_l1_get_entry_raw() instead. + * + * Return: + * The raw equivalent of @table. 
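The @next_free member turns the preallocated tables into a simple LIFO free list: the op context pushes tables onto it when preparing a map and pops them off during the walk, so no allocation happens while page tables are being edited. A standalone sketch of the push/pop pair:

#include <assert.h>
#include <stddef.h>

struct table {
	struct table *next_free;
};

static void freelist_push(struct table **head, struct table *t)
{
	t->next_free = *head;
	*head = t;
}

static struct table *freelist_pop(struct table **head)
{
	struct table *t = *head;

	if (t)
		*head = t->next_free;
	return t;
}

int main(void)
{
	struct table a = { NULL }, b = { NULL }, *head = NULL;

	freelist_push(&head, &a);
	freelist_push(&head, &b);
	assert(freelist_pop(&head) == &b);   /* LIFO order */
	assert(freelist_pop(&head) == &a);
	assert(freelist_pop(&head) == NULL); /* list exhausted */
	return 0;
}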
+ */ +static struct pvr_page_table_l1_raw * +pvr_page_table_l1_get_raw(struct pvr_page_table_l1 *table) +{ + return table->backing_page.host_ptr; +} + +/** + * pvr_page_table_l1_get_entry_raw() - Access an entry from the raw equivalent + * of a mirror level 1 page table. + * @table: Target level 1 page table. + * @idx: Index of the entry to access. + * + * Technically this function returns a pointer to a slot in a raw level 1 page + * table, since the returned "entry" is not guaranteed to be valid. The caller + * must verify the validity of the entry at the returned address (perhaps using + * pvr_page_table_l1_entry_raw_is_valid()) before reading or overwriting it. + * + * The value of @idx is not checked here; it is the callers responsibility to + * ensure @idx refers to a valid index within @table before dereferencing the + * returned pointer. + * + * Return: + * A pointer to the requested raw level 1 page table entry. + */ +static struct pvr_page_table_l1_entry_raw * +pvr_page_table_l1_get_entry_raw(struct pvr_page_table_l1 *table, u16 idx) +{ + return &pvr_page_table_l1_get_raw(table)->entries[idx]; +} + +/** + * pvr_page_table_l1_entry_is_valid() - Check if a level 1 page table entry is + * marked as valid. + * @table: Target level 1 page table. + * @idx: Index of the entry to check. + * + * The value of @idx is not checked here; it is the callers responsibility to + * ensure @idx refers to a valid index within @table before calling this + * function. + */ +static bool +pvr_page_table_l1_entry_is_valid(struct pvr_page_table_l1 *table, u16 idx) +{ + struct pvr_page_table_l1_entry_raw entry_raw = + *pvr_page_table_l1_get_entry_raw(table, idx); + + return pvr_page_table_l1_entry_raw_is_valid(entry_raw); +} + +/** + * struct pvr_page_table_l0 - A wrapped level 0 page table. + * + * To access the raw part of this table, use pvr_page_table_l0_get_raw(). + * Alternatively to access a raw entry directly, use + * pvr_page_table_l0_get_entry_raw(). + * + * There is no mirror representation of an individual page, so this type has no + * &entries member. + */ +struct pvr_page_table_l0 { + /** + * @backing_page: A handle to the memory which holds the raw + * equivalent of this table. **For internal use only.** + */ + struct pvr_mmu_backing_page backing_page; + + union { + /** + * @parent: The parent of this node in the page table tree structure. + * + * This is also a mirror table. + * + * Only valid when the L0 page table is active. When the L0 page table + * has been removed and queued for destruction, the next_free field + * should be used instead. + */ + struct pvr_page_table_l1 *parent; + + /** + * @next_free: Pointer to the next L0 page table to take/free. + * + * Used to form a linked list of L0 page tables. This is used + * when preallocating tables and when the page table has been + * removed and queued for destruction. + */ + struct pvr_page_table_l0 *next_free; + }; + + /** + * @parent_idx: The index of the entry in the parent table (see + * @parent) which corresponds to this table. + */ + u16 parent_idx; + + /** + * @entry_count: The current number of valid entries (that we know of) + * in this table. This value is essentially a refcount - the table is + * destroyed when this value is decremented to zero by + * pvr_page_table_l0_remove(). + */ + u16 entry_count; +}; + +/** + * pvr_page_table_l0_init() - Initialize a level 0 page table. + * @table: Target level 0 page table. 
+ * @pvr_dev: Target PowerVR device.
+ *
+ * When this function returns successfully, @table is still not considered
+ * valid. It must be inserted into the page table tree structure with
+ * pvr_page_table_l1_insert() before it is ready for use.
+ *
+ * It is expected that @table be zeroed (e.g. from kzalloc()) before calling
+ * this function.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error encountered while initializing &table->backing_page using
+ * pvr_mmu_backing_page_init().
+ */
+static int
+pvr_page_table_l0_init(struct pvr_page_table_l0 *table,
+ struct pvr_device *pvr_dev)
+{
+ table->parent_idx = PVR_IDX_INVALID;
+
+ return pvr_mmu_backing_page_init(&table->backing_page, pvr_dev);
+}
+
+/**
+ * pvr_page_table_l0_free() - Tear down a level 0 page table.
+ * @table: Target level 0 page table.
+ *
+ * It is an error to attempt to use @table after calling this function, even
+ * indirectly. This includes calling pvr_page_table_l1_remove(), which must
+ * be called *before* pvr_page_table_l0_free().
+ */
+static void
+pvr_page_table_l0_free(struct pvr_page_table_l0 *table)
+{
+ pvr_mmu_backing_page_fini(&table->backing_page);
+ kfree(table);
+}
+
+/**
+ * pvr_page_table_l0_sync() - Flush a level 0 page table from the CPU to the
+ * device.
+ * @table: Target level 0 page table.
+ *
+ * This is just a thin wrapper around pvr_mmu_backing_page_sync(), so the
+ * warning there applies here too: **Only call pvr_page_table_l0_sync() once
+ * you're sure you have no more changes to make to** @table **in the immediate
+ * future.**
+ *
+ * If child pages of @table also need to be flushed, this should be done first
+ * using a DMA sync function (e.g. dma_sync_sg_for_device()) *before* calling
+ * this function.
+ */
+static void
+pvr_page_table_l0_sync(struct pvr_page_table_l0 *table)
+{
+ pvr_mmu_backing_page_sync(&table->backing_page, PVR_MMU_SYNC_LEVEL_0_FLAGS);
+}
+
+/**
+ * pvr_page_table_l0_get_raw() - Access the raw equivalent of a mirror level 0
+ * page table.
+ * @table: Target level 0 page table.
+ *
+ * Essentially returns the CPU address of the raw equivalent of @table, cast to
+ * a &struct pvr_page_table_l0_raw pointer.
+ *
+ * You probably want to call pvr_page_table_l0_get_entry_raw() instead.
+ *
+ * Return:
+ * The raw equivalent of @table.
+ */
+static struct pvr_page_table_l0_raw *
+pvr_page_table_l0_get_raw(struct pvr_page_table_l0 *table)
+{
+ return table->backing_page.host_ptr;
+}
+
+/**
+ * pvr_page_table_l0_get_entry_raw() - Access an entry from the raw equivalent
+ * of a mirror level 0 page table.
+ * @table: Target level 0 page table.
+ * @idx: Index of the entry to access.
+ *
+ * Technically this function returns a pointer to a slot in a raw level 0 page
+ * table, since the returned "entry" is not guaranteed to be valid. The caller
+ * must verify the validity of the entry at the returned address (perhaps using
+ * pvr_page_table_l0_entry_raw_is_valid()) before reading or overwriting it.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before dereferencing the
+ * returned pointer. This is especially important for level 0 page tables, which
+ * can have a variable number of entries.
+ *
+ * Return:
+ * A pointer to the requested raw level 0 page table entry.
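+ *
+ * For example (illustrative only), a caller should pair the bounds
+ * requirement with a validity check before use::
+ *
+ *   if (idx < table_entry_limit &&
+ *       pvr_page_table_l0_entry_is_valid(table, idx))
+ *           entry = *pvr_page_table_l0_get_entry_raw(table, idx);
+ *
+ * where table_entry_limit is a hypothetical bound the caller derives for
+ * this table; no such helper is defined in this file.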
+ */
+static struct pvr_page_table_l0_entry_raw *
+pvr_page_table_l0_get_entry_raw(struct pvr_page_table_l0 *table, u16 idx)
+{
+ return &pvr_page_table_l0_get_raw(table)->entries[idx];
+}
+
+/**
+ * pvr_page_table_l0_entry_is_valid() - Check if a level 0 page table entry is
+ * marked as valid.
+ * @table: Target level 0 page table.
+ * @idx: Index of the entry to check.
+ *
+ * The value of @idx is not checked here; it is the caller's responsibility to
+ * ensure @idx refers to a valid index within @table before calling this
+ * function.
+ */
+static bool
+pvr_page_table_l0_entry_is_valid(struct pvr_page_table_l0 *table, u16 idx)
+{
+ struct pvr_page_table_l0_entry_raw entry_raw =
+ *pvr_page_table_l0_get_entry_raw(table, idx);
+
+ return pvr_page_table_l0_entry_raw_is_valid(entry_raw);
+}
+
+/**
+ * struct pvr_mmu_context - context holding data for operations at page
+ * catalogue level, intended for use with a VM context.
+ */
+struct pvr_mmu_context {
+ /** @pvr_dev: The PVR device associated with the owning VM context. */
+ struct pvr_device *pvr_dev;
+
+ /** @page_table_l2: The MMU table root. */
+ struct pvr_page_table_l2 page_table_l2;
+};
+
+/**
+ * struct pvr_page_table_ptr - A reference to a single physical page as indexed
+ * by the page table structure.
+ *
+ * Intended for embedding in a &struct pvr_mmu_op_context.
+ */
+struct pvr_page_table_ptr {
+ /**
+ * @l1_table: A cached handle to the level 1 page table the
+ * context is currently traversing.
+ */
+ struct pvr_page_table_l1 *l1_table;
+
+ /**
+ * @l0_table: A cached handle to the level 0 page table the
+ * context is currently traversing.
+ */
+ struct pvr_page_table_l0 *l0_table;
+
+ /**
+ * @l2_idx: Index into the level 2 page table the context is
+ * currently referencing.
+ */
+ u16 l2_idx;
+
+ /**
+ * @l1_idx: Index into the level 1 page table the context is
+ * currently referencing.
+ */
+ u16 l1_idx;
+
+ /**
+ * @l0_idx: Index into the level 0 page table the context is
+ * currently referencing.
+ */
+ u16 l0_idx;
+};
+
+/**
+ * struct pvr_mmu_op_context - context holding data for individual
+ * device-virtual mapping operations. Intended for use with a VM bind operation.
+ */
+struct pvr_mmu_op_context {
+ /** @mmu_ctx: The MMU context associated with the owning VM context. */
+ struct pvr_mmu_context *mmu_ctx;
+
+ /** @map: Data specifically for map operations. */
+ struct {
+ /**
+ * @sgt: Scatter gather table containing pages pinned for use by
+ * this context - these are currently pinned when initialising
+ * the VM bind operation.
+ */
+ struct sg_table *sgt;
+
+ /** @sgt_offset: Start offset of the device-virtual mapping. */
+ u64 sgt_offset;
+
+ /**
+ * @l1_prealloc_tables: Preallocated L1 page table objects
+ * used by this context when creating a page mapping. Linked list
+ * fully created during initialisation.
+ */
+ struct pvr_page_table_l1 *l1_prealloc_tables;
+
+ /**
+ * @l0_prealloc_tables: Preallocated L0 page table objects
+ * used by this context when creating a page mapping. Linked list
+ * fully created during initialisation.
+ */
+ struct pvr_page_table_l0 *l0_prealloc_tables;
+ } map;
+
+ /** @unmap: Data specifically for unmap operations. */
+ struct {
+ /**
+ * @l1_free_tables: Collects page table objects freed by unmap
+ * ops. Linked list empty at creation.
+ */
+ struct pvr_page_table_l1 *l1_free_tables;
+
+ /**
+ * @l0_free_tables: Collects page table objects freed by unmap
+ * ops. Linked list empty at creation.
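+ *
+ * For example, after two tables have been freed the list reads
+ * (newest first) l0_free_tables -> T2 -> T1 -> NULL, since
+ * pvr_page_table_l1_remove() pushes each freed table at the head
+ * (T1 and T2 being purely illustrative names).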
+ */
+ struct pvr_page_table_l0 *l0_free_tables;
+ } unmap;
+
+ /**
+ * @curr_page: A reference to a single physical page as indexed by the
+ * page table structure.
+ */
+ struct pvr_page_table_ptr curr_page;
+
+ /**
+ * @sync_level_required: The maximum level of the page table tree
+ * structure which has (possibly) been modified since it was last
+ * flushed to the device.
+ *
+ * This field should only be set with pvr_mmu_op_context_require_sync()
+ * or indirectly by pvr_mmu_op_context_sync_partial().
+ */
+ enum pvr_mmu_sync_level sync_level_required;
+};
+
+/**
+ * pvr_page_table_l2_insert() - Insert an entry referring to a level 1 page
+ * table into a level 2 page table.
+ * @op_ctx: Target MMU op context pointing at the entry to insert the L1 page
+ * table into.
+ * @child_table: Target level 1 page table to be referenced by the new entry.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L2 entry.
+ *
+ * It is the caller's responsibility to execute any memory barriers to ensure
+ * that the creation of @child_table is ordered before the L2 entry is inserted.
+ */
+static void
+pvr_page_table_l2_insert(struct pvr_mmu_op_context *op_ctx,
+ struct pvr_page_table_l1 *child_table)
+{
+ struct pvr_page_table_l2 *l2_table =
+ &op_ctx->mmu_ctx->page_table_l2;
+ struct pvr_page_table_l2_entry_raw *entry_raw =
+ pvr_page_table_l2_get_entry_raw(l2_table,
+ op_ctx->curr_page.l2_idx);
+
+ pvr_page_table_l2_entry_raw_set(entry_raw,
+ child_table->backing_page.dma_addr);
+
+ child_table->parent = l2_table;
+ child_table->parent_idx = op_ctx->curr_page.l2_idx;
+ l2_table->entries[op_ctx->curr_page.l2_idx] = child_table;
+ ++l2_table->entry_count;
+ op_ctx->curr_page.l1_table = child_table;
+}
+
+/**
+ * pvr_page_table_l2_remove() - Remove a level 1 page table from a level 2 page
+ * table.
+ * @op_ctx: Target MMU op context pointing at the L2 entry to remove.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L2 entry.
+ */
+static void
+pvr_page_table_l2_remove(struct pvr_mmu_op_context *op_ctx)
+{
+ struct pvr_page_table_l2 *l2_table =
+ &op_ctx->mmu_ctx->page_table_l2;
+ struct pvr_page_table_l2_entry_raw *entry_raw =
+ pvr_page_table_l2_get_entry_raw(l2_table,
+ op_ctx->curr_page.l1_table->parent_idx);
+
+ WARN_ON(op_ctx->curr_page.l1_table->parent != l2_table);
+
+ pvr_page_table_l2_entry_raw_clear(entry_raw);
+
+ l2_table->entries[op_ctx->curr_page.l1_table->parent_idx] = NULL;
+ op_ctx->curr_page.l1_table->parent_idx = PVR_IDX_INVALID;
+ op_ctx->curr_page.l1_table->next_free = op_ctx->unmap.l1_free_tables;
+ op_ctx->unmap.l1_free_tables = op_ctx->curr_page.l1_table;
+ op_ctx->curr_page.l1_table = NULL;
+
+ --l2_table->entry_count;
+}
+
+/**
+ * pvr_page_table_l1_insert() - Insert an entry referring to a level 0 page
+ * table into a level 1 page table.
+ * @op_ctx: Target MMU op context pointing at the entry to insert the L0 page
+ * table into.
+ * @child_table: L0 page table to insert.
+ *
+ * It is the caller's responsibility to ensure @op_ctx.curr_page points to a
+ * valid L1 entry.
+ *
+ * It is the caller's responsibility to execute any memory barriers to ensure
+ * that the creation of @child_table is ordered before the L1 entry is inserted.
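+ *
+ * For example, pvr_page_table_l0_get_or_insert() issues wmb() after a new
+ * table has been fully written out and before calling this function.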
+ */ +static void +pvr_page_table_l1_insert(struct pvr_mmu_op_context *op_ctx, + struct pvr_page_table_l0 *child_table) +{ + struct pvr_page_table_l1_entry_raw *entry_raw = + pvr_page_table_l1_get_entry_raw(op_ctx->curr_page.l1_table, + op_ctx->curr_page.l1_idx); + + pvr_page_table_l1_entry_raw_set(entry_raw, + child_table->backing_page.dma_addr); + + child_table->parent = op_ctx->curr_page.l1_table; + child_table->parent_idx = op_ctx->curr_page.l1_idx; + op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l1_idx] = child_table; + ++op_ctx->curr_page.l1_table->entry_count; + op_ctx->curr_page.l0_table = child_table; +} + +/** + * pvr_page_table_l1_remove() - Remove a level 0 page table from a level 1 page + * table. + * @op_ctx: Target MMU op context pointing at the L1 entry to remove. + * + * If this function results in the L1 table becoming empty, it will be removed + * from its parent level 2 page table and destroyed. + * + * It is the caller's responsibility to ensure @op_ctx.curr_page points to a + * valid L1 entry. + */ +static void +pvr_page_table_l1_remove(struct pvr_mmu_op_context *op_ctx) +{ + struct pvr_page_table_l1_entry_raw *entry_raw = + pvr_page_table_l1_get_entry_raw(op_ctx->curr_page.l0_table->parent, + op_ctx->curr_page.l0_table->parent_idx); + + WARN_ON(op_ctx->curr_page.l0_table->parent != + op_ctx->curr_page.l1_table); + + pvr_page_table_l1_entry_raw_clear(entry_raw); + + op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l0_table->parent_idx] = NULL; + op_ctx->curr_page.l0_table->parent_idx = PVR_IDX_INVALID; + op_ctx->curr_page.l0_table->next_free = op_ctx->unmap.l0_free_tables; + op_ctx->unmap.l0_free_tables = op_ctx->curr_page.l0_table; + op_ctx->curr_page.l0_table = NULL; + + if (--op_ctx->curr_page.l1_table->entry_count == 0) { + /* Clear the parent L2 page table entry. */ + if (op_ctx->curr_page.l1_table->parent_idx != PVR_IDX_INVALID) + pvr_page_table_l2_remove(op_ctx); + } +} + +/** + * pvr_page_table_l0_insert() - Insert an entry referring to a physical page + * into a level 0 page table. + * @op_ctx: Target MMU op context pointing at the L0 entry to insert. + * @dma_addr: Target DMA address to be referenced by the new entry. + * @flags: Page options to be stored in the new entry. + * + * It is the caller's responsibility to ensure @op_ctx.curr_page points to a + * valid L0 entry. + */ +static void +pvr_page_table_l0_insert(struct pvr_mmu_op_context *op_ctx, + dma_addr_t dma_addr, struct pvr_page_flags_raw flags) +{ + struct pvr_page_table_l0_entry_raw *entry_raw = + pvr_page_table_l0_get_entry_raw(op_ctx->curr_page.l0_table, + op_ctx->curr_page.l0_idx); + + pvr_page_table_l0_entry_raw_set(entry_raw, dma_addr, flags); + + /* + * There is no entry to set here - we don't keep a mirror of + * individual pages. + */ + + ++op_ctx->curr_page.l0_table->entry_count; +} + +/** + * pvr_page_table_l0_remove() - Remove a physical page from a level 0 page + * table. + * @op_ctx: Target MMU op context pointing at the L0 entry to remove. + * + * If this function results in the L0 table becoming empty, it will be removed + * from its parent L1 page table and destroyed. + * + * It is the caller's responsibility to ensure @op_ctx.curr_page points to a + * valid L0 entry. 
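+ *
+ * For example, removing the last entry of an L0 table whose parent L1
+ * table holds no other entries cascades upwards: pvr_page_table_l1_remove()
+ * is called here, which may in turn call pvr_page_table_l2_remove().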
+ */ +static void +pvr_page_table_l0_remove(struct pvr_mmu_op_context *op_ctx) +{ + struct pvr_page_table_l0_entry_raw *entry_raw = + pvr_page_table_l0_get_entry_raw(op_ctx->curr_page.l0_table, + op_ctx->curr_page.l0_idx); + + pvr_page_table_l0_entry_raw_clear(entry_raw); + + /* + * There is no entry to clear here - we don't keep a mirror of + * individual pages. + */ + + if (--op_ctx->curr_page.l0_table->entry_count == 0) { + /* Clear the parent L1 page table entry. */ + if (op_ctx->curr_page.l0_table->parent_idx != PVR_IDX_INVALID) + pvr_page_table_l1_remove(op_ctx); + } +} + +/** + * DOC: Page table index utilities + */ + +/** + * pvr_page_table_l2_idx() - Calculate the level 2 page table index for a + * device-virtual address. + * @device_addr: Target device-virtual address. + * + * This function does not perform any bounds checking - it is the caller's + * responsibility to ensure that @device_addr is valid before interpreting + * the result. + * + * Return: + * The index into a level 2 page table corresponding to @device_addr. + */ +static u16 +pvr_page_table_l2_idx(u64 device_addr) +{ + return (device_addr & ~ROGUE_MMUCTRL_VADDR_PC_INDEX_CLRMSK) >> + ROGUE_MMUCTRL_VADDR_PC_INDEX_SHIFT; +} + +/** + * pvr_page_table_l1_idx() - Calculate the level 1 page table index for a + * device-virtual address. + * @device_addr: Target device-virtual address. + * + * This function does not perform any bounds checking - it is the caller's + * responsibility to ensure that @device_addr is valid before interpreting + * the result. + * + * Return: + * The index into a level 1 page table corresponding to @device_addr. + */ +static u16 +pvr_page_table_l1_idx(u64 device_addr) +{ + return (device_addr & ~ROGUE_MMUCTRL_VADDR_PD_INDEX_CLRMSK) >> + ROGUE_MMUCTRL_VADDR_PD_INDEX_SHIFT; +} + +/** + * pvr_page_table_l0_idx() - Calculate the level 0 page table index for a + * device-virtual address. + * @device_addr: Target device-virtual address. + * + * This function does not perform any bounds checking - it is the caller's + * responsibility to ensure that @device_addr is valid before interpreting + * the result. + * + * Return: + * The index into a level 0 page table corresponding to @device_addr. + */ +static u16 +pvr_page_table_l0_idx(u64 device_addr) +{ + return (device_addr & ~ROGUE_MMUCTRL_VADDR_PT_INDEX_CLRMSK) >> + ROGUE_MMUCTRL_PAGE_X_RANGE_SHIFT; +} + +/** + * DOC: High-level page table operations + */ + +/** + * pvr_page_table_l1_get_or_insert() - Retrieves (optionally inserting if + * necessary) a level 1 page table from the specified level 2 page table entry. + * @op_ctx: Target MMU op context. + * @should_insert: [IN] Specifies whether new page tables should be inserted + * when empty page table entries are encountered during traversal. + * + * Return: + * * 0 on success, or + * + * If @should_insert is %false: + * * -%ENXIO if a level 1 page table would have been inserted. + * + * If @should_insert is %true: + * * Any error encountered while inserting the level 1 page table. + */ +static int +pvr_page_table_l1_get_or_insert(struct pvr_mmu_op_context *op_ctx, + bool should_insert) +{ + struct pvr_page_table_l2 *l2_table = + &op_ctx->mmu_ctx->page_table_l2; + struct pvr_page_table_l1 *table; + + if (pvr_page_table_l2_entry_is_valid(l2_table, + op_ctx->curr_page.l2_idx)) { + op_ctx->curr_page.l1_table = + l2_table->entries[op_ctx->curr_page.l2_idx]; + return 0; + } + + if (!should_insert) + return -ENXIO; + + /* Take a prealloced table. 
*/
+ table = op_ctx->map.l1_prealloc_tables;
+ if (!table)
+ return -ENOMEM;
+
+ /* Pop */
+ op_ctx->map.l1_prealloc_tables = table->next_free;
+ table->next_free = NULL;
+
+ /* Ensure new table is fully written out before adding to L2 page table. */
+ wmb();
+
+ pvr_page_table_l2_insert(op_ctx, table);
+
+ return 0;
+}
+
+/**
+ * pvr_page_table_l0_get_or_insert() - Retrieves (optionally inserting if
+ * necessary) a level 0 page table from the specified level 1 page table entry.
+ * @op_ctx: Target MMU op context.
+ * @should_insert: [IN] Specifies whether new page tables should be inserted
+ * when empty page table entries are encountered during traversal.
+ *
+ * Return:
+ * * 0 on success,
+ *
+ * If @should_insert is %false:
+ * * -%ENXIO if a level 0 page table would have been inserted.
+ *
+ * If @should_insert is %true:
+ * * Any error encountered while inserting the level 0 page table.
+ */
+static int
+pvr_page_table_l0_get_or_insert(struct pvr_mmu_op_context *op_ctx,
+ bool should_insert)
+{
+ struct pvr_page_table_l0 *table;
+
+ if (pvr_page_table_l1_entry_is_valid(op_ctx->curr_page.l1_table,
+ op_ctx->curr_page.l1_idx)) {
+ op_ctx->curr_page.l0_table =
+ op_ctx->curr_page.l1_table->entries[op_ctx->curr_page.l1_idx];
+ return 0;
+ }
+
+ if (!should_insert)
+ return -ENXIO;
+
+ /* Take a prealloced table. */
+ table = op_ctx->map.l0_prealloc_tables;
+ if (!table)
+ return -ENOMEM;
+
+ /* Pop */
+ op_ctx->map.l0_prealloc_tables = table->next_free;
+ table->next_free = NULL;
+
+ /* Ensure new table is fully written out before adding to L1 page table. */
+ wmb();
+
+ pvr_page_table_l1_insert(op_ctx, table);
+
+ return 0;
+}
+
+/**
+ * pvr_mmu_context_create() - Create an MMU context.
+ * @pvr_dev: PVR device associated with owning VM context.
+ *
+ * Returns:
+ * * Newly created MMU context object on success, or
+ * * -%ENOMEM if no memory is available, or
+ * * Any error code returned by pvr_page_table_l2_init().
+ */
+struct pvr_mmu_context *pvr_mmu_context_create(struct pvr_device *pvr_dev)
+{
+ struct pvr_mmu_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ int err;
+
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ err = pvr_page_table_l2_init(&ctx->page_table_l2, pvr_dev);
+ if (err) {
+ kfree(ctx);
+ return ERR_PTR(err);
+ }
+
+ ctx->pvr_dev = pvr_dev;
+
+ return ctx;
+}
+
+/**
+ * pvr_mmu_context_destroy() - Destroy an MMU context.
+ * @ctx: Target MMU context.
+ */
+void pvr_mmu_context_destroy(struct pvr_mmu_context *ctx)
+{
+ pvr_page_table_l2_fini(&ctx->page_table_l2);
+ kfree(ctx);
+}
+
+/**
+ * pvr_mmu_get_root_table_dma_addr() - Get the DMA address of the root of the
+ * page table structure behind a VM context.
+ * @ctx: Target MMU context.
+ */
+dma_addr_t pvr_mmu_get_root_table_dma_addr(struct pvr_mmu_context *ctx)
+{
+ return ctx->page_table_l2.backing_page.dma_addr;
+}
+
+/**
+ * pvr_page_table_l1_alloc() - Allocate an L1 page table object.
+ * @ctx: MMU context of owning VM context.
+ *
+ * Returns:
+ * * Newly created page table object on success, or
+ * * -%ENOMEM if no memory is available, or
+ * * Any error code returned by pvr_page_table_l1_init().
+ */
+static struct pvr_page_table_l1 *
+pvr_page_table_l1_alloc(struct pvr_mmu_context *ctx)
+{
+ int err;
+
+ struct pvr_page_table_l1 *table =
+ kzalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ err = pvr_page_table_l1_init(table, ctx->pvr_dev);
+ if (err) {
+ kfree(table);
+ return ERR_PTR(err);
+ }
+
+ return table;
+}
+
+/**
+ * pvr_page_table_l0_alloc() - Allocate an L0 page table object.
+ * @ctx: MMU context of owning VM context.
+ *
+ * Returns:
+ * * Newly created page table object on success, or
+ * * -%ENOMEM if no memory is available, or
+ * * Any error code returned by pvr_page_table_l0_init().
+ */
+static struct pvr_page_table_l0 *
+pvr_page_table_l0_alloc(struct pvr_mmu_context *ctx)
+{
+ int err;
+
+ struct pvr_page_table_l0 *table =
+ kzalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ err = pvr_page_table_l0_init(table, ctx->pvr_dev);
+ if (err) {
+ kfree(table);
+ return ERR_PTR(err);
+ }
+
+ return table;
+}
+
+/**
+ * pvr_mmu_op_context_require_sync() - Mark an MMU op context as requiring a
+ * sync operation for the referenced page tables up to a specified level.
+ * @op_ctx: Target MMU op context.
+ * @level: Maximum page table level for which a sync is required.
+ */
+static void
+pvr_mmu_op_context_require_sync(struct pvr_mmu_op_context *op_ctx,
+ enum pvr_mmu_sync_level level)
+{
+ if (op_ctx->sync_level_required < level)
+ op_ctx->sync_level_required = level;
+}
+
+/**
+ * pvr_mmu_op_context_sync_manual() - Trigger a sync of some or all of the
+ * page tables referenced by an MMU op context.
+ * @op_ctx: Target MMU op context.
+ * @level: Maximum page table level to sync.
+ *
+ * Do not call this function directly. Instead use
+ * pvr_mmu_op_context_sync_partial() which is checked against the current
+ * value of &op_ctx->sync_level_required as set by
+ * pvr_mmu_op_context_require_sync().
+ */
+static void
+pvr_mmu_op_context_sync_manual(struct pvr_mmu_op_context *op_ctx,
+ enum pvr_mmu_sync_level level)
+{
+ /*
+ * We sync the page table levels in ascending order (starting from the
+ * leaf node) to ensure consistency.
+ */
+
+ WARN_ON(level < PVR_MMU_SYNC_LEVEL_NONE);
+
+ if (level <= PVR_MMU_SYNC_LEVEL_NONE)
+ return;
+
+ if (op_ctx->curr_page.l0_table)
+ pvr_page_table_l0_sync(op_ctx->curr_page.l0_table);
+
+ if (level < PVR_MMU_SYNC_LEVEL_1)
+ return;
+
+ if (op_ctx->curr_page.l1_table)
+ pvr_page_table_l1_sync(op_ctx->curr_page.l1_table);
+
+ if (level < PVR_MMU_SYNC_LEVEL_2)
+ return;
+
+ pvr_page_table_l2_sync(&op_ctx->mmu_ctx->page_table_l2);
+}
+
+/**
+ * pvr_mmu_op_context_sync_partial() - Trigger a sync of some or all of the
+ * page tables referenced by an MMU op context.
+ * @op_ctx: Target MMU op context.
+ * @level: Requested page table level to sync up to (inclusive).
+ *
+ * If @level is greater than the maximum level recorded by @op_ctx as requiring
+ * a sync operation, only the previously recorded maximum will be used.
+ *
+ * Additionally, if @level is greater than or equal to the maximum level
+ * recorded by @op_ctx as requiring a sync operation, that maximum level will be
+ * reset as a full sync will be performed. This is equivalent to calling
+ * pvr_mmu_op_context_sync().
+ */
+static void
+pvr_mmu_op_context_sync_partial(struct pvr_mmu_op_context *op_ctx,
+ enum pvr_mmu_sync_level level)
+{
+ /*
+ * If the requested sync level is greater than or equal to the
+ * currently required sync level, we do two things:
+ * * Don't waste time syncing levels we haven't previously marked as
+ * requiring a sync, and
+ * * Reset the required sync level since we are about to sync
+ * everything that was previously marked as requiring a sync.
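+ *
+ * For example, if sync_level_required is PVR_MMU_SYNC_LEVEL_1, a
+ * request for PVR_MMU_SYNC_LEVEL_2 is clamped to level 1 and the
+ * required level resets to PVR_MMU_SYNC_LEVEL_NONE, while a request
+ * for PVR_MMU_SYNC_LEVEL_0 syncs level 0 only and leaves the
+ * required level untouched.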
+ */
+ if (level >= op_ctx->sync_level_required) {
+ level = op_ctx->sync_level_required;
+ op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE;
+ }
+
+ pvr_mmu_op_context_sync_manual(op_ctx, level);
+}
+
+/**
+ * pvr_mmu_op_context_sync() - Trigger a sync of every page table referenced by
+ * an MMU op context.
+ * @op_ctx: Target MMU op context.
+ *
+ * The maximum level marked internally as requiring a sync will be reset so
+ * that subsequent calls to this function will be no-ops unless @op_ctx is
+ * otherwise updated.
+ */
+static void
+pvr_mmu_op_context_sync(struct pvr_mmu_op_context *op_ctx)
+{
+ pvr_mmu_op_context_sync_manual(op_ctx, op_ctx->sync_level_required);
+
+ op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE;
+}
+
+/**
+ * pvr_mmu_op_context_load_tables() - Load pointers to tables in each level of
+ * the page table tree structure needed to reference the physical page
+ * referenced by an MMU op context.
+ * @op_ctx: Target MMU op context.
+ * @should_create: Specifies whether new page tables should be created when
+ * empty page table entries are encountered during traversal.
+ * @load_level_required: Maximum page table level to load.
+ *
+ * If @should_create is %true, this function may modify the stored required
+ * sync level of @op_ctx as new page tables are created and inserted into their
+ * respective parents.
+ *
+ * Since there is only one root page table, it is technically incorrect to call
+ * this function with a value of @load_level_required greater than or equal to
+ * the root level number. However, this is not explicitly disallowed here.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error returned by pvr_page_table_l1_get_or_insert() if
+ * @load_level_required >= 1 except -%ENXIO, or
+ * * Any error returned by pvr_page_table_l0_get_or_insert() if
+ * @load_level_required >= 0 except -%ENXIO.
+ */
+static int
+pvr_mmu_op_context_load_tables(struct pvr_mmu_op_context *op_ctx,
+ bool should_create,
+ enum pvr_mmu_sync_level load_level_required)
+{
+ const struct pvr_page_table_l1 *l1_head_before =
+ op_ctx->map.l1_prealloc_tables;
+ const struct pvr_page_table_l0 *l0_head_before =
+ op_ctx->map.l0_prealloc_tables;
+ int err;
+
+ /* Clear tables we're about to fetch in case of error states. */
+ if (load_level_required >= PVR_MMU_SYNC_LEVEL_1)
+ op_ctx->curr_page.l1_table = NULL;
+
+ if (load_level_required >= PVR_MMU_SYNC_LEVEL_0)
+ op_ctx->curr_page.l0_table = NULL;
+
+ /* Get or create L1 page table. */
+ if (load_level_required >= PVR_MMU_SYNC_LEVEL_1) {
+ err = pvr_page_table_l1_get_or_insert(op_ctx, should_create);
+ if (err) {
+ /*
+ * If @should_create is %false and no L1 page table was
+ * found, return early but without an error. Since
+ * pvr_page_table_l1_get_or_insert() can only return
+ * -%ENXIO if @should_create is %false, there is no
+ * need to check it here.
+ */
+ if (err == -ENXIO)
+ err = 0;
+
+ return err;
+ }
+ }
+
+ /* Get or create L0 page table. */
+ if (load_level_required >= PVR_MMU_SYNC_LEVEL_0) {
+ err = pvr_page_table_l0_get_or_insert(op_ctx, should_create);
+ if (err) {
+ /*
+ * If @should_create is %false and no L0 page table was
+ * found, return early but without an error. Since
+ * pvr_page_table_l0_get_or_insert() can only return
+ * -%ENXIO if @should_create is %false, there is no
+ * need to check it here.
+ */
+ if (err == -ENXIO)
+ err = 0;
+
+ /*
+ * At this point, an L1 page table could have been
+ * inserted but is now empty due to the failed attempt
+ * at inserting an L0 page table.
In this instance, we
+ * must remove the empty L1 page table ourselves as
+ * pvr_page_table_l1_remove() is never called as part
+ * of the error path in
+ * pvr_page_table_l0_get_or_insert().
+ */
+ if (l1_head_before != op_ctx->map.l1_prealloc_tables) {
+ pvr_page_table_l2_remove(op_ctx);
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_2);
+ }
+
+ return err;
+ }
+ }
+
+ /*
+ * A sync is only needed if table objects were inserted. This can be
+ * inferred by checking if the pointer at the head of the linked list
+ * has changed.
+ */
+ if (l1_head_before != op_ctx->map.l1_prealloc_tables)
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_2);
+ else if (l0_head_before != op_ctx->map.l0_prealloc_tables)
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_1);
+
+ return 0;
+}
+
+/**
+ * pvr_mmu_op_context_set_curr_page() - Reassign the current page of an MMU op
+ * context, syncing any page tables previously assigned to it which are no
+ * longer relevant.
+ * @op_ctx: Target MMU op context.
+ * @device_addr: New pointer target.
+ * @should_create: Specify whether new page tables should be created when
+ * empty page table entries are encountered during traversal.
+ *
+ * This function performs a full sync on the pointer, regardless of which
+ * levels are modified.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error returned by pvr_mmu_op_context_load_tables().
+ */
+static int
+pvr_mmu_op_context_set_curr_page(struct pvr_mmu_op_context *op_ctx,
+ u64 device_addr, bool should_create)
+{
+ pvr_mmu_op_context_sync(op_ctx);
+
+ op_ctx->curr_page.l2_idx = pvr_page_table_l2_idx(device_addr);
+ op_ctx->curr_page.l1_idx = pvr_page_table_l1_idx(device_addr);
+ op_ctx->curr_page.l0_idx = pvr_page_table_l0_idx(device_addr);
+ op_ctx->curr_page.l1_table = NULL;
+ op_ctx->curr_page.l0_table = NULL;
+
+ return pvr_mmu_op_context_load_tables(op_ctx, should_create,
+ PVR_MMU_SYNC_LEVEL_1);
+}
+
+/**
+ * pvr_mmu_op_context_next_page() - Advance the current page of an MMU op
+ * context.
+ * @op_ctx: Target MMU op context.
+ * @should_create: Specify whether new page tables should be created when
+ * empty page table entries are encountered during traversal.
+ *
+ * If @should_create is %false, it is the caller's responsibility to verify that
+ * the state of the table references in @op_ctx is valid on return. If -%ENXIO
+ * is returned, at least one of the table references is invalid. It should be
+ * noted that @op_ctx as a whole will be left in a valid state if -%ENXIO is
+ * returned, unlike other error codes. The caller should check which references
+ * are invalid by comparing them to %NULL. Only the root (level 2) page table
+ * is guaranteed to be valid, since it is embedded in the owning MMU context
+ * rather than cached in @op_ctx.curr_page.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EPERM if the operation would wrap at the top of the page table
+ * hierarchy,
+ * * -%ENXIO if @should_create is %false and a page table of any level would
+ * have otherwise been created, or
+ * * Any error returned while attempting to create missing page tables if
+ * @should_create is %true.
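+ *
+ * The index advance behaves like an odometer. For example (using
+ * illustrative 8-entry tables rather than the real ROGUE_MMUCTRL_*
+ * entry counts), advancing from (l2=0, l1=3, l0=7) carries into
+ * (l2=0, l1=4, l0=0), at which point a new L0 table must be loaded.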
+ */ +static int +pvr_mmu_op_context_next_page(struct pvr_mmu_op_context *op_ctx, + bool should_create) +{ + s8 load_level_required = PVR_MMU_SYNC_LEVEL_NONE; + + if (++op_ctx->curr_page.l0_idx != ROGUE_MMUCTRL_ENTRIES_PT_VALUE_X) + goto load_tables; + + op_ctx->curr_page.l0_idx = 0; + load_level_required = PVR_MMU_SYNC_LEVEL_0; + + if (++op_ctx->curr_page.l1_idx != ROGUE_MMUCTRL_ENTRIES_PD_VALUE) + goto load_tables; + + op_ctx->curr_page.l1_idx = 0; + load_level_required = PVR_MMU_SYNC_LEVEL_1; + + if (++op_ctx->curr_page.l2_idx != ROGUE_MMUCTRL_ENTRIES_PC_VALUE) + goto load_tables; + + /* + * If the pattern continued, we would set &op_ctx->curr_page.l2_idx to + * zero here. However, that would wrap the top layer of the page table + * hierarchy which is not a valid operation. Instead, we warn and return + * an error. + */ + WARN(true, + "%s(%p) attempted to loop the top of the page table hierarchy", + __func__, op_ctx); + return -EPERM; + + /* If indices have wrapped, we need to load new tables. */ +load_tables: + /* First, flush tables which will be unloaded. */ + pvr_mmu_op_context_sync_partial(op_ctx, load_level_required); + + /* Then load tables from the required level down. */ + return pvr_mmu_op_context_load_tables(op_ctx, should_create, + load_level_required); +} + +/** + * DOC: Single page operations + */ + +/** + * pvr_page_create() - Create a device-virtual memory page and insert it into + * a level 0 page table. + * @op_ctx: Target MMU op context pointing at the device-virtual address of the + * target page. + * @dma_addr: DMA address of the physical page backing the created page. + * @flags: Page options saved on the level 0 page table entry for reading by + * the device. + * + * Return: + * * 0 on success, or + * * -%EEXIST if the requested page already exists. + */ +static int +pvr_page_create(struct pvr_mmu_op_context *op_ctx, dma_addr_t dma_addr, + struct pvr_page_flags_raw flags) +{ + /* Do not create a new page if one already exists. */ + if (pvr_page_table_l0_entry_is_valid(op_ctx->curr_page.l0_table, + op_ctx->curr_page.l0_idx)) { + return -EEXIST; + } + + pvr_page_table_l0_insert(op_ctx, dma_addr, flags); + + pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0); + + return 0; +} + +/** + * pvr_page_destroy() - Destroy a device page after removing it from its + * parent level 0 page table. + * @op_ctx: Target MMU op context. + */ +static void +pvr_page_destroy(struct pvr_mmu_op_context *op_ctx) +{ + /* Do nothing if the page does not exist. */ + if (!pvr_page_table_l0_entry_is_valid(op_ctx->curr_page.l0_table, + op_ctx->curr_page.l0_idx)) { + return; + } + + /* Clear the parent L0 page table entry. */ + pvr_page_table_l0_remove(op_ctx); + + pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0); +} + +/** + * pvr_mmu_op_context_destroy() - Destroy an MMU op context. + * @op_ctx: Target MMU op context. + */ +void pvr_mmu_op_context_destroy(struct pvr_mmu_op_context *op_ctx) +{ + const bool flush_caches = + op_ctx->sync_level_required != PVR_MMU_SYNC_LEVEL_NONE; + + pvr_mmu_op_context_sync(op_ctx); + + /* Unmaps should be flushed immediately. Map flushes can be deferred. 
*/
+ if (flush_caches && !op_ctx->map.sgt)
+ pvr_mmu_flush_exec(op_ctx->mmu_ctx->pvr_dev, true);
+
+ while (op_ctx->map.l0_prealloc_tables) {
+ struct pvr_page_table_l0 *tmp = op_ctx->map.l0_prealloc_tables;
+
+ op_ctx->map.l0_prealloc_tables =
+ op_ctx->map.l0_prealloc_tables->next_free;
+ pvr_page_table_l0_free(tmp);
+ }
+
+ while (op_ctx->map.l1_prealloc_tables) {
+ struct pvr_page_table_l1 *tmp = op_ctx->map.l1_prealloc_tables;
+
+ op_ctx->map.l1_prealloc_tables =
+ op_ctx->map.l1_prealloc_tables->next_free;
+ pvr_page_table_l1_free(tmp);
+ }
+
+ while (op_ctx->unmap.l0_free_tables) {
+ struct pvr_page_table_l0 *tmp = op_ctx->unmap.l0_free_tables;
+
+ op_ctx->unmap.l0_free_tables =
+ op_ctx->unmap.l0_free_tables->next_free;
+ pvr_page_table_l0_free(tmp);
+ }
+
+ while (op_ctx->unmap.l1_free_tables) {
+ struct pvr_page_table_l1 *tmp = op_ctx->unmap.l1_free_tables;
+
+ op_ctx->unmap.l1_free_tables =
+ op_ctx->unmap.l1_free_tables->next_free;
+ pvr_page_table_l1_free(tmp);
+ }
+
+ kfree(op_ctx);
+}
+
+/**
+ * pvr_mmu_op_context_create() - Create an MMU op context.
+ * @ctx: MMU context associated with owning VM context.
+ * @sgt: Scatter gather table containing pages pinned for use by this context.
+ * @sgt_offset: Start offset of the requested device-virtual memory mapping.
+ * @size: Size in bytes of the requested device-virtual memory mapping. For an
+ * unmapping, this should be zero so that no page tables are allocated.
+ *
+ * Returns:
+ * * Newly created MMU op context object on success, or
+ * * -%ENOMEM if no memory is available, or
+ * * Any error code returned by pvr_page_table_l1_alloc() or
+ * pvr_page_table_l0_alloc().
+ */
+struct pvr_mmu_op_context *
+pvr_mmu_op_context_create(struct pvr_mmu_context *ctx, struct sg_table *sgt,
+ u64 sgt_offset, u64 size)
+{
+ int err;
+
+ struct pvr_mmu_op_context *op_ctx =
+ kzalloc(sizeof(*op_ctx), GFP_KERNEL);
+
+ if (!op_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ op_ctx->mmu_ctx = ctx;
+ op_ctx->map.sgt = sgt;
+ op_ctx->map.sgt_offset = sgt_offset;
+ op_ctx->sync_level_required = PVR_MMU_SYNC_LEVEL_NONE;
+
+ if (size) {
+ /*
+ * The number of page table objects we need to prealloc is
+ * indicated by the mapping size, start offset and the sizes
+ * of the areas mapped per PT or PD. The range calculation is
+ * identical to that for the index into a table for a device
+ * address, so we reuse those functions here.
+ */
+ const u32 l1_start_idx = pvr_page_table_l2_idx(sgt_offset);
+ const u32 l1_end_idx = pvr_page_table_l2_idx(sgt_offset + size);
+ const u32 l1_count = l1_end_idx - l1_start_idx + 1;
+ const u32 l0_start_idx = pvr_page_table_l1_idx(sgt_offset);
+ const u32 l0_end_idx = pvr_page_table_l1_idx(sgt_offset + size);
+ const u32 l0_count = l0_end_idx - l0_start_idx + 1;
+
+ /*
+ * Alloc and push page table entries until we have enough of
+ * each type, ending with linked lists of l0 and l1 entries in
+ * reverse order.
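+ *
+ * Worked example (illustrative; assumes the 4KiB device page
+ * layout, where one L0 table spans 2MiB and one L1 table spans
+ * 1GiB): sgt_offset = 0x1ff000 with size = 0x2000 straddles a
+ * 2MiB boundary, giving l0_count = 2 and l1_count = 1.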
+ */
+ for (int i = 0; i < l1_count; i++) {
+ struct pvr_page_table_l1 *l1_tmp =
+ pvr_page_table_l1_alloc(ctx);
+
+ err = PTR_ERR_OR_ZERO(l1_tmp);
+ if (err)
+ goto err_cleanup;
+
+ l1_tmp->next_free = op_ctx->map.l1_prealloc_tables;
+ op_ctx->map.l1_prealloc_tables = l1_tmp;
+ }
+
+ for (int i = 0; i < l0_count; i++) {
+ struct pvr_page_table_l0 *l0_tmp =
+ pvr_page_table_l0_alloc(ctx);
+
+ err = PTR_ERR_OR_ZERO(l0_tmp);
+ if (err)
+ goto err_cleanup;
+
+ l0_tmp->next_free = op_ctx->map.l0_prealloc_tables;
+ op_ctx->map.l0_prealloc_tables = l0_tmp;
+ }
+ }
+
+ return op_ctx;
+
+err_cleanup:
+ pvr_mmu_op_context_destroy(op_ctx);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * pvr_mmu_op_context_unmap_curr_page() - Unmap pages from a memory context
+ * starting from the current page of an MMU op context.
+ * @op_ctx: Target MMU op context pointing at the first page to unmap.
+ * @nr_pages: Number of pages to unmap.
+ *
+ * Return:
+ * * 0 on success, or
+ * * Any error encountered while advancing @op_ctx.curr_page with
+ * pvr_mmu_op_context_next_page() (except -%ENXIO).
+ */
+static int
+pvr_mmu_op_context_unmap_curr_page(struct pvr_mmu_op_context *op_ctx,
+ u64 nr_pages)
+{
+ int err;
+
+ if (nr_pages == 0)
+ return 0;
+
+ /*
+ * Destroy first page outside loop, as it doesn't require a page
+ * advance beforehand. If the L0 page table reference in
+ * @op_ctx.curr_page is %NULL, there cannot be a mapped page at
+ * @op_ctx.curr_page (so skip ahead).
+ */
+ if (op_ctx->curr_page.l0_table)
+ pvr_page_destroy(op_ctx);
+
+ for (u64 page = 1; page < nr_pages; ++page) {
+ err = pvr_mmu_op_context_next_page(op_ctx, false);
+ /*
+ * If the page table tree structure at @op_ctx.curr_page is
+ * incomplete, skip ahead. We don't care about unmapping pages
+ * that cannot exist.
+ *
+ * FIXME: This could be made more efficient by jumping ahead
+ * using pvr_mmu_op_context_set_curr_page().
+ */
+ if (err == -ENXIO)
+ continue;
+ else if (err)
+ return err;
+
+ pvr_page_destroy(op_ctx);
+ }
+
+ return 0;
+}
+
+/**
+ * pvr_mmu_unmap() - Unmap pages from a memory context.
+ * @op_ctx: Target MMU op context.
+ * @device_addr: First device-virtual address to unmap.
+ * @size: Size in bytes to unmap.
+ *
+ * The total amount of device-virtual memory unmapped is @size bytes, i.e.
+ * (@size >> %PVR_DEVICE_PAGE_SHIFT) device pages.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_mmu_op_context_set_curr_page(), or
+ * * Any error code returned by pvr_mmu_op_context_unmap_curr_page().
+ */
+int pvr_mmu_unmap(struct pvr_mmu_op_context *op_ctx, u64 device_addr, u64 size)
+{
+ int err = pvr_mmu_op_context_set_curr_page(op_ctx, device_addr, false);
+
+ if (err)
+ return err;
+
+ return pvr_mmu_op_context_unmap_curr_page(op_ctx,
+ size >> PVR_DEVICE_PAGE_SHIFT);
+}
+
+/**
+ * pvr_mmu_map_sgl() - Map part of a scatter-gather table entry to
+ * device-virtual memory.
+ * @op_ctx: Target MMU op context pointing to the first page that should be
+ * mapped.
+ * @sgl: Target scatter-gather table entry.
+ * @offset: Offset into @sgl to map from. Must result in a starting address
+ * from @sgl which is CPU page-aligned.
+ * @size: Size of the memory to be mapped in bytes. Must be a non-zero multiple
+ * of the device page size.
+ * @page_flags: Page options to be applied to every device-virtual memory page
+ * in the created mapping.
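+ *
+ * For example, an @sgl entry with a DMA length of 64KiB accepts
+ * @offset = 16KiB with @size = 48KiB (the final byte is exactly in
+ * range), but @offset = 16KiB with @size = 52KiB fails the
+ * "offset > dma_len - size" check below and returns -%EINVAL.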
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if the range specified by @offset and @size is not completely
+ * within @sgl, or
+ * * Any error encountered while creating a page with pvr_page_create(), or
+ * * Any error encountered while advancing @op_ctx.curr_page with
+ * pvr_mmu_op_context_next_page().
+ */
+static int
+pvr_mmu_map_sgl(struct pvr_mmu_op_context *op_ctx, struct scatterlist *sgl,
+ u64 offset, u64 size, struct pvr_page_flags_raw page_flags)
+{
+ const unsigned int pages = size >> PVR_DEVICE_PAGE_SHIFT;
+ dma_addr_t dma_addr = sg_dma_address(sgl) + offset;
+ const unsigned int dma_len = sg_dma_len(sgl);
+ struct pvr_page_table_ptr ptr_copy;
+ unsigned int page;
+ int err;
+
+ if (size > dma_len || offset > dma_len - size)
+ return -EINVAL;
+
+ /*
+ * Before progressing, save a copy of the start pointer so we can use
+ * it again if we enter an error state and have to destroy pages.
+ */
+ memcpy(&ptr_copy, &op_ctx->curr_page, sizeof(ptr_copy));
+
+ /*
+ * Create first page outside loop, as it doesn't require a page advance
+ * beforehand.
+ */
+ err = pvr_page_create(op_ctx, dma_addr, page_flags);
+ if (err)
+ return err;
+
+ for (page = 1; page < pages; ++page) {
+ err = pvr_mmu_op_context_next_page(op_ctx, true);
+ if (err)
+ goto err_destroy_pages;
+
+ dma_addr += PVR_DEVICE_PAGE_SIZE;
+
+ err = pvr_page_create(op_ctx, dma_addr, page_flags);
+ if (err)
+ goto err_destroy_pages;
+ }
+
+ return 0;
+
+err_destroy_pages:
+ memcpy(&op_ctx->curr_page, &ptr_copy, sizeof(op_ctx->curr_page));
+ err = pvr_mmu_op_context_unmap_curr_page(op_ctx, page);
+
+ return err;
+}
+
+/**
+ * pvr_mmu_map() - Map an object's virtual memory to physical memory.
+ * @op_ctx: Target MMU op context.
+ * @size: Size of memory to be mapped in bytes. Must be a non-zero multiple
+ * of the device page size.
+ * @flags: Flags from pvr_gem_object associated with the mapping.
+ * @device_addr: Virtual device address to map to. Must be device page-aligned.
+ *
+ * Returns:
+ * * 0 on success, or
+ * * Any error code returned by pvr_mmu_op_context_set_curr_page(), or
+ * * Any error code returned by pvr_mmu_map_sgl(), or
+ * * Any error code returned by pvr_mmu_op_context_next_page().
+ */
+int pvr_mmu_map(struct pvr_mmu_op_context *op_ctx, u64 size, u64 flags,
+ u64 device_addr)
+{
+ struct pvr_page_table_ptr ptr_copy;
+ struct pvr_page_flags_raw flags_raw;
+ struct scatterlist *sgl;
+ u64 mapped_size = 0;
+ unsigned int count;
+ int err;
+
+ if (!size)
+ return 0;
+
+ if ((op_ctx->map.sgt_offset | size) & ~PVR_DEVICE_PAGE_MASK)
+ return -EINVAL;
+
+ err = pvr_mmu_op_context_set_curr_page(op_ctx, device_addr, true);
+ if (err)
+ return -EINVAL;
+
+ memcpy(&ptr_copy, &op_ctx->curr_page, sizeof(ptr_copy));
+
+ flags_raw = pvr_page_flags_raw_create(false, false,
+ flags & DRM_PVR_BO_BYPASS_DEVICE_CACHE,
+ flags & DRM_PVR_BO_PM_FW_PROTECT);
+
+ /* Map scatter gather table */
+ for_each_sgtable_dma_sg(op_ctx->map.sgt, sgl, count) {
+ const size_t sgl_len = sg_dma_len(sgl);
+ u64 sgl_offset, map_sgl_len;
+
+ if (sgl_len <= op_ctx->map.sgt_offset) {
+ op_ctx->map.sgt_offset -= sgl_len;
+ continue;
+ }
+
+ sgl_offset = op_ctx->map.sgt_offset;
+ map_sgl_len = min_t(u64, sgl_len - sgl_offset, size - mapped_size);
+
+ err = pvr_mmu_map_sgl(op_ctx, sgl, sgl_offset, map_sgl_len,
+ flags_raw);
+ if (err)
+ break;
+
+ /*
+ * Flag the L0 page table as requiring a flush when the MMU op
+ * context is destroyed.
+ */
+ pvr_mmu_op_context_require_sync(op_ctx, PVR_MMU_SYNC_LEVEL_0);
+
+ op_ctx->map.sgt_offset = 0;
+ mapped_size += map_sgl_len;
+
+ if (mapped_size >= size)
+ break;
+
+ err = pvr_mmu_op_context_next_page(op_ctx, true);
+ if (err)
+ break;
+ }
+
+ if (err && mapped_size) {
+ memcpy(&op_ctx->curr_page, &ptr_copy, sizeof(op_ctx->curr_page));
+ pvr_mmu_op_context_unmap_curr_page(op_ctx,
+ mapped_size >> PVR_DEVICE_PAGE_SHIFT);
+ }
+
+ return err;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_mmu.h b/drivers/gpu/drm/imagination/pvr_mmu.h
new file mode 100644
index 0000000000..a8ecd46016
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_mmu.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_MMU_H
+#define PVR_MMU_H
+
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+/* Forward declaration from "pvr_device.h" */
+struct pvr_device;
+
+/* Forward declaration from "pvr_mmu.c" */
+struct pvr_mmu_context;
+struct pvr_mmu_op_context;
+
+/* Forward declaration from "pvr_vm.c" */
+struct pvr_vm_context;
+
+/* Forward declaration from <linux/scatterlist.h> */
+struct sg_table;
+
+/**
+ * DOC: Public API (constants)
+ *
+ * .. c:macro:: PVR_DEVICE_PAGE_SIZE
+ *
+ *    Fixed page size referenced by leaf nodes in the page table tree
+ *    structure. In the current implementation, this value is pegged to the
+ *    CPU page size (%PAGE_SIZE). It is therefore an error to specify a CPU
+ *    page size which is not also a supported device page size. The supported
+ *    device page sizes are: 4KiB, 16KiB, 64KiB, 256KiB, 1MiB and 2MiB.
+ *
+ * .. c:macro:: PVR_DEVICE_PAGE_SHIFT
+ *
+ *    Shift value used to efficiently multiply or divide by
+ *    %PVR_DEVICE_PAGE_SIZE.
+ *
+ *    This value is derived from %PVR_DEVICE_PAGE_SIZE.
+ *
+ * .. c:macro:: PVR_DEVICE_PAGE_MASK
+ *
+ *    Mask used to round a value down to the nearest multiple of
+ *    %PVR_DEVICE_PAGE_SIZE. When bitwise negated, it will indicate whether a
+ *    value is already a multiple of %PVR_DEVICE_PAGE_SIZE.
+ *
+ *    This value is derived from %PVR_DEVICE_PAGE_SIZE.
+ */
+
+/* PVR_DEVICE_PAGE_SIZE determines the page size */
+#define PVR_DEVICE_PAGE_SIZE (PAGE_SIZE)
+#define PVR_DEVICE_PAGE_SHIFT (PAGE_SHIFT)
+#define PVR_DEVICE_PAGE_MASK (PAGE_MASK)
+
+/**
+ * DOC: Page table index utilities (constants)
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_ADDR_SPACE_SIZE
+ *
+ *    Size of device-virtual address space which can be represented in the page
+ *    table structure.
+ *
+ *    This value is checked at runtime against
+ *    &pvr_device_features.virtual_address_space_bits by
+ *    pvr_vm_create_context(), which will return an error if the feature value
+ *    does not match this constant.
+ *
+ *    .. admonition:: Future work
+ *
+ *       It should be possible to support other values of
+ *       &pvr_device_features.virtual_address_space_bits, but so far no
+ *       hardware has been created which advertises an unsupported value.
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_ADDR_BITS
+ *
+ *    Number of bits needed to represent any value less than
+ *    %PVR_PAGE_TABLE_ADDR_SPACE_SIZE exactly.
+ *
+ * .. c:macro:: PVR_PAGE_TABLE_ADDR_MASK
+ *
+ *    Bitmask of device-virtual addresses which are valid in the page table
+ *    structure.
+ *
+ *    This value is derived from %PVR_PAGE_TABLE_ADDR_SPACE_SIZE, so the same
+ *    notes on that constant apply here.
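+ *
+ *    For example, with %PVR_PAGE_TABLE_ADDR_SPACE_SIZE set to 1TiB (2^40),
+ *    %PVR_PAGE_TABLE_ADDR_BITS evaluates to 40 and
+ *    %PVR_PAGE_TABLE_ADDR_MASK to 0xFFFFFFFFFF, i.e. bits 39:0.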
+ */
+#define PVR_PAGE_TABLE_ADDR_SPACE_SIZE SZ_1T
+#define PVR_PAGE_TABLE_ADDR_BITS __ffs(PVR_PAGE_TABLE_ADDR_SPACE_SIZE)
+#define PVR_PAGE_TABLE_ADDR_MASK (PVR_PAGE_TABLE_ADDR_SPACE_SIZE - 1)
+
+void pvr_mmu_flush_request_all(struct pvr_device *pvr_dev);
+int pvr_mmu_flush_exec(struct pvr_device *pvr_dev, bool wait);
+
+struct pvr_mmu_context *pvr_mmu_context_create(struct pvr_device *pvr_dev);
+void pvr_mmu_context_destroy(struct pvr_mmu_context *ctx);
+
+dma_addr_t pvr_mmu_get_root_table_dma_addr(struct pvr_mmu_context *ctx);
+
+void pvr_mmu_op_context_destroy(struct pvr_mmu_op_context *op_ctx);
+struct pvr_mmu_op_context *
+pvr_mmu_op_context_create(struct pvr_mmu_context *ctx,
+ struct sg_table *sgt, u64 sgt_offset, u64 size);
+
+int pvr_mmu_map(struct pvr_mmu_op_context *op_ctx, u64 size, u64 flags,
+ u64 device_addr);
+int pvr_mmu_unmap(struct pvr_mmu_op_context *op_ctx, u64 device_addr, u64 size);
+
+#endif /* PVR_MMU_H */
diff --git a/drivers/gpu/drm/imagination/pvr_params.c b/drivers/gpu/drm/imagination/pvr_params.c
new file mode 100644
index 0000000000..b91759f362
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_params.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_params.h"
+
+#include <linux/cache.h>
+#include <linux/moduleparam.h>
+
+static struct pvr_device_params pvr_device_param_defaults __read_mostly = {
+#define X(type_, name_, value_, desc_, ...) .name_ = (value_),
+ PVR_DEVICE_PARAMS
+#undef X
+};
+
+#define PVR_DEVICE_PARAM_NAMED(name_, type_, desc_) \
+ module_param_named(name_, pvr_device_param_defaults.name_, type_, \
+ 0400); \
+ MODULE_PARM_DESC(name_, desc_);
+
+/*
+ * This list of defines must contain every type specified in "pvr_params.h" as
+ * ``PVR_PARAM_TYPE_*_C``.
+ */
+#define PVR_PARAM_TYPE_X32_MODPARAM uint
+
+#define X(type_, name_, value_, desc_, ...) \
+ PVR_DEVICE_PARAM_NAMED(name_, PVR_PARAM_TYPE_##type_##_MODPARAM, desc_);
+PVR_DEVICE_PARAMS
+#undef X
+
+int
+pvr_device_params_init(struct pvr_device_params *params)
+{
+ /*
+ * If heap-allocated parameters are added in the future (e.g.
+ * modparam's charp type), they must be handled specially here (via
+ * kstrdup() in the case of charp). Since that's not necessary yet,
+ * a straight copy will do for now. This change will also require a
+ * pvr_device_params_fini() function to free any heap-allocated copies.
+ */
+
+ *params = pvr_device_param_defaults;
+
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#include "pvr_device.h"
+
+#include <linux/dcache.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/stddef.h>
+
+/*
+ * This list of defines must contain every type specified in "pvr_params.h" as
+ * ``PVR_PARAM_TYPE_*_C``.
+ */
+#define PVR_PARAM_TYPE_X32_FMT "0x%08llx"
+
+#define X_SET(name_, mode_) X_SET_##mode_(name_)
+#define X_SET_DEF(name_, update_, mode_) X_SET_DEF_##mode_(name_, update_)
+
+#define X_SET_RO(name_) NULL
+#define X_SET_RW(name_) __pvr_device_param_##name_##_set
+
+#define X_SET_DEF_RO(name_, update_)
+#define X_SET_DEF_RW(name_, update_) \
+ static int \
+ X_SET_RW(name_)(void *data, u64 val) \
+ { \
+ struct pvr_device *pvr_dev = data; \
+ /* This is not just (update_) to suppress -Waddress.
*/ \
+ if ((void *)(update_) != NULL) \
+ (update_)(pvr_dev, pvr_dev->params.name_, val); \
+ pvr_dev->params.name_ = val; \
+ return 0; \
+ }
+
+#define X(type_, name_, value_, desc_, mode_, update_) \
+ static int \
+ __pvr_device_param_##name_##_get(void *data, u64 *val) \
+ { \
+ struct pvr_device *pvr_dev = data; \
+ *val = pvr_dev->params.name_; \
+ return 0; \
+ } \
+ X_SET_DEF(name_, update_, mode_) \
+ static int \
+ __pvr_device_param_##name_##_open(struct inode *inode, \
+ struct file *file) \
+ { \
+ __simple_attr_check_format(PVR_PARAM_TYPE_##type_##_FMT, \
+ 0ull); \
+ return simple_attr_open(inode, file, \
+ __pvr_device_param_##name_##_get, \
+ X_SET(name_, mode_), \
+ PVR_PARAM_TYPE_##type_##_FMT); \
+ }
+PVR_DEVICE_PARAMS
+#undef X
+
+#undef X_SET
+#undef X_SET_RO
+#undef X_SET_RW
+#undef X_SET_DEF
+#undef X_SET_DEF_RO
+#undef X_SET_DEF_RW
+
+static struct {
+#define X(type_, name_, value_, desc_, mode_, update_) \
+ const struct file_operations name_;
+ PVR_DEVICE_PARAMS
+#undef X
+} pvr_device_param_debugfs_fops = {
+#define X(type_, name_, value_, desc_, mode_, update_) \
+ .name_ = { \
+ .owner = THIS_MODULE, \
+ .open = __pvr_device_param_##name_##_open, \
+ .release = simple_attr_release, \
+ .read = simple_attr_read, \
+ .write = simple_attr_write, \
+ .llseek = generic_file_llseek, \
+ },
+ PVR_DEVICE_PARAMS
+#undef X
+};
+
+void
+pvr_params_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir)
+{
+#define X_MODE(mode_) X_MODE_##mode_
+#define X_MODE_RO 0400
+#define X_MODE_RW 0600
+
+#define X(type_, name_, value_, desc_, mode_, update_) \
+ debugfs_create_file(#name_, X_MODE(mode_), dir, pvr_dev, \
+ &pvr_device_param_debugfs_fops.name_);
+ PVR_DEVICE_PARAMS
+#undef X
+
+#undef X_MODE
+#undef X_MODE_RO
+#undef X_MODE_RW
+}
+#endif
diff --git a/drivers/gpu/drm/imagination/pvr_params.h b/drivers/gpu/drm/imagination/pvr_params.h
new file mode 100644
index 0000000000..5807915b45
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_params.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_PARAMS_H
+#define PVR_PARAMS_H
+
+#include "pvr_rogue_fwif.h"
+
+#include <linux/types.h>
+
+/*
+ * This is the definitive list of types allowed in the definition of
+ * %PVR_DEVICE_PARAMS.
+ */
+#define PVR_PARAM_TYPE_X32_C u32
+
+/*
+ * This macro defines all device-specific parameters; that is parameters which
+ * are set independently per device.
+ *
+ * The X-macro accepts the following arguments. Arguments marked with [debugfs]
+ * are ignored when debugfs is disabled; values used for these arguments may
+ * safely be gated behind CONFIG_DEBUG_FS.
+ *
+ * @type_: The definitive list of allowed values is PVR_PARAM_TYPE_*_C.
+ * @name_: Name of the parameter. This is used both as the field name in C and
+ *         stringified as the parameter name.
+ * @value_: Initial/default value.
+ * @desc_: String literal used as help text to describe the usage of this
+ *         parameter.
+ * @mode_: [debugfs] One of {RO,RW}. The access mode of the debugfs entry for
+ *         this parameter.
+ * @update_: [debugfs] When debugfs support is enabled, parameters may be
+ *           updated at runtime. When this happens, this function will be
+ *           called to allow changes to propagate. The signature of this
+ *           function is:
+ *
+ *              void (*)(struct pvr_device *pvr_dev, T old_val, T new_val)
+ *
+ *           Where T is the C type associated with @type_.
+ *
+ *           If @mode_ does not allow write access, this function will never be
+ *           called.
In this case, or if no update callback is required, you
+ *           should specify NULL for this argument.
+ */
+#define PVR_DEVICE_PARAMS \
+ X(X32, fw_trace_mask, ROGUE_FWIF_LOG_TYPE_NONE, \
+ "Enable FW trace for the specified groups. Specifying 0 disables " \
+ "all FW tracing.", \
+ RW, pvr_fw_trace_mask_update)
+
+struct pvr_device_params {
+#define X(type_, name_, value_, desc_, ...) \
+ PVR_PARAM_TYPE_##type_##_C name_;
+ PVR_DEVICE_PARAMS
+#undef X
+};
+
+int pvr_device_params_init(struct pvr_device_params *params);
+
+#if defined(CONFIG_DEBUG_FS)
+/* Forward declaration from "pvr_device.h". */
+struct pvr_device;
+
+/* Forward declaration from <linux/dcache.h>. */
+struct dentry;
+
+void pvr_params_debugfs_init(struct pvr_device *pvr_dev, struct dentry *dir);
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* PVR_PARAMS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
new file mode 100644
index 0000000000..ba7816fd28
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw.h"
+#include "pvr_fw_startstop.h"
+#include "pvr_power.h"
+#include "pvr_queue.h"
+#include "pvr_rogue_fwif.h"
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#define POWER_SYNC_TIMEOUT_US (1000000) /* 1s */
+
+#define WATCHDOG_TIME_MS (500)
+
+/**
+ * pvr_device_lost() - Mark GPU device as lost
+ * @pvr_dev: Target PowerVR device.
+ *
+ * This will cause the DRM device to be unplugged.
+ */
+void
+pvr_device_lost(struct pvr_device *pvr_dev)
+{
+ if (!pvr_dev->lost) {
+ pvr_dev->lost = true;
+ drm_dev_unplug(from_pvr_device(pvr_dev));
+ }
+}
+
+static int
+pvr_power_send_command(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *pow_cmd)
+{
+ struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+ u32 slot_nr;
+ u32 value;
+ int err;
+
+ WRITE_ONCE(*fw_dev->power_sync, 0);
+
+ err = pvr_kccb_send_cmd_powered(pvr_dev, pow_cmd, &slot_nr);
+ if (err)
+ return err;
+
+ /* Wait for FW to acknowledge. */
+ return readl_poll_timeout(pvr_dev->fw_dev.power_sync, value, value != 0, 100,
+ POWER_SYNC_TIMEOUT_US);
+}
+
+static int
+pvr_power_request_idle(struct pvr_device *pvr_dev)
+{
+ struct rogue_fwif_kccb_cmd pow_cmd;
+
+ /* Send FORCED_IDLE request to FW. */
+ pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
+ pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_FORCED_IDLE_REQ;
+ pow_cmd.cmd_data.pow_data.power_req_data.pow_request_type = ROGUE_FWIF_POWER_FORCE_IDLE;
+
+ return pvr_power_send_command(pvr_dev, &pow_cmd);
+}
+
+static int
+pvr_power_request_pwr_off(struct pvr_device *pvr_dev)
+{
+ struct rogue_fwif_kccb_cmd pow_cmd;
+
+ /* Send POW_OFF request to firmware.
*/
+ pow_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_POW;
+ pow_cmd.cmd_data.pow_data.pow_type = ROGUE_FWIF_POW_OFF_REQ;
+ pow_cmd.cmd_data.pow_data.power_req_data.forced = true;
+
+ return pvr_power_send_command(pvr_dev, &pow_cmd);
+}
+
+static int
+pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset)
+{
+ if (!hard_reset) {
+ int err;
+
+ cancel_delayed_work_sync(&pvr_dev->watchdog.work);
+
+ err = pvr_power_request_idle(pvr_dev);
+ if (err)
+ return err;
+
+ err = pvr_power_request_pwr_off(pvr_dev);
+ if (err)
+ return err;
+ }
+
+ return pvr_fw_stop(pvr_dev);
+}
+
+static int
+pvr_power_fw_enable(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = pvr_fw_start(pvr_dev);
+ if (err)
+ return err;
+
+ err = pvr_wait_for_fw_boot(pvr_dev);
+ if (err) {
+ drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n");
+ pvr_fw_stop(pvr_dev);
+ return err;
+ }
+
+ queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work,
+ msecs_to_jiffies(WATCHDOG_TIME_MS));
+
+ return 0;
+}
+
+bool
+pvr_power_is_idle(struct pvr_device *pvr_dev)
+{
+ /*
+ * FW power state can be out of date if a KCCB command has been submitted but the FW hasn't
+ * started processing it yet. So also check the KCCB status.
+ */
+ enum rogue_fwif_pow_state pow_state = READ_ONCE(pvr_dev->fw_dev.fwif_sysdata->pow_state);
+ bool kccb_idle = pvr_kccb_is_idle(pvr_dev);
+
+ return (pow_state == ROGUE_FWIF_POW_IDLE) && kccb_idle;
+}
+
+static bool
+pvr_watchdog_kccb_stalled(struct pvr_device *pvr_dev)
+{
+ /* Check KCCB commands are progressing. */
+ u32 kccb_cmds_executed = pvr_dev->fw_dev.fwif_osdata->kccb_cmds_executed;
+ bool kccb_is_idle = pvr_kccb_is_idle(pvr_dev);
+
+ if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed && !kccb_is_idle) {
+ pvr_dev->watchdog.kccb_stall_count++;
+
+ /*
+ * If we have commands pending with no progress for 2 consecutive polls then
+ * consider KCCB command processing stalled.
+ */
+ if (pvr_dev->watchdog.kccb_stall_count == 2) {
+ pvr_dev->watchdog.kccb_stall_count = 0;
+ return true;
+ }
+ } else if (pvr_dev->watchdog.old_kccb_cmds_executed == kccb_cmds_executed) {
+ bool has_active_contexts;
+
+ mutex_lock(&pvr_dev->queues.lock);
+ has_active_contexts = !list_empty(&pvr_dev->queues.active);
+ mutex_unlock(&pvr_dev->queues.lock);
+
+ if (has_active_contexts) {
+ /* Send a HEALTH_CHECK command so we can verify FW is still alive. */
+ struct rogue_fwif_kccb_cmd health_check_cmd;
+
+ health_check_cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK;
+
+ pvr_kccb_send_cmd_powered(pvr_dev, &health_check_cmd, NULL);
+ }
+ } else {
+ pvr_dev->watchdog.old_kccb_cmds_executed = kccb_cmds_executed;
+ pvr_dev->watchdog.kccb_stall_count = 0;
+ }
+
+ return false;
+}
+
+static void
+pvr_watchdog_worker(struct work_struct *work)
+{
+ struct pvr_device *pvr_dev = container_of(work, struct pvr_device,
+ watchdog.work.work);
+ bool stalled;
+
+ if (pvr_dev->lost)
+ return;
+
+ if (pm_runtime_get_if_in_use(from_pvr_device(pvr_dev)->dev) <= 0)
+ goto out_requeue;
+
+ if (!pvr_dev->fw_dev.booted)
+ goto out_pm_runtime_put;
+
+ stalled = pvr_watchdog_kccb_stalled(pvr_dev);
+
+ if (stalled) {
+ drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset");
+
+ pvr_power_reset(pvr_dev, true);
+ /* Device may be lost at this point.
*/ + } + +out_pm_runtime_put: + pm_runtime_put(from_pvr_device(pvr_dev)->dev); + +out_requeue: + if (!pvr_dev->lost) { + queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work, + msecs_to_jiffies(WATCHDOG_TIME_MS)); + } +} + +/** + * pvr_watchdog_init() - Initialise watchdog for device + * @pvr_dev: Target PowerVR device. + * + * Returns: + * * 0 on success, or + * * -%ENOMEM on out of memory. + */ +int +pvr_watchdog_init(struct pvr_device *pvr_dev) +{ + INIT_DELAYED_WORK(&pvr_dev->watchdog.work, pvr_watchdog_worker); + + return 0; +} + +int +pvr_power_device_suspend(struct device *dev) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct drm_device *drm_dev = platform_get_drvdata(plat_dev); + struct pvr_device *pvr_dev = to_pvr_device(drm_dev); + int err = 0; + int idx; + + if (!drm_dev_enter(drm_dev, &idx)) + return -EIO; + + if (pvr_dev->fw_dev.booted) { + err = pvr_power_fw_disable(pvr_dev, false); + if (err) + goto err_drm_dev_exit; + } + + clk_disable_unprepare(pvr_dev->mem_clk); + clk_disable_unprepare(pvr_dev->sys_clk); + clk_disable_unprepare(pvr_dev->core_clk); + +err_drm_dev_exit: + drm_dev_exit(idx); + + return err; +} + +int +pvr_power_device_resume(struct device *dev) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct drm_device *drm_dev = platform_get_drvdata(plat_dev); + struct pvr_device *pvr_dev = to_pvr_device(drm_dev); + int idx; + int err; + + if (!drm_dev_enter(drm_dev, &idx)) + return -EIO; + + err = clk_prepare_enable(pvr_dev->core_clk); + if (err) + goto err_drm_dev_exit; + + err = clk_prepare_enable(pvr_dev->sys_clk); + if (err) + goto err_core_clk_disable; + + err = clk_prepare_enable(pvr_dev->mem_clk); + if (err) + goto err_sys_clk_disable; + + if (pvr_dev->fw_dev.booted) { + err = pvr_power_fw_enable(pvr_dev); + if (err) + goto err_mem_clk_disable; + } + + drm_dev_exit(idx); + + return 0; + +err_mem_clk_disable: + clk_disable_unprepare(pvr_dev->mem_clk); + +err_sys_clk_disable: + clk_disable_unprepare(pvr_dev->sys_clk); + +err_core_clk_disable: + clk_disable_unprepare(pvr_dev->core_clk); + +err_drm_dev_exit: + drm_dev_exit(idx); + + return err; +} + +int +pvr_power_device_idle(struct device *dev) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct drm_device *drm_dev = platform_get_drvdata(plat_dev); + struct pvr_device *pvr_dev = to_pvr_device(drm_dev); + + return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY; +} + +/** + * pvr_power_reset() - Reset the GPU + * @pvr_dev: Device pointer + * @hard_reset: %true for hard reset, %false for soft reset + * + * If @hard_reset is %false and the FW processor fails to respond during the reset process, this + * function will attempt a hard reset. + * + * If a hard reset fails then the GPU device is reported as lost. + * + * Returns: + * * 0 on success, or + * * Any error code returned by pvr_power_get, pvr_power_fw_disable or pvr_power_fw_enable(). + */ +int +pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset) +{ + bool queues_disabled = false; + int err; + + /* + * Take a power reference during the reset. This should prevent any interference with the + * power state during reset. + */ + WARN_ON(pvr_power_get(pvr_dev)); + + down_write(&pvr_dev->reset_sem); + + if (pvr_dev->lost) { + err = -EIO; + goto err_up_write; + } + + /* Disable IRQs for the duration of the reset. 
*/ + disable_irq(pvr_dev->irq); + + do { + if (hard_reset) { + pvr_queue_device_pre_reset(pvr_dev); + queues_disabled = true; + } + + err = pvr_power_fw_disable(pvr_dev, hard_reset); + if (!err) { + if (hard_reset) { + pvr_dev->fw_dev.booted = false; + WARN_ON(pm_runtime_force_suspend(from_pvr_device(pvr_dev)->dev)); + + err = pvr_fw_hard_reset(pvr_dev); + if (err) + goto err_device_lost; + + err = pm_runtime_force_resume(from_pvr_device(pvr_dev)->dev); + pvr_dev->fw_dev.booted = true; + if (err) + goto err_device_lost; + } else { + /* Clear the FW faulted flags. */ + pvr_dev->fw_dev.fwif_sysdata->hwr_state_flags &= + ~(ROGUE_FWIF_HWR_FW_FAULT | + ROGUE_FWIF_HWR_RESTART_REQUESTED); + } + + pvr_fw_irq_clear(pvr_dev); + + err = pvr_power_fw_enable(pvr_dev); + } + + if (err && hard_reset) + goto err_device_lost; + + if (err && !hard_reset) { + drm_err(from_pvr_device(pvr_dev), "FW stalled, trying hard reset"); + hard_reset = true; + } + } while (err); + + if (queues_disabled) + pvr_queue_device_post_reset(pvr_dev); + + enable_irq(pvr_dev->irq); + + up_write(&pvr_dev->reset_sem); + + pvr_power_put(pvr_dev); + + return 0; + +err_device_lost: + drm_err(from_pvr_device(pvr_dev), "GPU device lost"); + pvr_device_lost(pvr_dev); + + /* Leave IRQs disabled if the device is lost. */ + + if (queues_disabled) + pvr_queue_device_post_reset(pvr_dev); + +err_up_write: + up_write(&pvr_dev->reset_sem); + + pvr_power_put(pvr_dev); + + return err; +} + +/** + * pvr_watchdog_fini() - Shutdown watchdog for device + * @pvr_dev: Target PowerVR device. + */ +void +pvr_watchdog_fini(struct pvr_device *pvr_dev) +{ + cancel_delayed_work_sync(&pvr_dev->watchdog.work); +} diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h new file mode 100644 index 0000000000..9a9312dcb2 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_power.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_POWER_H +#define PVR_POWER_H + +#include "pvr_device.h" + +#include <linux/mutex.h> +#include <linux/pm_runtime.h> + +int pvr_watchdog_init(struct pvr_device *pvr_dev); +void pvr_watchdog_fini(struct pvr_device *pvr_dev); + +void pvr_device_lost(struct pvr_device *pvr_dev); + +bool pvr_power_is_idle(struct pvr_device *pvr_dev); + +int pvr_power_device_suspend(struct device *dev); +int pvr_power_device_resume(struct device *dev); +int pvr_power_device_idle(struct device *dev); + +int pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset); + +static __always_inline int +pvr_power_get(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + + return pm_runtime_resume_and_get(drm_dev->dev); +} + +static __always_inline int +pvr_power_put(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + + return pm_runtime_put(drm_dev->dev); +} + +#endif /* PVR_POWER_H */ diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c new file mode 100644 index 0000000000..5ed9c98fb5 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_queue.c @@ -0,0 +1,1432 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2023 Imagination Technologies Ltd.
*/ + +#include <drm/drm_managed.h> +#include <drm/gpu_scheduler.h> + +#include "pvr_cccb.h" +#include "pvr_context.h" +#include "pvr_device.h" +#include "pvr_drv.h" +#include "pvr_job.h" +#include "pvr_queue.h" +#include "pvr_vm.h" + +#include "pvr_rogue_fwif_client.h" + +#define MAX_DEADLINE_MS 30000 + +#define CTX_COMPUTE_CCCB_SIZE_LOG2 15 +#define CTX_FRAG_CCCB_SIZE_LOG2 15 +#define CTX_GEOM_CCCB_SIZE_LOG2 15 +#define CTX_TRANSFER_CCCB_SIZE_LOG2 15 + +static int get_xfer_ctx_state_size(struct pvr_device *pvr_dev) +{ + u32 num_isp_store_registers; + + if (PVR_HAS_FEATURE(pvr_dev, xe_memory_hierarchy)) { + num_isp_store_registers = 1; + } else { + int err; + + err = PVR_FEATURE_VALUE(pvr_dev, num_isp_ipp_pipes, &num_isp_store_registers); + if (WARN_ON(err)) + return err; + } + + return sizeof(struct rogue_fwif_frag_ctx_state) + + (num_isp_store_registers * + sizeof(((struct rogue_fwif_frag_ctx_state *)0)->frag_reg_isp_store[0])); +} + +static int get_frag_ctx_state_size(struct pvr_device *pvr_dev) +{ + u32 num_isp_store_registers; + int err; + + if (PVR_HAS_FEATURE(pvr_dev, xe_memory_hierarchy)) { + err = PVR_FEATURE_VALUE(pvr_dev, num_raster_pipes, &num_isp_store_registers); + if (WARN_ON(err)) + return err; + + if (PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support)) { + u32 xpu_max_slaves; + + err = PVR_FEATURE_VALUE(pvr_dev, xpu_max_slaves, &xpu_max_slaves); + if (WARN_ON(err)) + return err; + + num_isp_store_registers *= (1 + xpu_max_slaves); + } + } else { + err = PVR_FEATURE_VALUE(pvr_dev, num_isp_ipp_pipes, &num_isp_store_registers); + if (WARN_ON(err)) + return err; + } + + return sizeof(struct rogue_fwif_frag_ctx_state) + + (num_isp_store_registers * + sizeof(((struct rogue_fwif_frag_ctx_state *)0)->frag_reg_isp_store[0])); +} + +static int get_ctx_state_size(struct pvr_device *pvr_dev, enum drm_pvr_job_type type) +{ + switch (type) { + case DRM_PVR_JOB_TYPE_GEOMETRY: + return sizeof(struct rogue_fwif_geom_ctx_state); + case DRM_PVR_JOB_TYPE_FRAGMENT: + return get_frag_ctx_state_size(pvr_dev); + case DRM_PVR_JOB_TYPE_COMPUTE: + return sizeof(struct rogue_fwif_compute_ctx_state); + case DRM_PVR_JOB_TYPE_TRANSFER_FRAG: + return get_xfer_ctx_state_size(pvr_dev); + } + + WARN(1, "Invalid queue type"); + return -EINVAL; +} + +static u32 get_ctx_offset(enum drm_pvr_job_type type) +{ + switch (type) { + case DRM_PVR_JOB_TYPE_GEOMETRY: + return offsetof(struct rogue_fwif_fwrendercontext, geom_context); + case DRM_PVR_JOB_TYPE_FRAGMENT: + return offsetof(struct rogue_fwif_fwrendercontext, frag_context); + case DRM_PVR_JOB_TYPE_COMPUTE: + return offsetof(struct rogue_fwif_fwcomputecontext, cdm_context); + case DRM_PVR_JOB_TYPE_TRANSFER_FRAG: + return offsetof(struct rogue_fwif_fwtransfercontext, tq_context); + } + + return 0; +} + +static const char * +pvr_queue_fence_get_driver_name(struct dma_fence *f) +{ + return PVR_DRIVER_NAME; +} + +static void pvr_queue_fence_release(struct dma_fence *f) +{ + struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base); + + pvr_context_put(fence->queue->ctx); + dma_fence_free(f); +} + +static const char * +pvr_queue_job_fence_get_timeline_name(struct dma_fence *f) +{ + struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base); + + switch (fence->queue->type) { + case DRM_PVR_JOB_TYPE_GEOMETRY: + return "geometry"; + + case DRM_PVR_JOB_TYPE_FRAGMENT: + return "fragment"; + + case DRM_PVR_JOB_TYPE_COMPUTE: + return "compute"; + + case DRM_PVR_JOB_TYPE_TRANSFER_FRAG: + return "transfer"; + } + + WARN(1, "Invalid queue type"); + return "invalid"; +} + +static
const char * +pvr_queue_cccb_fence_get_timeline_name(struct dma_fence *f) +{ + struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base); + + switch (fence->queue->type) { + case DRM_PVR_JOB_TYPE_GEOMETRY: + return "geometry-cccb"; + + case DRM_PVR_JOB_TYPE_FRAGMENT: + return "fragment-cccb"; + + case DRM_PVR_JOB_TYPE_COMPUTE: + return "compute-cccb"; + + case DRM_PVR_JOB_TYPE_TRANSFER_FRAG: + return "transfer-cccb"; + } + + WARN(1, "Invalid queue type"); + return "invalid"; +} + +static const struct dma_fence_ops pvr_queue_job_fence_ops = { + .get_driver_name = pvr_queue_fence_get_driver_name, + .get_timeline_name = pvr_queue_job_fence_get_timeline_name, + .release = pvr_queue_fence_release, +}; + +/** + * to_pvr_queue_job_fence() - Return a pvr_queue_fence object if the fence is + * backed by a UFO. + * @f: The dma_fence to turn into a pvr_queue_fence. + * + * Return: + * * A non-NULL pvr_queue_fence object if the dma_fence is backed by a UFO, or + * * NULL otherwise. + */ +static struct pvr_queue_fence * +to_pvr_queue_job_fence(struct dma_fence *f) +{ + struct drm_sched_fence *sched_fence = to_drm_sched_fence(f); + + if (sched_fence) + f = sched_fence->parent; + + if (f && f->ops == &pvr_queue_job_fence_ops) + return container_of(f, struct pvr_queue_fence, base); + + return NULL; +} + +static const struct dma_fence_ops pvr_queue_cccb_fence_ops = { + .get_driver_name = pvr_queue_fence_get_driver_name, + .get_timeline_name = pvr_queue_cccb_fence_get_timeline_name, + .release = pvr_queue_fence_release, +}; + +/** + * pvr_queue_fence_put() - Put wrapper for pvr_queue_fence objects. + * @f: The dma_fence object to put. + * + * If the pvr_queue_fence has been initialized, we call dma_fence_put(), + * otherwise we free the object with dma_fence_free(). This allows us + * to do the right thing before and after pvr_queue_fence_init() has been + * called. + */ +static void pvr_queue_fence_put(struct dma_fence *f) +{ + if (!f) + return; + + if (WARN_ON(f->ops && + f->ops != &pvr_queue_cccb_fence_ops && + f->ops != &pvr_queue_job_fence_ops)) + return; + + /* If the fence hasn't been initialized yet, free the object directly. */ + if (f->ops) + dma_fence_put(f); + else + dma_fence_free(f); +} + +/** + * pvr_queue_fence_alloc() - Allocate a pvr_queue_fence fence object + * + * Call this function to allocate job CCCB and done fences. This only + * allocates the objects. Initialization happens when the underlying + * dma_fence object is to be returned to drm_sched (in prepare_job() or + * run_job()). + * + * Return: + * * A valid pointer if the allocation succeeds, or + * * NULL if the allocation fails. + */ +static struct dma_fence * +pvr_queue_fence_alloc(void) +{ + struct pvr_queue_fence *fence; + + fence = kzalloc(sizeof(*fence), GFP_KERNEL); + if (!fence) + return NULL; + + return &fence->base; +} + +/** + * pvr_queue_fence_init() - Initializes a pvr_queue_fence object. + * @f: The fence to initialize + * @queue: The queue this fence belongs to. + * @fence_ops: The fence operations. + * @fence_ctx: The fence context. + * + * Wrapper around dma_fence_init() that takes care of initializing the + * pvr_queue_fence::queue field too.
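+ *
+ * Illustrative call sequence (a sketch of what the two wrappers below boil
+ * down to, not an additional driver API):
+ *
+ *	struct dma_fence *f = pvr_queue_fence_alloc();
+ *
+ *	if (f)
+ *		pvr_queue_fence_init(f, queue, &pvr_queue_job_fence_ops,
+ *				     &queue->job_fence_ctx);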
+ */ +static void +pvr_queue_fence_init(struct dma_fence *f, + struct pvr_queue *queue, + const struct dma_fence_ops *fence_ops, + struct pvr_queue_fence_ctx *fence_ctx) +{ + struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base); + + pvr_context_get(queue->ctx); + fence->queue = queue; + dma_fence_init(&fence->base, fence_ops, + &fence_ctx->lock, fence_ctx->id, + atomic_inc_return(&fence_ctx->seqno)); +} + +/** + * pvr_queue_cccb_fence_init() - Initializes a CCCB fence object. + * @fence: The fence to initialize. + * @queue: The queue this fence belongs to. + * + * Initializes a fence that can be used to wait for CCCB space. + * + * Should be called in the ::prepare_job() path, so the fence returned to + * drm_sched is valid. + */ +static void +pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue) +{ + pvr_queue_fence_init(fence, queue, &pvr_queue_cccb_fence_ops, + &queue->cccb_fence_ctx.base); +} + +/** + * pvr_queue_job_fence_init() - Initializes a job done fence object. + * @fence: The fence to initialize. + * @queue: The queue this fence belongs to. + * + * Initializes a fence that will be signaled when the GPU is done executing + * a job. + * + * Should be called *before* the ::run_job() path, so the fence is initialised + * before being placed in the pending_list. + */ +static void +pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue) +{ + pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops, + &queue->job_fence_ctx); +} + +/** + * pvr_queue_fence_ctx_init() - Queue fence context initialization. + * @fence_ctx: The context to initialize + */ +static void +pvr_queue_fence_ctx_init(struct pvr_queue_fence_ctx *fence_ctx) +{ + spin_lock_init(&fence_ctx->lock); + fence_ctx->id = dma_fence_context_alloc(1); + atomic_set(&fence_ctx->seqno, 0); +} + +static u32 ufo_cmds_size(u32 elem_count) +{ + /* We can pass at most ROGUE_FWIF_CCB_CMD_MAX_UFOS per UFO-related command. */ + u32 full_cmd_count = elem_count / ROGUE_FWIF_CCB_CMD_MAX_UFOS; + u32 remaining_elems = elem_count % ROGUE_FWIF_CCB_CMD_MAX_UFOS; + u32 size = full_cmd_count * + pvr_cccb_get_size_of_cmd_with_hdr(ROGUE_FWIF_CCB_CMD_MAX_UFOS * + sizeof(struct rogue_fwif_ufo)); + + if (remaining_elems) { + size += pvr_cccb_get_size_of_cmd_with_hdr(remaining_elems * + sizeof(struct rogue_fwif_ufo)); + } + + return size; +} + +static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count) +{ + /* One UFO cmd for the fence signaling, one UFO cmd per native fence wait, + * and a command for the job itself. + */ + return ufo_cmds_size(1) + ufo_cmds_size(ufo_wait_count) + + pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len); +} + +/** + * job_count_remaining_native_deps() - Count the number of non-signaled native dependencies. + * @job: Job to operate on. + * + * Returns: Number of non-signaled native deps remaining. + */ +static unsigned long job_count_remaining_native_deps(struct pvr_job *job) +{ + unsigned long remaining_count = 0; + struct dma_fence *fence = NULL; + unsigned long index; + + xa_for_each(&job->base.dependencies, index, fence) { + struct pvr_queue_fence *jfence; + + jfence = to_pvr_queue_job_fence(fence); + if (!jfence) + continue; + + if (!dma_fence_is_signaled(&jfence->base)) + remaining_count++; + } + + return remaining_count; +} + +/** + * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job. + * @queue: The queue this job will be submitted to. + * @job: The job to get the CCCB fence on.
+ * + * The CCCB fence is a synchronization primitive allowing us to delay job + * submission until there's enough space in the CCCB to submit the job. + * + * Return: + * * NULL if there's enough space in the CCCB to submit this job, or + * * A valid dma_fence object otherwise. + */ +static struct dma_fence * +pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job) +{ + struct pvr_queue_fence *cccb_fence; + unsigned int native_deps_remaining; + + /* If the fence is NULL, that means we already checked that we had + * enough space in the cccb for our job. + */ + if (!job->cccb_fence) + return NULL; + + mutex_lock(&queue->cccb_fence_ctx.job_lock); + + /* Count remaining native dependencies and check if the job fits in the CCCB. */ + native_deps_remaining = job_count_remaining_native_deps(job); + if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { + pvr_queue_fence_put(job->cccb_fence); + job->cccb_fence = NULL; + goto out_unlock; + } + + /* There should be no job attached to the CCCB fence context: + * drm_sched_entity guarantees that jobs are submitted one at a time. + */ + if (WARN_ON(queue->cccb_fence_ctx.job)) + pvr_job_put(queue->cccb_fence_ctx.job); + + queue->cccb_fence_ctx.job = pvr_job_get(job); + + /* Initialize the fence before returning it. */ + cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base); + if (!WARN_ON(cccb_fence->queue)) + pvr_queue_cccb_fence_init(job->cccb_fence, queue); + +out_unlock: + mutex_unlock(&queue->cccb_fence_ctx.job_lock); + + return dma_fence_get(job->cccb_fence); +} + +/** + * pvr_queue_get_job_kccb_fence() - Get the KCCB fence attached to a job. + * @queue: The queue this job will be submitted to. + * @job: The job to get the KCCB fence on. + * + * The KCCB fence is a synchronization primitive allowing us to delay job + * submission until there's enough space in the KCCB to submit the job. + * + * Return: + * * NULL if there's enough space in the KCCB to submit this job, or + * * A valid dma_fence object otherwise. + */ +static struct dma_fence * +pvr_queue_get_job_kccb_fence(struct pvr_queue *queue, struct pvr_job *job) +{ + struct pvr_device *pvr_dev = queue->ctx->pvr_dev; + struct dma_fence *kccb_fence = NULL; + + /* If the fence is NULL, that means we already checked that we had + * enough space in the KCCB for our job. + */ + if (!job->kccb_fence) + return NULL; + + if (!WARN_ON(job->kccb_fence->ops)) { + kccb_fence = pvr_kccb_reserve_slot(pvr_dev, job->kccb_fence); + job->kccb_fence = NULL; + } + + return kccb_fence; +} + +static struct dma_fence * +pvr_queue_get_paired_frag_job_dep(struct pvr_queue *queue, struct pvr_job *job) +{ + struct pvr_job *frag_job = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ? + job->paired_job : NULL; + struct dma_fence *f; + unsigned long index; + + if (!frag_job) + return NULL; + + xa_for_each(&frag_job->base.dependencies, index, f) { + /* Skip already signaled fences. */ + if (dma_fence_is_signaled(f)) + continue; + + /* Skip our own fence. */ + if (f == &job->base.s_fence->scheduled) + continue; + + return dma_fence_get(f); + } + + return frag_job->base.sched->ops->prepare_job(&frag_job->base, &queue->entity); +} + +/** + * pvr_queue_prepare_job() - Return the next internal dependency expressed as a dma_fence. + * @sched_job: The job to query the next internal dependency on + * @s_entity: The entity this job is queued on. + * + * After iterating over drm_sched_job::dependencies, drm_sched lets the driver return + * its own internal dependencies.
We use this function to return our internal dependencies. + */ +static struct dma_fence * +pvr_queue_prepare_job(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity) +{ + struct pvr_job *job = container_of(sched_job, struct pvr_job, base); + struct pvr_queue *queue = container_of(s_entity, struct pvr_queue, entity); + struct dma_fence *internal_dep = NULL; + + /* + * Initialize the done_fence, so we can signal it. This must be done + * here because otherwise by the time of run_job() the job will end up + * in the pending list without a valid fence. + */ + if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) { + /* + * This will be called on a paired fragment job after it has + * been submitted to firmware. We can tell if this is the case, + * and bail out early, by checking whether run_job() has been + * called on the geometry job, which would have issued a PM ref. + */ + if (job->paired_job->has_pm_ref) + return NULL; + + /* + * In this case we need to use the job's own ctx to initialise + * the done_fence. The other steps are done in the ctx of the + * paired geometry job. + */ + pvr_queue_job_fence_init(job->done_fence, + job->ctx->queues.fragment); + } else { + pvr_queue_job_fence_init(job->done_fence, queue); + } + + /* CCCB fence is used to make sure we have enough space in the CCCB to + * submit our commands. + */ + internal_dep = pvr_queue_get_job_cccb_fence(queue, job); + + /* KCCB fence is used to make sure we have a KCCB slot to queue our + * CMD_KICK. + */ + if (!internal_dep) + internal_dep = pvr_queue_get_job_kccb_fence(queue, job); + + /* Any extra internal dependency should be added here, using the following + * pattern: + * + * if (!internal_dep) + * internal_dep = pvr_queue_get_job_xxxx_fence(queue, job); + */ + + /* The paired job fence should come last, when everything else is ready. */ + if (!internal_dep) + internal_dep = pvr_queue_get_paired_frag_job_dep(queue, job); + + return internal_dep; +} + +/** + * pvr_queue_update_active_state_locked() - Update the queue active state. + * @queue: Queue to update the state on. + * + * Locked version of pvr_queue_update_active_state(). Must be called with + * pvr_device::queues::lock held. + */ +static void pvr_queue_update_active_state_locked(struct pvr_queue *queue) +{ + struct pvr_device *pvr_dev = queue->ctx->pvr_dev; + + lockdep_assert_held(&pvr_dev->queues.lock); + + /* The queue is temporarily out of any list while it's being reset; + * we don't want a call to pvr_queue_update_active_state_locked() + * to re-insert it behind our back. + */ + if (list_empty(&queue->node)) + return; + + if (!atomic_read(&queue->in_flight_job_count)) + list_move_tail(&queue->node, &pvr_dev->queues.idle); + else + list_move_tail(&queue->node, &pvr_dev->queues.active); +} + +/** + * pvr_queue_update_active_state() - Update the queue active state. + * @queue: Queue to update the state on. + * + * Active state is based on the in_flight_job_count value. + * + * Updating the active state implies moving the queue in or out of the + * active queue list, which also defines whether the queue is checked + * or not when a FW event is received. + * + * This function should be called any time a job is submitted or its done + * fence is signaled.
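+ *
+ * Sketch of the submission-side usage, taken from
+ * pvr_queue_submit_job_to_cccb() below:
+ *
+ *	atomic_inc(&queue->in_flight_job_count);
+ *	pvr_queue_update_active_state(queue);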
+ */ +static void pvr_queue_update_active_state(struct pvr_queue *queue) +{ + struct pvr_device *pvr_dev = queue->ctx->pvr_dev; + + mutex_lock(&pvr_dev->queues.lock); + pvr_queue_update_active_state_locked(queue); + mutex_unlock(&pvr_dev->queues.lock); +} + +static void pvr_queue_submit_job_to_cccb(struct pvr_job *job) +{ + struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); + struct rogue_fwif_ufo ufos[ROGUE_FWIF_CCB_CMD_MAX_UFOS]; + struct pvr_cccb *cccb = &queue->cccb; + struct pvr_queue_fence *jfence; + struct dma_fence *fence; + unsigned long index; + u32 ufo_count = 0; + + /* We need to add the queue to the active list before updating the CCCB, + * otherwise we might miss the FW event informing us that something + * happened on this queue. + */ + atomic_inc(&queue->in_flight_job_count); + pvr_queue_update_active_state(queue); + + xa_for_each(&job->base.dependencies, index, fence) { + jfence = to_pvr_queue_job_fence(fence); + if (!jfence) + continue; + + /* Skip the partial render fence, we will place it at the end. */ + if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job && + &job->paired_job->base.s_fence->scheduled == fence) + continue; + + if (dma_fence_is_signaled(&jfence->base)) + continue; + + pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj, + &ufos[ufo_count].addr); + ufos[ufo_count++].value = jfence->base.seqno; + + if (ufo_count == ARRAY_SIZE(ufos)) { + pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR, + sizeof(ufos), ufos, 0, 0); + ufo_count = 0; + } + } + + /* Partial render fence goes last. */ + if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) { + jfence = to_pvr_queue_job_fence(job->paired_job->done_fence); + if (!WARN_ON(!jfence)) { + pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj, + &ufos[ufo_count].addr); + ufos[ufo_count++].value = job->paired_job->done_fence->seqno; + } + } + + if (ufo_count) { + pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR, + sizeof(ufos[0]) * ufo_count, ufos, 0, 0); + } + + if (job->type == DRM_PVR_JOB_TYPE_GEOMETRY && job->paired_job) { + struct rogue_fwif_cmd_geom *cmd = job->cmd; + + /* Reference value for the partial render test is the current queue fence + * seqno minus one. + */ + pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, + &cmd->partial_render_geom_frag_fence.addr); + cmd->partial_render_geom_frag_fence.value = job->done_fence->seqno - 1; + } + + /* Submit job to FW */ + pvr_cccb_write_command_with_header(cccb, job->fw_ccb_cmd_type, job->cmd_len, job->cmd, + job->id, job->id); + + /* Signal the job fence. */ + pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, &ufos[0].addr); + ufos[0].value = job->done_fence->seqno; + pvr_cccb_write_command_with_header(cccb, ROGUE_FWIF_CCB_CMD_TYPE_UPDATE, + sizeof(ufos[0]), ufos, 0, 0); +} + +/** + * pvr_queue_run_job() - Submit a job to the FW. + * @sched_job: The job to submit. + * + * This function is called when all non-native dependencies have been met and + * when the commands resulting from this job are guaranteed to fit in the CCCB. + */ +static struct dma_fence *pvr_queue_run_job(struct drm_sched_job *sched_job) +{ + struct pvr_job *job = container_of(sched_job, struct pvr_job, base); + struct pvr_device *pvr_dev = job->pvr_dev; + int err; + + /* The fragment job is issued along with the geometry job when we use combined + * geom+frag kicks. When we get here, we should simply return the + * done_fence that's been initialized earlier.
+ */ + if (job->paired_job && job->type == DRM_PVR_JOB_TYPE_FRAGMENT && + job->done_fence->ops) { + return dma_fence_get(job->done_fence); + } + + /* The only kind of jobs that can be paired are geometry and fragment, and + * we bail out early if we see a fragment job that's paired with a geometry + * job. + * Paired jobs must also target the same context and point to the same + * HWRT. + */ + if (WARN_ON(job->paired_job && + (job->type != DRM_PVR_JOB_TYPE_GEOMETRY || + job->paired_job->type != DRM_PVR_JOB_TYPE_FRAGMENT || + job->hwrt != job->paired_job->hwrt || + job->ctx != job->paired_job->ctx))) + return ERR_PTR(-EINVAL); + + err = pvr_job_get_pm_ref(job); + if (WARN_ON(err)) + return ERR_PTR(err); + + if (job->paired_job) { + err = pvr_job_get_pm_ref(job->paired_job); + if (WARN_ON(err)) + return ERR_PTR(err); + } + + /* Submit our job to the CCCB */ + pvr_queue_submit_job_to_cccb(job); + + if (job->paired_job) { + struct pvr_job *geom_job = job; + struct pvr_job *frag_job = job->paired_job; + struct pvr_queue *geom_queue = job->ctx->queues.geometry; + struct pvr_queue *frag_queue = job->ctx->queues.fragment; + + /* Submit the fragment job along with the geometry job and send a combined kick. */ + pvr_queue_submit_job_to_cccb(frag_job); + pvr_cccb_send_kccb_combined_kick(pvr_dev, + &geom_queue->cccb, &frag_queue->cccb, + pvr_context_get_fw_addr(geom_job->ctx) + + geom_queue->ctx_offset, + pvr_context_get_fw_addr(frag_job->ctx) + + frag_queue->ctx_offset, + job->hwrt, + frag_job->fw_ccb_cmd_type == + ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR); + } else { + struct pvr_queue *queue = container_of(job->base.sched, + struct pvr_queue, scheduler); + + pvr_cccb_send_kccb_kick(pvr_dev, &queue->cccb, + pvr_context_get_fw_addr(job->ctx) + queue->ctx_offset, + job->hwrt); + } + + return dma_fence_get(job->done_fence); +} + +static void pvr_queue_stop(struct pvr_queue *queue, struct pvr_job *bad_job) +{ + drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); +} + +static void pvr_queue_start(struct pvr_queue *queue) +{ + struct pvr_job *job; + + /* Make sure we CPU-signal the UFO object, so other queues don't get + * blocked waiting on it. + */ + *queue->timeline_ufo.value = atomic_read(&queue->job_fence_ctx.seqno); + + list_for_each_entry(job, &queue->scheduler.pending_list, base.list) { + if (dma_fence_is_signaled(job->done_fence)) { + /* Jobs might have completed after drm_sched_stop() was called. + * In that case, re-assign the parent field to the done_fence. + */ + WARN_ON(job->base.s_fence->parent); + job->base.s_fence->parent = dma_fence_get(job->done_fence); + } else { + /* If we had unfinished jobs, flag the entity as guilty so no + * new job can be submitted. + */ + atomic_set(&queue->ctx->faulty, 1); + } + } + + drm_sched_start(&queue->scheduler, true); +} + +/** + * pvr_queue_timedout_job() - Handle a job timeout event. + * @s_job: The job this timeout occurred on. + * + * FIXME: We don't do anything here to unblock the situation, we just stop+start + * the scheduler, and re-assign parent fences in the middle. + * + * Return: + * * DRM_GPU_SCHED_STAT_NOMINAL.
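+ *
+ * For orientation, this handler is wired into the scheduler through the
+ * backend ops declared further down in this file (condensed copy):
+ *
+ *	static const struct drm_sched_backend_ops pvr_queue_sched_ops = {
+ *		.prepare_job = pvr_queue_prepare_job,
+ *		.run_job = pvr_queue_run_job,
+ *		.timedout_job = pvr_queue_timedout_job,
+ *		.free_job = pvr_queue_free_job,
+ *	};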
+ */ +static enum drm_gpu_sched_stat +pvr_queue_timedout_job(struct drm_sched_job *s_job) +{ + struct drm_gpu_scheduler *sched = s_job->sched; + struct pvr_queue *queue = container_of(sched, struct pvr_queue, scheduler); + struct pvr_device *pvr_dev = queue->ctx->pvr_dev; + struct pvr_job *job; + u32 job_count = 0; + + dev_err(sched->dev, "Job timeout\n"); + + /* Before we stop the scheduler, make sure the queue is out of any list, so + * any call to pvr_queue_update_active_state_locked() that might happen + * until the scheduler is really stopped doesn't end up re-inserting the + * queue in the active list. This would cause + * pvr_queue_signal_done_fences() and drm_sched_stop() to race with each + * other when accessing the pending_list, since drm_sched_stop() doesn't + * grab the job_list_lock when modifying the list (it's assuming the + * only other accessor is the scheduler, and it's safe to not grab the + * lock since it's stopped). + */ + mutex_lock(&pvr_dev->queues.lock); + list_del_init(&queue->node); + mutex_unlock(&pvr_dev->queues.lock); + + drm_sched_stop(sched, s_job); + + /* Re-assign job parent fences. */ + list_for_each_entry(job, &sched->pending_list, base.list) { + job->base.s_fence->parent = dma_fence_get(job->done_fence); + job_count++; + } + WARN_ON(atomic_read(&queue->in_flight_job_count) != job_count); + + /* Re-insert the queue in the proper list, and kick a queue processing + * operation if there were jobs pending. + */ + mutex_lock(&pvr_dev->queues.lock); + if (!job_count) { + list_move_tail(&queue->node, &pvr_dev->queues.idle); + } else { + atomic_set(&queue->in_flight_job_count, job_count); + list_move_tail(&queue->node, &pvr_dev->queues.active); + pvr_queue_process(queue); + } + mutex_unlock(&pvr_dev->queues.lock); + + drm_sched_start(sched, true); + + return DRM_GPU_SCHED_STAT_NOMINAL; +} + +/** + * pvr_queue_free_job() - Release the reference the scheduler had on a job object. + * @sched_job: Job object to free. + */ +static void pvr_queue_free_job(struct drm_sched_job *sched_job) +{ + struct pvr_job *job = container_of(sched_job, struct pvr_job, base); + + drm_sched_job_cleanup(sched_job); + job->paired_job = NULL; + pvr_job_put(job); +} + +static const struct drm_sched_backend_ops pvr_queue_sched_ops = { + .prepare_job = pvr_queue_prepare_job, + .run_job = pvr_queue_run_job, + .timedout_job = pvr_queue_timedout_job, + .free_job = pvr_queue_free_job, +}; + +/** + * pvr_queue_fence_is_ufo_backed() - Check if a dma_fence is backed by a UFO object + * @f: Fence to test. + * + * A UFO-backed fence is a fence that can be signaled or waited upon FW-side. + * pvr_job::done_fence objects are backed by the timeline UFO attached to the queue + * they are pushed to, but those fences are not directly exposed to the outside + * world, so we also need to check if the fence we're being passed is a + * drm_sched_fence that was coming from our driver. + */ +bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f) +{ + struct drm_sched_fence *sched_fence = f ? to_drm_sched_fence(f) : NULL; + + if (sched_fence && + sched_fence->sched->ops == &pvr_queue_sched_ops) + return true; + + if (f && f->ops == &pvr_queue_job_fence_ops) + return true; + + return false; +} + +/** + * pvr_queue_signal_done_fences() - Signal done fences. + * @queue: Queue to check. + * + * Signal done fences of jobs whose seqno is less than the current value of + * the UFO object attached to the queue. 
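+ *
+ * The seqno check in the loop below is wrap-safe: with u32 timeline values,
+ * a job is treated as still pending while
+ *
+ *	(int)(cur_seqno - lower_32_bits(job->done_fence->seqno)) < 0
+ *
+ * e.g. cur_seqno == 0xfffffffe and seqno == 0x00000001 gives a negative
+ * signed difference, so the fence is correctly considered unreached even
+ * though the counter is about to wrap.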
+ */ +static void +pvr_queue_signal_done_fences(struct pvr_queue *queue) +{ + struct pvr_job *job, *tmp_job; + u32 cur_seqno; + + spin_lock(&queue->scheduler.job_list_lock); + cur_seqno = *queue->timeline_ufo.value; + list_for_each_entry_safe(job, tmp_job, &queue->scheduler.pending_list, base.list) { + if ((int)(cur_seqno - lower_32_bits(job->done_fence->seqno)) < 0) + break; + + if (!dma_fence_is_signaled(job->done_fence)) { + dma_fence_signal(job->done_fence); + pvr_job_release_pm_ref(job); + atomic_dec(&queue->in_flight_job_count); + } + } + spin_unlock(&queue->scheduler.job_list_lock); +} + +/** + * pvr_queue_check_job_waiting_for_cccb_space() - Check if the job waiting for CCCB space + * can be unblocked and pushed to the CCCB + * @queue: Queue to check + * + * If we have a job waiting for CCCB space, and this job now fits in the CCCB, we signal + * its CCCB fence, which should kick drm_sched. + */ +static void +pvr_queue_check_job_waiting_for_cccb_space(struct pvr_queue *queue) +{ + struct pvr_queue_fence *cccb_fence; + u32 native_deps_remaining; + struct pvr_job *job; + + mutex_lock(&queue->cccb_fence_ctx.job_lock); + job = queue->cccb_fence_ctx.job; + if (!job) + goto out_unlock; + + /* If we have a job attached to the CCCB fence context, its CCCB fence + * shouldn't be NULL. + */ + if (WARN_ON(!job->cccb_fence)) { + job = NULL; + goto out_unlock; + } + + /* If we get here, the CCCB fence has to be initialized. */ + cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base); + if (WARN_ON(!cccb_fence->queue)) { + job = NULL; + goto out_unlock; + } + + /* Evict signaled dependencies before checking for CCCB space. + * If the job fits, signal the CCCB fence, which should unblock + * the drm_sched_entity. + */ + native_deps_remaining = job_count_remaining_native_deps(job); + if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { + job = NULL; + goto out_unlock; + } + + dma_fence_signal(job->cccb_fence); + pvr_queue_fence_put(job->cccb_fence); + job->cccb_fence = NULL; + queue->cccb_fence_ctx.job = NULL; + +out_unlock: + mutex_unlock(&queue->cccb_fence_ctx.job_lock); + + pvr_job_put(job); +} + +/** + * pvr_queue_process() - Process events that happened on a queue. + * @queue: Queue to check + * + * Signal job fences and check if jobs waiting for CCCB space can be unblocked. + */ +void pvr_queue_process(struct pvr_queue *queue) +{ + lockdep_assert_held(&queue->ctx->pvr_dev->queues.lock); + + pvr_queue_check_job_waiting_for_cccb_space(queue); + pvr_queue_signal_done_fences(queue); + pvr_queue_update_active_state_locked(queue); +} + +static u32 get_dm_type(struct pvr_queue *queue) +{ + switch (queue->type) { + case DRM_PVR_JOB_TYPE_GEOMETRY: + return PVR_FWIF_DM_GEOM; + case DRM_PVR_JOB_TYPE_TRANSFER_FRAG: + case DRM_PVR_JOB_TYPE_FRAGMENT: + return PVR_FWIF_DM_FRAG; + case DRM_PVR_JOB_TYPE_COMPUTE: + return PVR_FWIF_DM_CDM; + } + + return ~0; +} + +/** + * init_fw_context() - Initializes the queue part of a FW context. + * @queue: Queue object to initialize the FW context for. + * @fw_ctx_map: The FW context CPU mapping. + * + * FW contexts contain various states, one of them being a per-queue state + * that needs to be initialized for each queue being exposed by a context. This + * function takes care of that.
+ */ +static void init_fw_context(struct pvr_queue *queue, void *fw_ctx_map) +{ + struct pvr_context *ctx = queue->ctx; + struct pvr_fw_object *fw_mem_ctx_obj = pvr_vm_get_fw_mem_context(ctx->vm_ctx); + struct rogue_fwif_fwcommoncontext *cctx_fw; + struct pvr_cccb *cccb = &queue->cccb; + + cctx_fw = fw_ctx_map + queue->ctx_offset; + cctx_fw->ccbctl_fw_addr = cccb->ctrl_fw_addr; + cctx_fw->ccb_fw_addr = cccb->cccb_fw_addr; + + cctx_fw->dm = get_dm_type(queue); + cctx_fw->priority = ctx->priority; + cctx_fw->priority_seq_num = 0; + cctx_fw->max_deadline_ms = MAX_DEADLINE_MS; + cctx_fw->pid = task_tgid_nr(current); + cctx_fw->server_common_context_id = ctx->ctx_id; + + pvr_fw_object_get_fw_addr(fw_mem_ctx_obj, &cctx_fw->fw_mem_context_fw_addr); + + pvr_fw_object_get_fw_addr(queue->reg_state_obj, &cctx_fw->context_state_addr); +} + +/** + * pvr_queue_cleanup_fw_context() - Wait for the FW context to be idle and clean it up. + * @queue: Queue whose FW context should be cleaned up. + * + * Return: + * * 0 on success, + * * Any error returned by pvr_fw_structure_cleanup() otherwise. + */ +static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue) +{ + if (!queue->ctx->fw_obj) + return 0; + + return pvr_fw_structure_cleanup(queue->ctx->pvr_dev, + ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT, + queue->ctx->fw_obj, queue->ctx_offset); +} + +/** + * pvr_queue_job_init() - Initialize queue related fields in a pvr_job object. + * @job: The job to initialize. + * + * Bind the job to a queue and allocate memory to guarantee pvr_queue_job_arm() + * and pvr_queue_job_push() can't fail. We also make sure the context type is + * valid and the job can fit in the CCCB. + * + * Return: + * * 0 on success, or + * * An error code if something failed. + */ +int pvr_queue_job_init(struct pvr_job *job) +{ + /* Fragment jobs need at least one native fence wait on the geometry job fence. */ + u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0; + struct pvr_queue *queue; + int err; + + if (atomic_read(&job->ctx->faulty)) + return -EIO; + + queue = pvr_context_get_queue_for_job(job->ctx, job->type); + if (!queue) + return -EINVAL; + + if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count))) + return -E2BIG; + + err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE); + if (err) + return err; + + job->cccb_fence = pvr_queue_fence_alloc(); + job->kccb_fence = pvr_kccb_fence_alloc(); + job->done_fence = pvr_queue_fence_alloc(); + if (!job->cccb_fence || !job->kccb_fence || !job->done_fence) + return -ENOMEM; + + return 0; +} + +/** + * pvr_queue_job_arm() - Arm a job object. + * @job: The job to arm. + * + * Initializes fences and returns the drm_sched finished fence so it can + * be exposed to the outside world. Once this function is called, you should + * make sure the job is pushed using pvr_queue_job_push(), or guarantee that + * no one grabbed a reference to the returned fence. The latter can happen if + * we do multi-job submission, and something failed when creating/initializing + * a job. In that case, we know the fence didn't leave the driver, and we + * can thus guarantee nobody will wait on a dead fence object. + * + * Return: + * * A dma_fence object. + */ +struct dma_fence *pvr_queue_job_arm(struct pvr_job *job) +{ + drm_sched_job_arm(&job->base); + + return &job->base.s_fence->finished; +} + +/** + * pvr_queue_job_cleanup() - Cleanup fence/scheduler related fields in the job object. + * @job: The job to cleanup. + * + * Should be called in the job release path.
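+ *
+ * For reference, the full job lifecycle against the helpers in this file
+ * looks roughly like this (sketch, error handling elided):
+ *
+ *	err = pvr_queue_job_init(job);
+ *	fence = pvr_queue_job_arm(job);
+ *	pvr_queue_job_push(job);
+ *	...
+ *	pvr_queue_job_cleanup(job);	(later, from the job release path)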
+ */ +void pvr_queue_job_cleanup(struct pvr_job *job) +{ + pvr_queue_fence_put(job->done_fence); + pvr_queue_fence_put(job->cccb_fence); + pvr_kccb_fence_put(job->kccb_fence); + + if (job->base.s_fence) + drm_sched_job_cleanup(&job->base); +} + +/** + * pvr_queue_job_push() - Push a job to its queue. + * @job: The job to push. + * + * Must be called after pvr_queue_job_init() and after all dependencies + * have been added to the job. This will effectively queue the job to + * the drm_sched_entity attached to the queue. We grab a reference on + * the job object, so the caller is free to drop its reference when it's + * done accessing the job object. + */ +void pvr_queue_job_push(struct pvr_job *job) +{ + struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); + + /* Keep track of the last queued job scheduled fence for combined submit. */ + dma_fence_put(queue->last_queued_job_scheduled_fence); + queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled); + + pvr_job_get(job); + drm_sched_entity_push_job(&job->base); +} + +static void reg_state_init(void *cpu_ptr, void *priv) +{ + struct pvr_queue *queue = priv; + + if (queue->type == DRM_PVR_JOB_TYPE_GEOMETRY) { + struct rogue_fwif_geom_ctx_state *geom_ctx_state_fw = cpu_ptr; + + geom_ctx_state_fw->geom_core[0].geom_reg_vdm_call_stack_pointer_init = + queue->callstack_addr; + } +} + +/** + * pvr_queue_create() - Create a queue object. + * @ctx: The context this queue will be attached to. + * @type: The type of jobs being pushed to this queue. + * @args: The arguments passed to the context creation function. + * @fw_ctx_map: CPU mapping of the FW context object. + * + * Create a queue object that will be used to queue and track jobs. + * + * Return: + * * A valid pointer to a pvr_queue object, or + * * An error pointer if the creation/initialization failed. 
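+ *
+ * Sketch of the expected call pattern (hypothetical caller in the context
+ * creation path, not taken verbatim from this patch):
+ *
+ *	queue = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_GEOMETRY, args, fw_ctx_map);
+ *	if (IS_ERR(queue))
+ *		return PTR_ERR(queue);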
+ */ +struct pvr_queue *pvr_queue_create(struct pvr_context *ctx, + enum drm_pvr_job_type type, + struct drm_pvr_ioctl_create_context_args *args, + void *fw_ctx_map) +{ + static const struct { + u32 cccb_size; + const char *name; + } props[] = { + [DRM_PVR_JOB_TYPE_GEOMETRY] = { + .cccb_size = CTX_GEOM_CCCB_SIZE_LOG2, + .name = "geometry", + }, + [DRM_PVR_JOB_TYPE_FRAGMENT] = { + .cccb_size = CTX_FRAG_CCCB_SIZE_LOG2, + .name = "fragment" + }, + [DRM_PVR_JOB_TYPE_COMPUTE] = { + .cccb_size = CTX_COMPUTE_CCCB_SIZE_LOG2, + .name = "compute" + }, + [DRM_PVR_JOB_TYPE_TRANSFER_FRAG] = { + .cccb_size = CTX_TRANSFER_CCCB_SIZE_LOG2, + .name = "transfer_frag" + }, + }; + struct pvr_device *pvr_dev = ctx->pvr_dev; + struct drm_gpu_scheduler *sched; + struct pvr_queue *queue; + int ctx_state_size, err; + void *cpu_map; + + if (WARN_ON(type >= sizeof(props))) + return ERR_PTR(-EINVAL); + + switch (ctx->type) { + case DRM_PVR_CTX_TYPE_RENDER: + if (type != DRM_PVR_JOB_TYPE_GEOMETRY && + type != DRM_PVR_JOB_TYPE_FRAGMENT) + return ERR_PTR(-EINVAL); + break; + case DRM_PVR_CTX_TYPE_COMPUTE: + if (type != DRM_PVR_JOB_TYPE_COMPUTE) + return ERR_PTR(-EINVAL); + break; + case DRM_PVR_CTX_TYPE_TRANSFER_FRAG: + if (type != DRM_PVR_JOB_TYPE_TRANSFER_FRAG) + return ERR_PTR(-EINVAL); + break; + default: + return ERR_PTR(-EINVAL); + } + + ctx_state_size = get_ctx_state_size(pvr_dev, type); + if (ctx_state_size < 0) + return ERR_PTR(ctx_state_size); + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + return ERR_PTR(-ENOMEM); + + queue->type = type; + queue->ctx_offset = get_ctx_offset(type); + queue->ctx = ctx; + queue->callstack_addr = args->callstack_addr; + sched = &queue->scheduler; + INIT_LIST_HEAD(&queue->node); + mutex_init(&queue->cccb_fence_ctx.job_lock); + pvr_queue_fence_ctx_init(&queue->cccb_fence_ctx.base); + pvr_queue_fence_ctx_init(&queue->job_fence_ctx); + + err = pvr_cccb_init(pvr_dev, &queue->cccb, props[type].cccb_size, props[type].name); + if (err) + goto err_free_queue; + + err = pvr_fw_object_create(pvr_dev, ctx_state_size, + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + reg_state_init, queue, &queue->reg_state_obj); + if (err) + goto err_cccb_fini; + + init_fw_context(queue, fw_ctx_map); + + if (type != DRM_PVR_JOB_TYPE_GEOMETRY && type != DRM_PVR_JOB_TYPE_FRAGMENT && + args->callstack_addr) { + err = -EINVAL; + goto err_release_reg_state; + } + + cpu_map = pvr_fw_object_create_and_map(pvr_dev, sizeof(*queue->timeline_ufo.value), + PVR_BO_FW_FLAGS_DEVICE_UNCACHED, + NULL, NULL, &queue->timeline_ufo.fw_obj); + if (IS_ERR(cpu_map)) { + err = PTR_ERR(cpu_map); + goto err_release_reg_state; + } + + queue->timeline_ufo.value = cpu_map; + + err = drm_sched_init(&queue->scheduler, + &pvr_queue_sched_ops, + pvr_dev->sched_wq, 1, 64 * 1024, 1, + msecs_to_jiffies(500), + pvr_dev->sched_wq, NULL, "pvr-queue", + pvr_dev->base.dev); + if (err) + goto err_release_ufo; + + err = drm_sched_entity_init(&queue->entity, + DRM_SCHED_PRIORITY_KERNEL, + &sched, 1, &ctx->faulty); + if (err) + goto err_sched_fini; + + mutex_lock(&pvr_dev->queues.lock); + list_add_tail(&queue->node, &pvr_dev->queues.idle); + mutex_unlock(&pvr_dev->queues.lock); + + return queue; + +err_sched_fini: + drm_sched_fini(&queue->scheduler); + +err_release_ufo: + pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj); + +err_release_reg_state: + pvr_fw_object_destroy(queue->reg_state_obj); + +err_cccb_fini: + pvr_cccb_fini(&queue->cccb); + +err_free_queue: + mutex_destroy(&queue->cccb_fence_ctx.job_lock); + kfree(queue); + + return 
ERR_PTR(err); +} + +void pvr_queue_device_pre_reset(struct pvr_device *pvr_dev) +{ + struct pvr_queue *queue; + + mutex_lock(&pvr_dev->queues.lock); + list_for_each_entry(queue, &pvr_dev->queues.idle, node) + pvr_queue_stop(queue, NULL); + list_for_each_entry(queue, &pvr_dev->queues.active, node) + pvr_queue_stop(queue, NULL); + mutex_unlock(&pvr_dev->queues.lock); +} + +void pvr_queue_device_post_reset(struct pvr_device *pvr_dev) +{ + struct pvr_queue *queue; + + mutex_lock(&pvr_dev->queues.lock); + list_for_each_entry(queue, &pvr_dev->queues.active, node) + pvr_queue_start(queue); + list_for_each_entry(queue, &pvr_dev->queues.idle, node) + pvr_queue_start(queue); + mutex_unlock(&pvr_dev->queues.lock); +} + +/** + * pvr_queue_kill() - Kill a queue. + * @queue: The queue to kill. + * + * Kill the queue so no new jobs can be pushed. Should be called when the + * context handle is destroyed. The queue object might last longer if jobs + * are still in flight and holding a reference to the context this queue + * belongs to. + */ +void pvr_queue_kill(struct pvr_queue *queue) +{ + drm_sched_entity_destroy(&queue->entity); + dma_fence_put(queue->last_queued_job_scheduled_fence); + queue->last_queued_job_scheduled_fence = NULL; +} + +/** + * pvr_queue_destroy() - Destroy a queue. + * @queue: The queue to destroy. + * + * Cleanup the queue and free the resources attached to it. Should be + * called from the context release function. + */ +void pvr_queue_destroy(struct pvr_queue *queue) +{ + if (!queue) + return; + + mutex_lock(&queue->ctx->pvr_dev->queues.lock); + list_del_init(&queue->node); + mutex_unlock(&queue->ctx->pvr_dev->queues.lock); + + drm_sched_fini(&queue->scheduler); + drm_sched_entity_fini(&queue->entity); + + if (WARN_ON(queue->last_queued_job_scheduled_fence)) + dma_fence_put(queue->last_queued_job_scheduled_fence); + + pvr_queue_cleanup_fw_context(queue); + + pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj); + pvr_fw_object_destroy(queue->reg_state_obj); + pvr_cccb_fini(&queue->cccb); + mutex_destroy(&queue->cccb_fence_ctx.job_lock); + kfree(queue); +} + +/** + * pvr_queue_device_init() - Device-level initialization of queue related fields. + * @pvr_dev: The device to initialize. + * + * Initializes all fields related to queue management in pvr_device. + * + * Return: + * * 0 on success, or + * * An error code on failure. + */ +int pvr_queue_device_init(struct pvr_device *pvr_dev) +{ + int err; + + INIT_LIST_HEAD(&pvr_dev->queues.active); + INIT_LIST_HEAD(&pvr_dev->queues.idle); + err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_dev->queues.lock); + if (err) + return err; + + pvr_dev->sched_wq = alloc_workqueue("powervr-sched", WQ_UNBOUND, 0); + if (!pvr_dev->sched_wq) + return -ENOMEM; + + return 0; +} + +/** + * pvr_queue_device_fini() - Device-level cleanup of queue related fields. + * @pvr_dev: The device to cleanup. + * + * Cleanup/free all queue-related resources attached to a pvr_device object. + */ +void pvr_queue_device_fini(struct pvr_device *pvr_dev) +{ + destroy_workqueue(pvr_dev->sched_wq); +} diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h new file mode 100644 index 0000000000..e06ced6930 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_queue.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. 
*/ + +#ifndef PVR_QUEUE_H +#define PVR_QUEUE_H + +#include <drm/gpu_scheduler.h> + +#include "pvr_cccb.h" +#include "pvr_device.h" + +struct pvr_context; +struct pvr_queue; + +/** + * struct pvr_queue_fence_ctx - Queue fence context + * + * Used to implement dma_fence_ops for pvr_job::{done,cccb}_fence. + */ +struct pvr_queue_fence_ctx { + /** @id: Fence context ID allocated with dma_fence_context_alloc(). */ + u64 id; + + /** @seqno: Sequence number incremented each time a fence is created. */ + atomic_t seqno; + + /** @lock: Lock used to synchronize access to fences allocated by this context. */ + spinlock_t lock; +}; + +/** + * struct pvr_queue_cccb_fence_ctx - CCCB fence context + * + * Context used to manage fences controlling access to the CCCB. No fences are + * issued if there's enough space in the CCCB to push job commands. + */ +struct pvr_queue_cccb_fence_ctx { + /** @base: Base queue fence context. */ + struct pvr_queue_fence_ctx base; + + /** + * @job: Job waiting for CCCB space. + * + * Thanks to the serialization done at the drm_sched_entity level, + * there's no more than one job waiting for CCCB space at a given time. + * + * This field is NULL if no jobs are currently waiting for CCCB space. + * + * Must be accessed with @job_lock held. + */ + struct pvr_job *job; + + /** @job_lock: Lock protecting access to the job object. */ + struct mutex job_lock; +}; + +/** + * struct pvr_queue_fence - Queue fence object + */ +struct pvr_queue_fence { + /** @base: Base dma_fence. */ + struct dma_fence base; + + /** @queue: Queue that created this fence. */ + struct pvr_queue *queue; +}; + +/** + * struct pvr_queue - Job queue + * + * Used to queue and track execution of pvr_job objects. + */ +struct pvr_queue { + /** @scheduler: Single entity scheduler used to push jobs to this queue. */ + struct drm_gpu_scheduler scheduler; + + /** @entity: Scheduling entity backing this queue. */ + struct drm_sched_entity entity; + + /** @type: Type of jobs queued to this queue. */ + enum drm_pvr_job_type type; + + /** @ctx: Context object this queue is bound to. */ + struct pvr_context *ctx; + + /** @node: Used to add the queue to the active/idle queue list. */ + struct list_head node; + + /** + * @in_flight_job_count: Number of jobs submitted to the CCCB that + * have not been processed yet. + */ + atomic_t in_flight_job_count; + + /** + * @cccb_fence_ctx: CCCB fence context. + * + * Used to control access to the CCCB when it is full, such that we don't + * end up trying to push commands to the CCCB if there's not enough + * space to receive all commands needed for a job to complete. + */ + struct pvr_queue_cccb_fence_ctx cccb_fence_ctx; + + /** @job_fence_ctx: Job fence context object. */ + struct pvr_queue_fence_ctx job_fence_ctx; + + /** @timeline_ufo: Timeline UFO for the context queue. */ + struct { + /** @fw_obj: FW object representing the UFO value. */ + struct pvr_fw_object *fw_obj; + + /** @value: CPU mapping of the UFO value. */ + u32 *value; + } timeline_ufo; + + /** + * @last_queued_job_scheduled_fence: The scheduled fence of the last + * job queued to this queue. + * + * We use it to insert frag -> geom dependencies when issuing combined + * geom+frag jobs, to guarantee that the fragment job that's part of + * the combined operation comes after all fragment jobs that were queued + * before it. + */ + struct dma_fence *last_queued_job_scheduled_fence; + + /** @cccb: Client Circular Command Buffer. */ + struct pvr_cccb cccb; + + /** @reg_state_obj: FW object representing the register state of this queue.
*/ + struct pvr_fw_object *reg_state_obj; + + /** @ctx_offset: Offset of the queue context in the FW context object. */ + u32 ctx_offset; + + /** @callstack_addr: Initial call stack address for register state object. */ + u64 callstack_addr; +}; + +bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f); + +int pvr_queue_job_init(struct pvr_job *job); + +void pvr_queue_job_cleanup(struct pvr_job *job); + +void pvr_queue_job_push(struct pvr_job *job); + +struct dma_fence *pvr_queue_job_arm(struct pvr_job *job); + +struct pvr_queue *pvr_queue_create(struct pvr_context *ctx, + enum drm_pvr_job_type type, + struct drm_pvr_ioctl_create_context_args *args, + void *fw_ctx_map); + +void pvr_queue_kill(struct pvr_queue *queue); + +void pvr_queue_destroy(struct pvr_queue *queue); + +void pvr_queue_process(struct pvr_queue *queue); + +void pvr_queue_device_pre_reset(struct pvr_device *pvr_dev); + +void pvr_queue_device_post_reset(struct pvr_device *pvr_dev); + +int pvr_queue_device_init(struct pvr_device *pvr_dev); + +void pvr_queue_device_fini(struct pvr_device *pvr_dev); + +#endif /* PVR_QUEUE_H */ diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h new file mode 100644 index 0000000000..2a90d02796 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs.h @@ -0,0 +1,6193 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +/* *** Autogenerated C -- do not edit *** */ + +#ifndef PVR_ROGUE_CR_DEFS_H +#define PVR_ROGUE_CR_DEFS_H + +/* clang-format off */ + +#define ROGUE_CR_DEFS_REVISION 1 + +/* Register ROGUE_CR_RASTERISATION_INDIRECT */ +#define ROGUE_CR_RASTERISATION_INDIRECT 0x8238U +#define ROGUE_CR_RASTERISATION_INDIRECT_MASKFULL 0x000000000000000FULL +#define ROGUE_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT 0U +#define ROGUE_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U + +/* Register ROGUE_CR_PBE_INDIRECT */ +#define ROGUE_CR_PBE_INDIRECT 0x83E0U +#define ROGUE_CR_PBE_INDIRECT_MASKFULL 0x000000000000000FULL +#define ROGUE_CR_PBE_INDIRECT_ADDRESS_SHIFT 0U +#define ROGUE_CR_PBE_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U + +/* Register ROGUE_CR_PBE_PERF_INDIRECT */ +#define ROGUE_CR_PBE_PERF_INDIRECT 0x83D8U +#define ROGUE_CR_PBE_PERF_INDIRECT_MASKFULL 0x000000000000000FULL +#define ROGUE_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT 0U +#define ROGUE_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U + +/* Register ROGUE_CR_TPU_PERF_INDIRECT */ +#define ROGUE_CR_TPU_PERF_INDIRECT 0x83F0U +#define ROGUE_CR_TPU_PERF_INDIRECT_MASKFULL 0x0000000000000007ULL +#define ROGUE_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT 0U +#define ROGUE_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF8U + +/* Register ROGUE_CR_RASTERISATION_PERF_INDIRECT */ +#define ROGUE_CR_RASTERISATION_PERF_INDIRECT 0x8318U +#define ROGUE_CR_RASTERISATION_PERF_INDIRECT_MASKFULL 0x000000000000000FULL +#define ROGUE_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT 0U +#define ROGUE_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U + +/* Register ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT */ +#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT 0x8028U +#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL 0x0000000000000007ULL +#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT 0U +#define ROGUE_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF8U + +/* Register ROGUE_CR_USC_PERF_INDIRECT */ +#define ROGUE_CR_USC_PERF_INDIRECT 0x8030U +#define ROGUE_CR_USC_PERF_INDIRECT_MASKFULL 0x000000000000000FULL +#define 
ROGUE_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_BLACKPEARL_INDIRECT */
+#define ROGUE_CR_BLACKPEARL_INDIRECT 0x8388U
+#define ROGUE_CR_BLACKPEARL_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_BLACKPEARL_PERF_INDIRECT */
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT 0x83F8U
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_TEXAS3_PERF_INDIRECT */
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT 0x83D0U
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFF8U
+
+/* Register ROGUE_CR_TEXAS_PERF_INDIRECT */
+#define ROGUE_CR_TEXAS_PERF_INDIRECT 0x8288U
+#define ROGUE_CR_TEXAS_PERF_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_BX_TU_PERF_INDIRECT */
+#define ROGUE_CR_BX_TU_PERF_INDIRECT 0xC900U
+#define ROGUE_CR_BX_TU_PERF_INDIRECT_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT 0U
+#define ROGUE_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_CLK_CTRL */
+#define ROGUE_CR_CLK_CTRL 0x0000U
+#define ROGUE_CR_CLK_CTRL__PBE2_XE__MASKFULL 0xFFFFFF003F3FFFFFULL
+#define ROGUE_CR_CLK_CTRL__S7_TOP__MASKFULL 0xCFCF03000F3F3F0FULL
+#define ROGUE_CR_CLK_CTRL_MASKFULL 0xFFFFFF003F3FFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_SHIFT 62U
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_CLRMSK 0x3FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_ON 0x4000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_TEXAS_AUTO 0x8000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_IPP_SHIFT 60U
+#define ROGUE_CR_CLK_CTRL_IPP_CLRMSK 0xCFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_IPP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_IPP_ON 0x1000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_IPP_AUTO 0x2000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBC_SHIFT 58U
+#define ROGUE_CR_CLK_CTRL_FBC_CLRMSK 0xF3FFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_FBC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBC_ON 0x0400000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBC_AUTO 0x0800000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBDC_SHIFT 56U
+#define ROGUE_CR_CLK_CTRL_FBDC_CLRMSK 0xFCFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_FBDC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBDC_ON 0x0100000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FBDC_AUTO 0x0200000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_SHIFT 54U
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_CLRMSK 0xFF3FFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_ON 0x0040000000000000ULL
+#define ROGUE_CR_CLK_CTRL_FB_TLCACHE_AUTO 0x0080000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USCS_SHIFT 52U
+#define ROGUE_CR_CLK_CTRL_USCS_CLRMSK 0xFFCFFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_USCS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USCS_ON 0x0010000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USCS_AUTO 0x0020000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PBE_SHIFT 50U
+#define ROGUE_CR_CLK_CTRL_PBE_CLRMSK 0xFFF3FFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_PBE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PBE_ON 0x0004000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PBE_AUTO 0x0008000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_SHIFT 48U
+#define ROGUE_CR_CLK_CTRL_MCU_L1_CLRMSK 0xFFFCFFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_ON 0x0001000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L1_AUTO 0x0002000000000000ULL
+#define ROGUE_CR_CLK_CTRL_CDM_SHIFT 46U
+#define ROGUE_CR_CLK_CTRL_CDM_CLRMSK 0xFFFF3FFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_CDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_CDM_ON 0x0000400000000000ULL
+#define ROGUE_CR_CLK_CTRL_CDM_AUTO 0x0000800000000000ULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_SHIFT 44U
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_CLRMSK 0xFFFFCFFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_ON 0x0000100000000000ULL
+#define ROGUE_CR_CLK_CTRL_SIDEKICK_AUTO 0x0000200000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT 42U
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK 0xFFFFF3FFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_ON 0x0000040000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SIDEKICK_AUTO 0x0000080000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_SHIFT 40U
+#define ROGUE_CR_CLK_CTRL_BIF_CLRMSK 0xFFFFFCFFFFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_BIF_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_ON 0x0000010000000000ULL
+#define ROGUE_CR_CLK_CTRL_BIF_AUTO 0x0000020000000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT 28U
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK 0xFFFFFFFFCFFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_ON 0x0000000010000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO 0x0000000020000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_SHIFT 26U
+#define ROGUE_CR_CLK_CTRL_MCU_L0_CLRMSK 0xFFFFFFFFF3FFFFFFULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_ON 0x0000000004000000ULL
+#define ROGUE_CR_CLK_CTRL_MCU_L0_AUTO 0x0000000008000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_SHIFT 24U
+#define ROGUE_CR_CLK_CTRL_TPU_CLRMSK 0xFFFFFFFFFCFFFFFFULL
+#define ROGUE_CR_CLK_CTRL_TPU_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_ON 0x0000000001000000ULL
+#define ROGUE_CR_CLK_CTRL_TPU_AUTO 0x0000000002000000ULL
+#define ROGUE_CR_CLK_CTRL_USC_SHIFT 20U
+#define ROGUE_CR_CLK_CTRL_USC_CLRMSK 0xFFFFFFFFFFCFFFFFULL
+#define ROGUE_CR_CLK_CTRL_USC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_USC_ON 0x0000000000100000ULL
+#define ROGUE_CR_CLK_CTRL_USC_AUTO 0x0000000000200000ULL
+#define ROGUE_CR_CLK_CTRL_TLA_SHIFT 18U
+#define ROGUE_CR_CLK_CTRL_TLA_CLRMSK 0xFFFFFFFFFFF3FFFFULL
+#define ROGUE_CR_CLK_CTRL_TLA_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TLA_ON 0x0000000000040000ULL
+#define ROGUE_CR_CLK_CTRL_TLA_AUTO 0x0000000000080000ULL
+#define ROGUE_CR_CLK_CTRL_SLC_SHIFT 16U
+#define ROGUE_CR_CLK_CTRL_SLC_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_CLK_CTRL_SLC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_SLC_ON 0x0000000000010000ULL
+#define ROGUE_CR_CLK_CTRL_SLC_AUTO 0x0000000000020000ULL
+#define ROGUE_CR_CLK_CTRL_UVS_SHIFT 14U
+#define ROGUE_CR_CLK_CTRL_UVS_CLRMSK 0xFFFFFFFFFFFF3FFFULL
+#define ROGUE_CR_CLK_CTRL_UVS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_UVS_ON 0x0000000000004000ULL
+#define ROGUE_CR_CLK_CTRL_UVS_AUTO 0x0000000000008000ULL
+#define ROGUE_CR_CLK_CTRL_PDS_SHIFT 12U
+#define ROGUE_CR_CLK_CTRL_PDS_CLRMSK 0xFFFFFFFFFFFFCFFFULL
+#define ROGUE_CR_CLK_CTRL_PDS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PDS_ON 0x0000000000001000ULL
+#define ROGUE_CR_CLK_CTRL_PDS_AUTO 0x0000000000002000ULL
+#define ROGUE_CR_CLK_CTRL_VDM_SHIFT 10U
+#define ROGUE_CR_CLK_CTRL_VDM_CLRMSK 0xFFFFFFFFFFFFF3FFULL
+#define ROGUE_CR_CLK_CTRL_VDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_VDM_ON 0x0000000000000400ULL
+#define ROGUE_CR_CLK_CTRL_VDM_AUTO 0x0000000000000800ULL
+#define ROGUE_CR_CLK_CTRL_PM_SHIFT 8U
+#define ROGUE_CR_CLK_CTRL_PM_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_CLK_CTRL_PM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_PM_ON 0x0000000000000100ULL
+#define ROGUE_CR_CLK_CTRL_PM_AUTO 0x0000000000000200ULL
+#define ROGUE_CR_CLK_CTRL_GPP_SHIFT 6U
+#define ROGUE_CR_CLK_CTRL_GPP_CLRMSK 0xFFFFFFFFFFFFFF3FULL
+#define ROGUE_CR_CLK_CTRL_GPP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_GPP_ON 0x0000000000000040ULL
+#define ROGUE_CR_CLK_CTRL_GPP_AUTO 0x0000000000000080ULL
+#define ROGUE_CR_CLK_CTRL_TE_SHIFT 4U
+#define ROGUE_CR_CLK_CTRL_TE_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_CLK_CTRL_TE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TE_ON 0x0000000000000010ULL
+#define ROGUE_CR_CLK_CTRL_TE_AUTO 0x0000000000000020ULL
+#define ROGUE_CR_CLK_CTRL_TSP_SHIFT 2U
+#define ROGUE_CR_CLK_CTRL_TSP_CLRMSK 0xFFFFFFFFFFFFFFF3ULL
+#define ROGUE_CR_CLK_CTRL_TSP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_TSP_ON 0x0000000000000004ULL
+#define ROGUE_CR_CLK_CTRL_TSP_AUTO 0x0000000000000008ULL
+#define ROGUE_CR_CLK_CTRL_ISP_SHIFT 0U
+#define ROGUE_CR_CLK_CTRL_ISP_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+#define ROGUE_CR_CLK_CTRL_ISP_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL_ISP_ON 0x0000000000000001ULL
+#define ROGUE_CR_CLK_CTRL_ISP_AUTO 0x0000000000000002ULL
+
+/* Register ROGUE_CR_CLK_STATUS */
+#define ROGUE_CR_CLK_STATUS 0x0008U
+#define ROGUE_CR_CLK_STATUS__PBE2_XE__MASKFULL 0x00000001FFF077FFULL
+#define ROGUE_CR_CLK_STATUS__S7_TOP__MASKFULL 0x00000001B3101773ULL
+#define ROGUE_CR_CLK_STATUS_MASKFULL 0x00000001FFF077FFULL
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_SHIFT 32U
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_FBTC_RUNNING 0x0000000100000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_SHIFT 31U
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_TEXAS_RUNNING 0x0000000080000000ULL
+#define ROGUE_CR_CLK_STATUS_IPP_SHIFT 30U
+#define ROGUE_CR_CLK_STATUS_IPP_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_IPP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_IPP_RUNNING 0x0000000040000000ULL
+#define ROGUE_CR_CLK_STATUS_FBC_SHIFT 29U
+#define ROGUE_CR_CLK_STATUS_FBC_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_FBC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_FBC_RUNNING 0x0000000020000000ULL
+#define ROGUE_CR_CLK_STATUS_FBDC_SHIFT 28U
+#define ROGUE_CR_CLK_STATUS_FBDC_CLRMSK 0xFFFFFFFFEFFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_FBDC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_FBDC_RUNNING 0x0000000010000000ULL
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_SHIFT 27U
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_CLRMSK 0xFFFFFFFFF7FFFFFFULL
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_FB_TLCACHE_RUNNING 0x0000000008000000ULL
+#define ROGUE_CR_CLK_STATUS_USCS_SHIFT 26U
+#define ROGUE_CR_CLK_STATUS_USCS_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_USCS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_USCS_RUNNING 0x0000000004000000ULL
+#define ROGUE_CR_CLK_STATUS_PBE_SHIFT 25U
+#define ROGUE_CR_CLK_STATUS_PBE_CLRMSK 0xFFFFFFFFFDFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_PBE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_PBE_RUNNING 0x0000000002000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L1_SHIFT 24U
+#define ROGUE_CR_CLK_STATUS_MCU_L1_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_CLK_STATUS_MCU_L1_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L1_RUNNING 0x0000000001000000ULL
+#define ROGUE_CR_CLK_STATUS_CDM_SHIFT 23U
+#define ROGUE_CR_CLK_STATUS_CDM_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_CLK_STATUS_CDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_CDM_RUNNING 0x0000000000800000ULL
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_SHIFT 22U
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_SIDEKICK_RUNNING 0x0000000000400000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT 21U
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING 0x0000000000200000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_SHIFT 20U
+#define ROGUE_CR_CLK_STATUS_BIF_CLRMSK 0xFFFFFFFFFFEFFFFFULL
+#define ROGUE_CR_CLK_STATUS_BIF_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_BIF_RUNNING 0x0000000000100000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT 14U
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING 0x0000000000004000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L0_SHIFT 13U
+#define ROGUE_CR_CLK_STATUS_MCU_L0_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_CLK_STATUS_MCU_L0_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_MCU_L0_RUNNING 0x0000000000002000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_SHIFT 12U
+#define ROGUE_CR_CLK_STATUS_TPU_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_CLK_STATUS_TPU_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TPU_RUNNING 0x0000000000001000ULL
+#define ROGUE_CR_CLK_STATUS_USC_SHIFT 10U
+#define ROGUE_CR_CLK_STATUS_USC_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_CLK_STATUS_USC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_USC_RUNNING 0x0000000000000400ULL
+#define ROGUE_CR_CLK_STATUS_TLA_SHIFT 9U
+#define ROGUE_CR_CLK_STATUS_TLA_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_CLK_STATUS_TLA_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TLA_RUNNING 0x0000000000000200ULL
+#define ROGUE_CR_CLK_STATUS_SLC_SHIFT 8U
+#define ROGUE_CR_CLK_STATUS_SLC_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_CLK_STATUS_SLC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_SLC_RUNNING 0x0000000000000100ULL
+#define ROGUE_CR_CLK_STATUS_UVS_SHIFT 7U
+#define ROGUE_CR_CLK_STATUS_UVS_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_CLK_STATUS_UVS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_UVS_RUNNING 0x0000000000000080ULL
+#define ROGUE_CR_CLK_STATUS_PDS_SHIFT 6U
+#define ROGUE_CR_CLK_STATUS_PDS_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_CLK_STATUS_PDS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_PDS_RUNNING 0x0000000000000040ULL
+#define ROGUE_CR_CLK_STATUS_VDM_SHIFT 5U
+#define ROGUE_CR_CLK_STATUS_VDM_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_CLK_STATUS_VDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_VDM_RUNNING 0x0000000000000020ULL
+#define ROGUE_CR_CLK_STATUS_PM_SHIFT 4U
+#define ROGUE_CR_CLK_STATUS_PM_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_CLK_STATUS_PM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_PM_RUNNING 0x0000000000000010ULL
+#define ROGUE_CR_CLK_STATUS_GPP_SHIFT 3U
+#define ROGUE_CR_CLK_STATUS_GPP_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_CLK_STATUS_GPP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_GPP_RUNNING 0x0000000000000008ULL
+#define ROGUE_CR_CLK_STATUS_TE_SHIFT 2U
+#define ROGUE_CR_CLK_STATUS_TE_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_CLK_STATUS_TE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TE_RUNNING 0x0000000000000004ULL
+#define ROGUE_CR_CLK_STATUS_TSP_SHIFT 1U
+#define ROGUE_CR_CLK_STATUS_TSP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_CLK_STATUS_TSP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_TSP_RUNNING 0x0000000000000002ULL
+#define ROGUE_CR_CLK_STATUS_ISP_SHIFT 0U
+#define ROGUE_CR_CLK_STATUS_ISP_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_CLK_STATUS_ISP_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS_ISP_RUNNING 0x0000000000000001ULL
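Each clock domain above is a two-bit field with _OFF/_ON/_AUTO encodings, and every field carries a _CLRMSK whose zero bits mark the field's position. A minimal read-modify-write sketch of that pattern in C, assuming hypothetical pvr_cr_read64()/pvr_cr_write64() MMIO accessors (stand-ins for whatever register helpers the driver actually uses):

#include <stdint.h>

/* Hypothetical MMIO accessors; the real driver's helpers may differ. */
extern uint64_t pvr_cr_read64(uint32_t offset);
extern void pvr_cr_write64(uint32_t offset, uint64_t value);

/* Switch the TPU clock domain to automatic gating: AND with the CLRMSK
 * to zero the two-bit field, then OR in the AUTO encoding. */
static void rogue_clk_tpu_auto(void)
{
	uint64_t val = pvr_cr_read64(ROGUE_CR_CLK_CTRL);

	val &= ROGUE_CR_CLK_CTRL_TPU_CLRMSK;
	val |= ROGUE_CR_CLK_CTRL_TPU_AUTO;
	pvr_cr_write64(ROGUE_CR_CLK_CTRL, val);
}

/* The matching ROGUE_CR_CLK_STATUS field then reads back as either
 * _GATED or _RUNNING depending on whether the domain is clocked. */
static int rogue_clk_tpu_running(void)
{
	return (pvr_cr_read64(ROGUE_CR_CLK_STATUS) &
		~ROGUE_CR_CLK_STATUS_TPU_CLRMSK) ==
	       ROGUE_CR_CLK_STATUS_TPU_RUNNING;
}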
+
+/* Register ROGUE_CR_CORE_ID */
+#define ROGUE_CR_CORE_ID__PBVNC 0x0020U
+#define ROGUE_CR_CORE_ID__PBVNC__MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT 48U
+#define ROGUE_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT 32U
+#define ROGUE_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK 0xFFFF0000FFFFFFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT 16U
+#define ROGUE_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK 0xFFFFFFFF0000FFFFULL
+#define ROGUE_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT 0U
+#define ROGUE_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_CORE_ID */
+#define ROGUE_CR_CORE_ID 0x0018U
+#define ROGUE_CR_CORE_ID_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_CORE_ID_ID_SHIFT 16U
+#define ROGUE_CR_CORE_ID_ID_CLRMSK 0x0000FFFFU
+#define ROGUE_CR_CORE_ID_CONFIG_SHIFT 0U
+#define ROGUE_CR_CORE_ID_CONFIG_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_CORE_REVISION */
+#define ROGUE_CR_CORE_REVISION 0x0020U
+#define ROGUE_CR_CORE_REVISION_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_CORE_REVISION_DESIGNER_SHIFT 24U
+#define ROGUE_CR_CORE_REVISION_DESIGNER_CLRMSK 0x00FFFFFFU
+#define ROGUE_CR_CORE_REVISION_MAJOR_SHIFT 16U
+#define ROGUE_CR_CORE_REVISION_MAJOR_CLRMSK 0xFF00FFFFU
+#define ROGUE_CR_CORE_REVISION_MINOR_SHIFT 8U
+#define ROGUE_CR_CORE_REVISION_MINOR_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_CORE_REVISION_MAINTENANCE_SHIFT 0U
+#define ROGUE_CR_CORE_REVISION_MAINTENANCE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_DESIGNER_REV_FIELD1 */
+#define ROGUE_CR_DESIGNER_REV_FIELD1 0x0028U
+#define ROGUE_CR_DESIGNER_REV_FIELD1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0U
+#define ROGUE_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_DESIGNER_REV_FIELD2 */
+#define ROGUE_CR_DESIGNER_REV_FIELD2 0x0030U
+#define ROGUE_CR_DESIGNER_REV_FIELD2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0U
+#define ROGUE_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_CHANGESET_NUMBER */
+#define ROGUE_CR_CHANGESET_NUMBER 0x0040U
+#define ROGUE_CR_CHANGESET_NUMBER_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT 0U
+#define ROGUE_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK 0x0000000000000000ULL
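Read-only ID registers such as ROGUE_CR_CORE_REVISION follow the same convention: a field's value is (reg & ~CLRMSK) >> SHIFT. A small decoding sketch (the function name is illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* Decode the major/minor/maintenance fields of a CORE_REVISION value.
 * ~CLRMSK selects the field's bits; SHIFT right-aligns them. */
static void rogue_print_core_revision(uint32_t rev)
{
	uint32_t major = (rev & ~ROGUE_CR_CORE_REVISION_MAJOR_CLRMSK) >>
			 ROGUE_CR_CORE_REVISION_MAJOR_SHIFT;
	uint32_t minor = (rev & ~ROGUE_CR_CORE_REVISION_MINOR_CLRMSK) >>
			 ROGUE_CR_CORE_REVISION_MINOR_SHIFT;
	uint32_t maint = (rev & ~ROGUE_CR_CORE_REVISION_MAINTENANCE_CLRMSK) >>
			 ROGUE_CR_CORE_REVISION_MAINTENANCE_SHIFT;

	printf("GPU revision %u.%u.%u\n", major, minor, maint);
}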
+
+/* Register ROGUE_CR_CLK_XTPLUS_CTRL */
+#define ROGUE_CR_CLK_XTPLUS_CTRL 0x0080U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_MASKFULL 0x0000003FFFFF0000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_SHIFT 36U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK 0xFFFFFFCFFFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_ON 0x0000001000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_TDM_AUTO 0x0000002000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT 34U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK 0xFFFFFFF3FFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_ON 0x0000000400000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_ASTC_AUTO 0x0000000800000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_SHIFT 32U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK 0xFFFFFFFCFFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_ON 0x0000000100000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_IPF_AUTO 0x0000000200000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT 30U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK 0xFFFFFFFF3FFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_ON 0x0000000040000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO 0x0000000080000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT 28U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK 0xFFFFFFFFCFFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_ON 0x0000000010000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO 0x0000000020000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT 26U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK 0xFFFFFFFFF3FFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_ON 0x0000000004000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO 0x0000000008000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT 24U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK 0xFFFFFFFFFCFFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_ON 0x0000000001000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USCPS_AUTO 0x0000000002000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT 22U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK 0xFFFFFFFFFF3FFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON 0x0000000000400000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO 0x0000000000800000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT 20U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK 0xFFFFFFFFFFCFFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON 0x0000000000100000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO 0x0000000000200000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT 18U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK 0xFFFFFFFFFFF3FFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON 0x0000000000040000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO 0x0000000000080000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT 16U
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON 0x0000000000010000ULL
+#define ROGUE_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO 0x0000000000020000ULL
+
+/* Register ROGUE_CR_CLK_XTPLUS_STATUS */
+#define ROGUE_CR_CLK_XTPLUS_STATUS 0x0088U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_MASKFULL 0x00000000000007FFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_SHIFT 10U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_TDM_RUNNING 0x0000000000000400ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_SHIFT 9U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_IPF_RUNNING 0x0000000000000200ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT 8U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING 0x0000000000000100ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT 7U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING 0x0000000000000080ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT 6U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING 0x0000000000000040ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT 5U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING 0x0000000000000020ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT 4U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING 0x0000000000000010ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT 3U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING 0x0000000000000008ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT 2U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING 0x0000000000000004ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT 1U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING 0x0000000000000002ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT 0U
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SOFT_RESET */
+#define ROGUE_CR_SOFT_RESET 0x0100U
+#define ROGUE_CR_SOFT_RESET__PBE2_XE__MASKFULL 0xFFEFFFFFFFFFFC3DULL
+#define ROGUE_CR_SOFT_RESET_MASKFULL 0x00E7FFFFFFFFFC3DULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM3_CORE_SHIFT 63U
+#define ROGUE_CR_SOFT_RESET_PHANTOM3_CORE_CLRMSK 0x7FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM3_CORE_EN 0x8000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM2_CORE_SHIFT 62U
+#define ROGUE_CR_SOFT_RESET_PHANTOM2_CORE_CLRMSK 0xBFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM2_CORE_EN 0x4000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_BERNADO2_CORE_SHIFT 61U
+#define ROGUE_CR_SOFT_RESET_BERNADO2_CORE_CLRMSK 0xDFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BERNADO2_CORE_EN 0x2000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_JONES_CORE_SHIFT 60U
+#define ROGUE_CR_SOFT_RESET_JONES_CORE_CLRMSK 0xEFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_JONES_CORE_EN 0x1000000000000000ULL
+#define ROGUE_CR_SOFT_RESET_TILING_CORE_SHIFT 59U
+#define ROGUE_CR_SOFT_RESET_TILING_CORE_CLRMSK 0xF7FFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TILING_CORE_EN 0x0800000000000000ULL
+#define ROGUE_CR_SOFT_RESET_TE3_SHIFT 58U
+#define ROGUE_CR_SOFT_RESET_TE3_CLRMSK 0xFBFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TE3_EN 0x0400000000000000ULL
+#define ROGUE_CR_SOFT_RESET_VCE_SHIFT 57U
+#define ROGUE_CR_SOFT_RESET_VCE_CLRMSK 0xFDFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_VCE_EN 0x0200000000000000ULL
+#define ROGUE_CR_SOFT_RESET_VBS_SHIFT 56U
+#define ROGUE_CR_SOFT_RESET_VBS_CLRMSK 0xFEFFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_VBS_EN 0x0100000000000000ULL
+#define ROGUE_CR_SOFT_RESET_DPX1_CORE_SHIFT 55U
+#define ROGUE_CR_SOFT_RESET_DPX1_CORE_CLRMSK 0xFF7FFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DPX1_CORE_EN 0x0080000000000000ULL
+#define ROGUE_CR_SOFT_RESET_DPX0_CORE_SHIFT 54U
+#define ROGUE_CR_SOFT_RESET_DPX0_CORE_CLRMSK 0xFFBFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DPX0_CORE_EN 0x0040000000000000ULL
+#define ROGUE_CR_SOFT_RESET_FBA_SHIFT 53U
+#define ROGUE_CR_SOFT_RESET_FBA_CLRMSK 0xFFDFFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FBA_EN 0x0020000000000000ULL
+#define ROGUE_CR_SOFT_RESET_FB_CDC_SHIFT 51U
+#define ROGUE_CR_SOFT_RESET_FB_CDC_CLRMSK 0xFFF7FFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FB_CDC_EN 0x0008000000000000ULL
+#define ROGUE_CR_SOFT_RESET_SH_SHIFT 50U
+#define ROGUE_CR_SOFT_RESET_SH_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_SH_EN 0x0004000000000000ULL
+#define ROGUE_CR_SOFT_RESET_VRDM_SHIFT 49U
+#define ROGUE_CR_SOFT_RESET_VRDM_CLRMSK 0xFFFDFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_VRDM_EN 0x0002000000000000ULL
+#define ROGUE_CR_SOFT_RESET_MCU_FBTC_SHIFT 48U
+#define ROGUE_CR_SOFT_RESET_MCU_FBTC_CLRMSK 0xFFFEFFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_MCU_FBTC_EN 0x0001000000000000ULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT 47U
+#define ROGUE_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK 0xFFFF7FFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM1_CORE_EN 0x0000800000000000ULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT 46U
+#define ROGUE_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK 0xFFFFBFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PHANTOM0_CORE_EN 0x0000400000000000ULL
+#define ROGUE_CR_SOFT_RESET_BERNADO1_CORE_SHIFT 45U
+#define ROGUE_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK 0xFFFFDFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BERNADO1_CORE_EN 0x0000200000000000ULL
+#define ROGUE_CR_SOFT_RESET_BERNADO0_CORE_SHIFT 44U
+#define ROGUE_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK 0xFFFFEFFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BERNADO0_CORE_EN 0x0000100000000000ULL
+#define ROGUE_CR_SOFT_RESET_IPP_SHIFT 43U
+#define ROGUE_CR_SOFT_RESET_IPP_CLRMSK 0xFFFFF7FFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_IPP_EN 0x0000080000000000ULL
+#define ROGUE_CR_SOFT_RESET_BIF_TEXAS_SHIFT 42U
+#define ROGUE_CR_SOFT_RESET_BIF_TEXAS_CLRMSK 0xFFFFFBFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BIF_TEXAS_EN 0x0000040000000000ULL
+#define ROGUE_CR_SOFT_RESET_TORNADO_CORE_SHIFT 41U
+#define ROGUE_CR_SOFT_RESET_TORNADO_CORE_CLRMSK 0xFFFFFDFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TORNADO_CORE_EN 0x0000020000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_H_CORE_SHIFT 40U
+#define ROGUE_CR_SOFT_RESET_DUST_H_CORE_CLRMSK 0xFFFFFEFFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_H_CORE_EN 0x0000010000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_G_CORE_SHIFT 39U
+#define ROGUE_CR_SOFT_RESET_DUST_G_CORE_CLRMSK 0xFFFFFF7FFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_G_CORE_EN 0x0000008000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_F_CORE_SHIFT 38U
+#define ROGUE_CR_SOFT_RESET_DUST_F_CORE_CLRMSK 0xFFFFFFBFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_F_CORE_EN 0x0000004000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_E_CORE_SHIFT 37U
+#define ROGUE_CR_SOFT_RESET_DUST_E_CORE_CLRMSK 0xFFFFFFDFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_E_CORE_EN 0x0000002000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_D_CORE_SHIFT 36U
+#define ROGUE_CR_SOFT_RESET_DUST_D_CORE_CLRMSK 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_D_CORE_EN 0x0000001000000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_C_CORE_SHIFT 35U
+#define ROGUE_CR_SOFT_RESET_DUST_C_CORE_CLRMSK 0xFFFFFFF7FFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_C_CORE_EN 0x0000000800000000ULL
+#define ROGUE_CR_SOFT_RESET_MMU_SHIFT 34U
+#define ROGUE_CR_SOFT_RESET_MMU_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_MMU_EN 0x0000000400000000ULL
+#define ROGUE_CR_SOFT_RESET_BIF1_SHIFT 33U
+#define ROGUE_CR_SOFT_RESET_BIF1_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_BIF1_EN 0x0000000200000000ULL
+#define ROGUE_CR_SOFT_RESET_GARTEN_SHIFT 32U
+#define ROGUE_CR_SOFT_RESET_GARTEN_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_GARTEN_EN 0x0000000100000000ULL
+#define ROGUE_CR_SOFT_RESET_CPU_SHIFT 32U
+#define ROGUE_CR_SOFT_RESET_CPU_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_CPU_EN 0x0000000100000000ULL
+#define ROGUE_CR_SOFT_RESET_RASCAL_CORE_SHIFT 31U
+#define ROGUE_CR_SOFT_RESET_RASCAL_CORE_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_RASCAL_CORE_EN 0x0000000080000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_B_CORE_SHIFT 30U
+#define ROGUE_CR_SOFT_RESET_DUST_B_CORE_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_B_CORE_EN 0x0000000040000000ULL
+#define ROGUE_CR_SOFT_RESET_DUST_A_CORE_SHIFT 29U
+#define ROGUE_CR_SOFT_RESET_DUST_A_CORE_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_DUST_A_CORE_EN 0x0000000020000000ULL
+#define ROGUE_CR_SOFT_RESET_FB_TLCACHE_SHIFT 28U
+#define ROGUE_CR_SOFT_RESET_FB_TLCACHE_CLRMSK 0xFFFFFFFFEFFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FB_TLCACHE_EN 0x0000000010000000ULL
+#define ROGUE_CR_SOFT_RESET_SLC_SHIFT 27U
+#define ROGUE_CR_SOFT_RESET_SLC_CLRMSK 0xFFFFFFFFF7FFFFFFULL
+#define ROGUE_CR_SOFT_RESET_SLC_EN 0x0000000008000000ULL
+#define ROGUE_CR_SOFT_RESET_TLA_SHIFT 26U
+#define ROGUE_CR_SOFT_RESET_TLA_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TLA_EN 0x0000000004000000ULL
+#define ROGUE_CR_SOFT_RESET_UVS_SHIFT 25U
+#define ROGUE_CR_SOFT_RESET_UVS_CLRMSK 0xFFFFFFFFFDFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_UVS_EN 0x0000000002000000ULL
+#define ROGUE_CR_SOFT_RESET_TE_SHIFT 24U
+#define ROGUE_CR_SOFT_RESET_TE_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_SOFT_RESET_TE_EN 0x0000000001000000ULL
+#define ROGUE_CR_SOFT_RESET_GPP_SHIFT 23U
+#define ROGUE_CR_SOFT_RESET_GPP_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_SOFT_RESET_GPP_EN 0x0000000000800000ULL
+#define ROGUE_CR_SOFT_RESET_FBDC_SHIFT 22U
+#define ROGUE_CR_SOFT_RESET_FBDC_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FBDC_EN 0x0000000000400000ULL
+#define ROGUE_CR_SOFT_RESET_FBC_SHIFT 21U
+#define ROGUE_CR_SOFT_RESET_FBC_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_SOFT_RESET_FBC_EN 0x0000000000200000ULL
+#define ROGUE_CR_SOFT_RESET_PM_SHIFT 20U
+#define ROGUE_CR_SOFT_RESET_PM_CLRMSK 0xFFFFFFFFFFEFFFFFULL
+#define ROGUE_CR_SOFT_RESET_PM_EN 0x0000000000100000ULL
+#define ROGUE_CR_SOFT_RESET_PBE_SHIFT 19U
+#define ROGUE_CR_SOFT_RESET_PBE_CLRMSK 0xFFFFFFFFFFF7FFFFULL
+#define ROGUE_CR_SOFT_RESET_PBE_EN 0x0000000000080000ULL
+#define ROGUE_CR_SOFT_RESET_USC_SHARED_SHIFT 18U
+#define ROGUE_CR_SOFT_RESET_USC_SHARED_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_SOFT_RESET_USC_SHARED_EN 0x0000000000040000ULL
+#define ROGUE_CR_SOFT_RESET_MCU_L1_SHIFT 17U
+#define ROGUE_CR_SOFT_RESET_MCU_L1_CLRMSK 0xFFFFFFFFFFFDFFFFULL
+#define ROGUE_CR_SOFT_RESET_MCU_L1_EN 0x0000000000020000ULL
+#define ROGUE_CR_SOFT_RESET_BIF_SHIFT 16U
+#define ROGUE_CR_SOFT_RESET_BIF_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_SOFT_RESET_BIF_EN 0x0000000000010000ULL
+#define ROGUE_CR_SOFT_RESET_CDM_SHIFT 15U
+#define ROGUE_CR_SOFT_RESET_CDM_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_SOFT_RESET_CDM_EN 0x0000000000008000ULL
+#define ROGUE_CR_SOFT_RESET_VDM_SHIFT 14U
+#define ROGUE_CR_SOFT_RESET_VDM_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_SOFT_RESET_VDM_EN 0x0000000000004000ULL
+#define ROGUE_CR_SOFT_RESET_TESS_SHIFT 13U
+#define ROGUE_CR_SOFT_RESET_TESS_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_SOFT_RESET_TESS_EN 0x0000000000002000ULL
+#define ROGUE_CR_SOFT_RESET_PDS_SHIFT 12U
+#define ROGUE_CR_SOFT_RESET_PDS_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_SOFT_RESET_PDS_EN 0x0000000000001000ULL
+#define ROGUE_CR_SOFT_RESET_ISP_SHIFT 11U
+#define ROGUE_CR_SOFT_RESET_ISP_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_SOFT_RESET_ISP_EN 0x0000000000000800ULL
+#define ROGUE_CR_SOFT_RESET_TSP_SHIFT 10U
+#define ROGUE_CR_SOFT_RESET_TSP_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_SOFT_RESET_TSP_EN 0x0000000000000400ULL
+#define ROGUE_CR_SOFT_RESET_SYSARB_SHIFT 5U
+#define ROGUE_CR_SOFT_RESET_SYSARB_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_SOFT_RESET_SYSARB_EN 0x0000000000000020ULL
+#define ROGUE_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT 4U
+#define ROGUE_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_SOFT_RESET_TPU_MCU_DEMUX_EN 0x0000000000000010ULL
+#define ROGUE_CR_SOFT_RESET_MCU_L0_SHIFT 3U
+#define ROGUE_CR_SOFT_RESET_MCU_L0_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_SOFT_RESET_MCU_L0_EN 0x0000000000000008ULL
+#define ROGUE_CR_SOFT_RESET_TPU_SHIFT 2U
+#define ROGUE_CR_SOFT_RESET_TPU_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_SOFT_RESET_TPU_EN 0x0000000000000004ULL
+#define ROGUE_CR_SOFT_RESET_USC_SHIFT 0U
+#define ROGUE_CR_SOFT_RESET_USC_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_SOFT_RESET_USC_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_SOFT_RESET2 */
+#define ROGUE_CR_SOFT_RESET2 0x0108U
+#define ROGUE_CR_SOFT_RESET2_MASKFULL 0x00000000001FFFFFULL
+#define ROGUE_CR_SOFT_RESET2_SPFILTER_SHIFT 12U
+#define ROGUE_CR_SOFT_RESET2_SPFILTER_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_SOFT_RESET2_TDM_SHIFT 11U
+#define ROGUE_CR_SOFT_RESET2_TDM_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_SOFT_RESET2_TDM_EN 0x00000800U
+#define ROGUE_CR_SOFT_RESET2_ASTC_SHIFT 10U
+#define ROGUE_CR_SOFT_RESET2_ASTC_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_SOFT_RESET2_ASTC_EN 0x00000400U
+#define ROGUE_CR_SOFT_RESET2_BLACKPEARL_SHIFT 9U
+#define ROGUE_CR_SOFT_RESET2_BLACKPEARL_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_SOFT_RESET2_BLACKPEARL_EN 0x00000200U
+#define ROGUE_CR_SOFT_RESET2_USCPS_SHIFT 8U
+#define ROGUE_CR_SOFT_RESET2_USCPS_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_SOFT_RESET2_USCPS_EN 0x00000100U
+#define ROGUE_CR_SOFT_RESET2_IPF_SHIFT 7U
+#define ROGUE_CR_SOFT_RESET2_IPF_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_SOFT_RESET2_IPF_EN 0x00000080U
+#define ROGUE_CR_SOFT_RESET2_GEOMETRY_SHIFT 6U
+#define ROGUE_CR_SOFT_RESET2_GEOMETRY_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SOFT_RESET2_GEOMETRY_EN 0x00000040U
+#define ROGUE_CR_SOFT_RESET2_USC_SHARED_SHIFT 5U
+#define ROGUE_CR_SOFT_RESET2_USC_SHARED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SOFT_RESET2_USC_SHARED_EN 0x00000020U
+#define ROGUE_CR_SOFT_RESET2_PDS_SHARED_SHIFT 4U
+#define ROGUE_CR_SOFT_RESET2_PDS_SHARED_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SOFT_RESET2_PDS_SHARED_EN 0x00000010U
+#define ROGUE_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT 3U
+#define ROGUE_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SOFT_RESET2_BIF_BLACKPEARL_EN 0x00000008U
+#define ROGUE_CR_SOFT_RESET2_PIXEL_SHIFT 2U
+#define ROGUE_CR_SOFT_RESET2_PIXEL_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SOFT_RESET2_PIXEL_EN 0x00000004U
+#define ROGUE_CR_SOFT_RESET2_CDM_SHIFT 1U
+#define ROGUE_CR_SOFT_RESET2_CDM_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SOFT_RESET2_CDM_EN 0x00000002U
+#define ROGUE_CR_SOFT_RESET2_VERTEX_SHIFT 0U
+#define ROGUE_CR_SOFT_RESET2_VERTEX_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SOFT_RESET2_VERTEX_EN 0x00000001U
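The single-bit _EN constants in the two soft-reset registers assert a unit's reset line; clearing the bit releases it. A sketch of pulsing one unit's reset, again using the hypothetical accessors from above and omitting the read-back fence or delay a real sequence would likely require:

#include <stdint.h>

extern uint64_t pvr_cr_read64(uint32_t offset);
extern void pvr_cr_write64(uint32_t offset, uint64_t value);

/* Pulse the SLC soft reset: set the EN bit, then clear it with the
 * CLRMSK, leaving every other unit's reset bit unchanged. */
static void rogue_soft_reset_slc(void)
{
	uint64_t val = pvr_cr_read64(ROGUE_CR_SOFT_RESET);

	pvr_cr_write64(ROGUE_CR_SOFT_RESET, val | ROGUE_CR_SOFT_RESET_SLC_EN);
	pvr_cr_write64(ROGUE_CR_SOFT_RESET, val & ROGUE_CR_SOFT_RESET_SLC_CLRMSK);
}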
+
+/* Register ROGUE_CR_EVENT_STATUS */
+#define ROGUE_CR_EVENT_STATUS 0x0130U
+#define ROGUE_CR_EVENT_STATUS__ROGUEXE__MASKFULL 0x00000000E01DFFFFULL
+#define ROGUE_CR_EVENT_STATUS__SIGNALS__MASKFULL 0x00000000E007FFFFULL
+#define ROGUE_CR_EVENT_STATUS_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT 31U
+#define ROGUE_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN 0x80000000U
+#define ROGUE_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT 30U
+#define ROGUE_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN 0x40000000U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT 29U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN 0x20000000U
+#define ROGUE_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT 28U
+#define ROGUE_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN 0x10000000U
+#define ROGUE_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT 27U
+#define ROGUE_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN 0x08000000U
+#define ROGUE_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT 26U
+#define ROGUE_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN 0x04000000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT 25U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN 0x02000000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT 24U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK 0xFEFFFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN 0x01000000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT 23U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK 0xFF7FFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN 0x00800000U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT 22U
+#define ROGUE_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN 0x00400000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT 21U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN 0x00200000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT 20U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN 0x00100000U
+#define ROGUE_CR_EVENT_STATUS_SAFETY_SHIFT 20U
+#define ROGUE_CR_EVENT_STATUS_SAFETY_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_EVENT_STATUS_SAFETY_EN 0x00100000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT 19U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN 0x00080000U
+#define ROGUE_CR_EVENT_STATUS_SLAVE_REQ_SHIFT 19U
+#define ROGUE_CR_EVENT_STATUS_SLAVE_REQ_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_EVENT_STATUS_SLAVE_REQ_EN 0x00080000U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT 18U
+#define ROGUE_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN 0x00040000U
+#define ROGUE_CR_EVENT_STATUS_SHG_FINISHED_SHIFT 17U
+#define ROGUE_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_STATUS_SHG_FINISHED_EN 0x00020000U
+#define ROGUE_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT 17U
+#define ROGUE_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN 0x00020000U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT 16U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN 0x00010000U
+#define ROGUE_CR_EVENT_STATUS_USC_TRIGGER_SHIFT 15U
+#define ROGUE_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK 0xFFFF7FFFU
+#define ROGUE_CR_EVENT_STATUS_USC_TRIGGER_EN 0x00008000U
+#define ROGUE_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT 14U
+#define ROGUE_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK 0xFFFFBFFFU
+#define ROGUE_CR_EVENT_STATUS_ZLS_FINISHED_EN 0x00004000U
+#define ROGUE_CR_EVENT_STATUS_GPIO_ACK_SHIFT 13U
+#define ROGUE_CR_EVENT_STATUS_GPIO_ACK_CLRMSK 0xFFFFDFFFU
+#define ROGUE_CR_EVENT_STATUS_GPIO_ACK_EN 0x00002000U
+#define ROGUE_CR_EVENT_STATUS_GPIO_REQ_SHIFT 12U
+#define ROGUE_CR_EVENT_STATUS_GPIO_REQ_CLRMSK 0xFFFFEFFFU
+#define ROGUE_CR_EVENT_STATUS_GPIO_REQ_EN 0x00001000U
+#define ROGUE_CR_EVENT_STATUS_POWER_ABORT_SHIFT 11U
+#define ROGUE_CR_EVENT_STATUS_POWER_ABORT_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_EVENT_STATUS_POWER_ABORT_EN 0x00000800U
+#define ROGUE_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT 10U
+#define ROGUE_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_EVENT_STATUS_POWER_COMPLETE_EN 0x00000400U
+#define ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT 9U
+#define ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN 0x00000200U
+#define ROGUE_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT 8U
+#define ROGUE_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN 0x00000100U
+#define ROGUE_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT 7U
+#define ROGUE_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN 0x00000080U
+#define ROGUE_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 6U
+#define ROGUE_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_EVENT_STATUS_TA_TERMINATE_EN 0x00000040U
+#define ROGUE_CR_EVENT_STATUS_TA_FINISHED_SHIFT 5U
+#define ROGUE_CR_EVENT_STATUS_TA_FINISHED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_EVENT_STATUS_TA_FINISHED_EN 0x00000020U
+#define ROGUE_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT 4U
+#define ROGUE_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_EVENT_STATUS_ISP_END_MACROTILE_EN 0x00000010U
+#define ROGUE_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 3U
+#define ROGUE_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN 0x00000008U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT 2U
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_EVENT_STATUS_COMPUTE_FINISHED_EN 0x00000004U
+#define ROGUE_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT 1U
+#define ROGUE_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_EVENT_STATUS_KERNEL_FINISHED_EN 0x00000002U
+#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT 0U
+#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_EVENT_STATUS_TLA_COMPLETE_EN 0x00000001U
+
+/* Register ROGUE_CR_TIMER */
+#define ROGUE_CR_TIMER 0x0160U
+#define ROGUE_CR_TIMER_MASKFULL 0x8000FFFFFFFFFFFFULL
+#define ROGUE_CR_TIMER_BIT31_SHIFT 63U
+#define ROGUE_CR_TIMER_BIT31_CLRMSK 0x7FFFFFFFFFFFFFFFULL
+#define ROGUE_CR_TIMER_BIT31_EN 0x8000000000000000ULL
+#define ROGUE_CR_TIMER_VALUE_SHIFT 0U
+#define ROGUE_CR_TIMER_VALUE_CLRMSK 0xFFFF000000000000ULL
+
+/* Register ROGUE_CR_TLA_STATUS */
+#define ROGUE_CR_TLA_STATUS 0x0178U
+#define ROGUE_CR_TLA_STATUS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_TLA_STATUS_BLIT_COUNT_SHIFT 39U
+#define ROGUE_CR_TLA_STATUS_BLIT_COUNT_CLRMSK 0x0000007FFFFFFFFFULL
+#define ROGUE_CR_TLA_STATUS_REQUEST_SHIFT 7U
+#define ROGUE_CR_TLA_STATUS_REQUEST_CLRMSK 0xFFFFFF800000007FULL
+#define ROGUE_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT 1U
+#define ROGUE_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK 0xFFFFFFFFFFFFFF81ULL
+#define ROGUE_CR_TLA_STATUS_BUSY_SHIFT 0U
+#define ROGUE_CR_TLA_STATUS_BUSY_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_TLA_STATUS_BUSY_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_PM_PARTIAL_RENDER_ENABLE */
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE 0x0338U
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT 0U
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN 0x00000001U
+
+/* Register ROGUE_CR_SIDEKICK_IDLE */
+#define ROGUE_CR_SIDEKICK_IDLE 0x03C8U
+#define ROGUE_CR_SIDEKICK_IDLE_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_SIDEKICK_IDLE_FB_CDC_SHIFT 6U
+#define ROGUE_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_SIDEKICK_IDLE_FB_CDC_EN 0x00000040U
+#define ROGUE_CR_SIDEKICK_IDLE_MMU_SHIFT 5U
+#define ROGUE_CR_SIDEKICK_IDLE_MMU_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_SIDEKICK_IDLE_MMU_EN 0x00000020U
+#define ROGUE_CR_SIDEKICK_IDLE_BIF128_SHIFT 4U
+#define ROGUE_CR_SIDEKICK_IDLE_BIF128_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SIDEKICK_IDLE_BIF128_EN 0x00000010U
+#define ROGUE_CR_SIDEKICK_IDLE_TLA_SHIFT 3U
+#define ROGUE_CR_SIDEKICK_IDLE_TLA_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SIDEKICK_IDLE_TLA_EN 0x00000008U
+#define ROGUE_CR_SIDEKICK_IDLE_GARTEN_SHIFT 2U
+#define ROGUE_CR_SIDEKICK_IDLE_GARTEN_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SIDEKICK_IDLE_GARTEN_EN 0x00000004U
+#define ROGUE_CR_SIDEKICK_IDLE_HOSTIF_SHIFT 1U
+#define ROGUE_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SIDEKICK_IDLE_HOSTIF_EN 0x00000002U
+#define ROGUE_CR_SIDEKICK_IDLE_SOCIF_SHIFT 0U
+#define ROGUE_CR_SIDEKICK_IDLE_SOCIF_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SIDEKICK_IDLE_SOCIF_EN 0x00000001U
+
+/* Register ROGUE_CR_MARS_IDLE */
+#define ROGUE_CR_MARS_IDLE 0x08F8U
+#define ROGUE_CR_MARS_IDLE_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_MARS_IDLE_MH_SYSARB0_SHIFT 2U
+#define ROGUE_CR_MARS_IDLE_MH_SYSARB0_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_MARS_IDLE_MH_SYSARB0_EN 0x00000004U
+#define ROGUE_CR_MARS_IDLE_CPU_SHIFT 1U
+#define ROGUE_CR_MARS_IDLE_CPU_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_MARS_IDLE_CPU_EN 0x00000002U
+#define ROGUE_CR_MARS_IDLE_SOCIF_SHIFT 0U
+#define ROGUE_CR_MARS_IDLE_SOCIF_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MARS_IDLE_SOCIF_EN 0x00000001U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_STATUS */
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS 0x0430U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL 0x00000000000000F3ULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT 4U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK 0xFFFFFF0FU
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT 1U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN 0x00000002U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN 0x00000001U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_TASK0 */
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0 0x0438U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_TASK1 */
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1 0x0440U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_VDM_CONTEXT_STORE_TASK2 */
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2 0x0448U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_VDM_CONTEXT_RESUME_TASK0 */
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0 0x0450U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_VDM_CONTEXT_RESUME_TASK1 */
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1 0x0458U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_VDM_CONTEXT_RESUME_TASK2 */
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2 0x0460U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT 32U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK 0x00000000FFFFFFFFULL
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT 0U
+#define ROGUE_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_CDM_CONTEXT_STORE_STATUS */
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS 0x04A0U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL 0x0000000000000003ULL
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT 1U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN 0x00000002U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT 0U
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN 0x00000001U
+
+/* Register ROGUE_CR_CDM_CONTEXT_PDS0 */
+#define ROGUE_CR_CDM_CONTEXT_PDS0 0x04A8U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_MASKFULL 0xFFFFFFF0FFFFFFF0ULL
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT 36U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE 16U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK 0xFFFFFFFF0000000FULL
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_CDM_CONTEXT_PDS1 */
+#define ROGUE_CR_CDM_CONTEXT_PDS1 0x04B0U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL 0x000000007FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_PDS1_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT 30U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN 0x40000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT 27U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TARGET_EN 0x08000000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT 22U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK 0xF03FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK 0xF81FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN 0x00200000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT 20U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN 0x00100000U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT 12U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT 11U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK 0xFFF007FFU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK 0xFFFFF87FU
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK 0xFFFFF07FU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT 1U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK 0xFFFFFF81U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT 0U
+#define ROGUE_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_CONTEXT_PDS1_FENCE_EN 0x00000001U
+
+/* Register ROGUE_CR_CDM_TERMINATE_PDS */
+#define ROGUE_CR_CDM_TERMINATE_PDS 0x04B8U
+#define ROGUE_CR_CDM_TERMINATE_PDS_MASKFULL 0xFFFFFFF0FFFFFFF0ULL
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT 36U
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE 16U
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT 4U
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK 0xFFFFFFFF0000000FULL
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_CDM_TERMINATE_PDS1 */
+#define ROGUE_CR_CDM_TERMINATE_PDS1 0x04C0U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL 0x000000007FFFFFFFULL
+#define ROGUE_CR_CDM_TERMINATE_PDS1_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT 30U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN 0x40000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT 28U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN 0x10000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT 28U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN 0x10000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT 27U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TARGET_EN 0x08000000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT 22U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK 0xF03FFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT 21U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK 0xF81FFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT 21U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN 0x00200000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT 20U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN 0x00100000U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT 12U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT 11U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK 0xFFF007FFU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK 0xFFFFF87FU
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK 0xFFFFF07FU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT 1U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK 0xFFFFFF81U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT 0U
+#define ROGUE_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_TERMINATE_PDS1_FENCE_EN 0x00000001U
+
+/* Register ROGUE_CR_CDM_CONTEXT_LOAD_PDS0 */
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0 0x04D8U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL 0xFFFFFFF0FFFFFFF0ULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT 36U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK 0x0000000FFFFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE 16U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK 0xFFFFFFFF0000000FULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT 4U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE 16U
+
+/* Register ROGUE_CR_CDM_CONTEXT_LOAD_PDS1 */
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1 0x04E0U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL 0x000000007FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT 30U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN 0x40000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT 29U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN 0x20000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT 28U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN 0x10000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT 27U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN 0x08000000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT 22U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK 0xF03FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK 0xF81FFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT 21U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK 0xFFDFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN 0x00200000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT 20U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN 0x00100000U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT 12U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK 0xFFE00FFFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT 11U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK 0xFFF007FFU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK 0xFFFFF87FU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT 7U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK 0xFFFFF07FU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT 1U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK 0xFFFFFF81U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT 0U
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN 0x00000001U
+
+/* Register ROGUE_CR_MIPS_WRAPPER_CONFIG */
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG 0x0810U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_MASKFULL 0x000001030F01FFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_SHIFT 40U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_CLRMSK 0xFFFFFEFFFFFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_EN 0x0000010000000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_SHIFT 33U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_EN 0x0000000200000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT 32U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN 0x0000000100000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT 25U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK 0xFFFFFFFFF1FFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT 24U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK 0xFFFFFFFFFEFFFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN 0x0000000001000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT 16U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32 0x0000000000000000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS 0x0000000000010000ULL
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT 0U
+#define ROGUE_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1 */
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1 0x0818U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL 0x00000000FFFFF001ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2 */
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2 0x0820U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT 12U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT 6U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT 5U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN 0x0000000000000020ULL
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT 0U
+#define ROGUE_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL
ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT 12U +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT 0U +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL + +/* Register ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2 */ +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2 0x0840U +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT 12U +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT 6U +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT 5U +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN 0x0000000000000020ULL +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT 0U +#define ROGUE_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL + +/* Register ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1 */ +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1 0x0848U +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL 0x00000000FFFFF001ULL +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT 12U +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT 0U +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL + +/* Register ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2 */ +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2 0x0850U +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT 12U +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT 6U +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT 5U +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN 0x0000000000000020ULL +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT 0U +#define ROGUE_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL + +/* Register ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1 */ +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1 0x0858U +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL 0x00000000FFFFF001ULL +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT 12U +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT 0U +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN 0x0000000000000001ULL + +/* Register ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2 */ +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2 0x0860U +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL 0x000000FFFFFFF1FFULL +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT 12U +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT 6U +#define 
ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK 0xFFFFFFFFFFFFFE3FULL +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT 5U +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK 0xFFFFFFFFFFFFFFDFULL +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN 0x0000000000000020ULL +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT 0U +#define ROGUE_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK 0xFFFFFFFFFFFFFFE0ULL + +/* Register ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS */ +#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS 0x0868U +#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL 0x00000001FFFFFFFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT 32U +#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK 0xFFFFFFFEFFFFFFFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN 0x0000000100000000ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT 0U +#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK 0xFFFFFFFF00000000ULL + +/* Register ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR */ +#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR 0x0870U +#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT 0U +#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN 0x00000001U + +/* Register ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG */ +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG 0x0878U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL 0xFFFFFFF7FFFFFFBFULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT 36U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK 0x0000000FFFFFFFFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT 32U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK 0xFFFFFFF8FFFFFFFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT 12U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT 11U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK 0xFFFFFFFFFFFFF7FFULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN 0x0000000000000800ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT 7U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK 0xFFFFFFFFFFFFF87FULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB 0x0000000000000000ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB 0x0000000000000080ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB 0x0000000000000100ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB 0x0000000000000180ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB 0x0000000000000200ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB 0x0000000000000280ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB 0x0000000000000300ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB 0x0000000000000380ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB 0x0000000000000400ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT 1U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK 0xFFFFFFFFFFFFFFC1ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT 0U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL +#define 
ROGUE_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN 0x0000000000000001ULL + +/* Register ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ */ +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ 0x0880U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL 0x000000000000003FULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT 1U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK 0xFFFFFFC1U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT 0U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN 0x00000001U + +/* Register ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA */ +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA 0x0888U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL 0xFFFFFFF7FFFFFF81ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT 36U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK 0x0000000FFFFFFFFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT 32U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK 0xFFFFFFF8FFFFFFFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT 12U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK 0xFFFFFFFF00000FFFULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT 11U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK 0xFFFFFFFFFFFFF7FFULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN 0x0000000000000800ULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT 7U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK 0xFFFFFFFFFFFFF87FULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT 0U +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK 0xFFFFFFFFFFFFFFFEULL +#define ROGUE_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN 0x0000000000000001ULL + +/* Register ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE */ +#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE 0x08A0U +#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT 0U +#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN 0x00000001U + +/* Register ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS */ +#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS 0x08A8U +#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT 0U +#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN 0x00000001U + +/* Register ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR */ +#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR 0x08B0U +#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT 0U +#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN 0x00000001U + +/* Register ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE */ +#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE 0x08B8U +#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT 0U +#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN 0x00000001U + +/* Register ROGUE_CR_MIPS_WRAPPER_NMI_EVENT */ +#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT 0x08C0U +#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT 0U +#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK 
0xFFFFFFFEU +#define ROGUE_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN 0x00000001U + +/* Register ROGUE_CR_MIPS_DEBUG_CONFIG */ +#define ROGUE_CR_MIPS_DEBUG_CONFIG 0x08C8U +#define ROGUE_CR_MIPS_DEBUG_CONFIG_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT 0U +#define ROGUE_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN 0x00000001U + +/* Register ROGUE_CR_MIPS_EXCEPTION_STATUS */ +#define ROGUE_CR_MIPS_EXCEPTION_STATUS 0x08D0U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_MASKFULL 0x000000000000003FULL +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT 5U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK 0xFFFFFFDFU +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN 0x00000020U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT 4U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN 0x00000010U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT 3U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN 0x00000008U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT 2U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN 0x00000004U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT 1U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN 0x00000002U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT 0U +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN 0x00000001U + +/* Register ROGUE_CR_MIPS_WRAPPER_STATUS */ +#define ROGUE_CR_MIPS_WRAPPER_STATUS 0x08E8U +#define ROGUE_CR_MIPS_WRAPPER_STATUS_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_SHIFT 0U +#define ROGUE_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_XPU_BROADCAST */ +#define ROGUE_CR_XPU_BROADCAST 0x0890U +#define ROGUE_CR_XPU_BROADCAST_MASKFULL 0x00000000000001FFULL +#define ROGUE_CR_XPU_BROADCAST_MASK_SHIFT 0U +#define ROGUE_CR_XPU_BROADCAST_MASK_CLRMSK 0xFFFFFE00U + +/* Register ROGUE_CR_META_SP_MSLVDATAX */ +#define ROGUE_CR_META_SP_MSLVDATAX 0x0A00U +#define ROGUE_CR_META_SP_MSLVDATAX_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK 0x00000000U + +/* Register ROGUE_CR_META_SP_MSLVDATAT */ +#define ROGUE_CR_META_SP_MSLVDATAT 0x0A08U +#define ROGUE_CR_META_SP_MSLVDATAT_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK 0x00000000U + +/* Register ROGUE_CR_META_SP_MSLVCTRL0 */ +#define ROGUE_CR_META_SP_MSLVCTRL0 0x0A10U +#define ROGUE_CR_META_SP_MSLVCTRL0_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_META_SP_MSLVCTRL0_ADDR_SHIFT 2U +#define ROGUE_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK 0x00000003U +#define ROGUE_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT 1U +#define ROGUE_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_META_SP_MSLVCTRL0_AUTOINCR_EN 0x00000002U +#define ROGUE_CR_META_SP_MSLVCTRL0_RD_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVCTRL0_RD_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_META_SP_MSLVCTRL0_RD_EN 0x00000001U + +/* Register ROGUE_CR_META_SP_MSLVCTRL1 */ +#define 
ROGUE_CR_META_SP_MSLVCTRL1 0x0A18U +#define ROGUE_CR_META_SP_MSLVCTRL1_MASKFULL 0x00000000F7F4003FULL +#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT 30U +#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK 0x3FFFFFFFU +#define ROGUE_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT 29U +#define ROGUE_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK 0xDFFFFFFFU +#define ROGUE_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN 0x20000000U +#define ROGUE_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT 28U +#define ROGUE_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK 0xEFFFFFFFU +#define ROGUE_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN 0x10000000U +#define ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT 26U +#define ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK 0xFBFFFFFFU +#define ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN 0x04000000U +#define ROGUE_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT 25U +#define ROGUE_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK 0xFDFFFFFFU +#define ROGUE_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN 0x02000000U +#define ROGUE_CR_META_SP_MSLVCTRL1_READY_SHIFT 24U +#define ROGUE_CR_META_SP_MSLVCTRL1_READY_CLRMSK 0xFEFFFFFFU +#define ROGUE_CR_META_SP_MSLVCTRL1_READY_EN 0x01000000U +#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT 21U +#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK 0xFF1FFFFFU +#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT 20U +#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK 0xFFEFFFFFU +#define ROGUE_CR_META_SP_MSLVCTRL1_DEFERR_EN 0x00100000U +#define ROGUE_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT 18U +#define ROGUE_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK 0xFFFBFFFFU +#define ROGUE_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN 0x00040000U +#define ROGUE_CR_META_SP_MSLVCTRL1_THREAD_SHIFT 4U +#define ROGUE_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK 0xFFFFFFCFU +#define ROGUE_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT 2U +#define ROGUE_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK 0xFFFFFFF3U +#define ROGUE_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK 0xFFFFFFFCU + +/* Register ROGUE_CR_META_SP_MSLVHANDSHKE */ +#define ROGUE_CR_META_SP_MSLVHANDSHKE 0x0A50U +#define ROGUE_CR_META_SP_MSLVHANDSHKE_MASKFULL 0x000000000000000FULL +#define ROGUE_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT 2U +#define ROGUE_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK 0xFFFFFFF3U +#define ROGUE_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK 0xFFFFFFFCU + +/* Register ROGUE_CR_META_SP_MSLVT0KICK */ +#define ROGUE_CR_META_SP_MSLVT0KICK 0x0A80U +#define ROGUE_CR_META_SP_MSLVT0KICK_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_META_SP_MSLVT0KICKI */ +#define ROGUE_CR_META_SP_MSLVT0KICKI 0x0A88U +#define ROGUE_CR_META_SP_MSLVT0KICKI_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_META_SP_MSLVT1KICK */ +#define ROGUE_CR_META_SP_MSLVT1KICK 0x0A90U +#define ROGUE_CR_META_SP_MSLVT1KICK_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_META_SP_MSLVT1KICKI */ +#define ROGUE_CR_META_SP_MSLVT1KICKI 0x0A98U +#define ROGUE_CR_META_SP_MSLVT1KICKI_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK 
0xFFFF0000U + +/* Register ROGUE_CR_META_SP_MSLVT2KICK */ +#define ROGUE_CR_META_SP_MSLVT2KICK 0x0AA0U +#define ROGUE_CR_META_SP_MSLVT2KICK_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_META_SP_MSLVT2KICKI */ +#define ROGUE_CR_META_SP_MSLVT2KICKI 0x0AA8U +#define ROGUE_CR_META_SP_MSLVT2KICKI_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_META_SP_MSLVT3KICK */ +#define ROGUE_CR_META_SP_MSLVT3KICK 0x0AB0U +#define ROGUE_CR_META_SP_MSLVT3KICK_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_META_SP_MSLVT3KICKI */ +#define ROGUE_CR_META_SP_MSLVT3KICKI 0x0AB8U +#define ROGUE_CR_META_SP_MSLVT3KICKI_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_META_SP_MSLVRST */ +#define ROGUE_CR_META_SP_MSLVRST 0x0AC0U +#define ROGUE_CR_META_SP_MSLVRST_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_META_SP_MSLVRST_SOFTRESET_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_META_SP_MSLVRST_SOFTRESET_EN 0x00000001U + +/* Register ROGUE_CR_META_SP_MSLVIRQSTATUS */ +#define ROGUE_CR_META_SP_MSLVIRQSTATUS 0x0AC8U +#define ROGUE_CR_META_SP_MSLVIRQSTATUS_MASKFULL 0x000000000000000CULL +#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT 3U +#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN 0x00000008U +#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT 2U +#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN 0x00000004U + +/* Register ROGUE_CR_META_SP_MSLVIRQENABLE */ +#define ROGUE_CR_META_SP_MSLVIRQENABLE 0x0AD0U +#define ROGUE_CR_META_SP_MSLVIRQENABLE_MASKFULL 0x000000000000000CULL +#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT 3U +#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT1_EN 0x00000008U +#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT 2U +#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_META_SP_MSLVIRQENABLE_EVENT0_EN 0x00000004U + +/* Register ROGUE_CR_META_SP_MSLVIRQLEVEL */ +#define ROGUE_CR_META_SP_MSLVIRQLEVEL 0x0AD8U +#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT 0U +#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_META_SP_MSLVIRQLEVEL_MODE_EN 0x00000001U + +/* Register ROGUE_CR_MTS_SCHEDULE */ +#define ROGUE_CR_MTS_SCHEDULE 0x0B00U +#define ROGUE_CR_MTS_SCHEDULE_MASKFULL 0x00000000000001FFULL +#define ROGUE_CR_MTS_SCHEDULE_HOST_SHIFT 8U +#define ROGUE_CR_MTS_SCHEDULE_HOST_CLRMSK 0xFFFFFEFFU +#define ROGUE_CR_MTS_SCHEDULE_HOST_BG_TIMER 0x00000000U +#define ROGUE_CR_MTS_SCHEDULE_HOST_HOST 0x00000100U +#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_SHIFT 6U +#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_CLRMSK 0xFFFFFF3FU +#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT0 0x00000000U +#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT1 0x00000040U +#define ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT2 0x00000080U +#define 
ROGUE_CR_MTS_SCHEDULE_PRIORITY_PRT3 0x000000C0U +#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_SHIFT 5U +#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_CLRMSK 0xFFFFFFDFU +#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_BGCTX 0x00000000U +#define ROGUE_CR_MTS_SCHEDULE_CONTEXT_INTCTX 0x00000020U +#define ROGUE_CR_MTS_SCHEDULE_TASK_SHIFT 4U +#define ROGUE_CR_MTS_SCHEDULE_TASK_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_MTS_SCHEDULE_TASK_NON_COUNTED 0x00000000U +#define ROGUE_CR_MTS_SCHEDULE_TASK_COUNTED 0x00000010U +#define ROGUE_CR_MTS_SCHEDULE_DM_SHIFT 0U +#define ROGUE_CR_MTS_SCHEDULE_DM_CLRMSK 0xFFFFFFF0U +#define ROGUE_CR_MTS_SCHEDULE_DM_DM0 0x00000000U +#define ROGUE_CR_MTS_SCHEDULE_DM_DM1 0x00000001U +#define ROGUE_CR_MTS_SCHEDULE_DM_DM2 0x00000002U +#define ROGUE_CR_MTS_SCHEDULE_DM_DM3 0x00000003U +#define ROGUE_CR_MTS_SCHEDULE_DM_DM4 0x00000004U +#define ROGUE_CR_MTS_SCHEDULE_DM_DM5 0x00000005U +#define ROGUE_CR_MTS_SCHEDULE_DM_DM6 0x00000006U +#define ROGUE_CR_MTS_SCHEDULE_DM_DM7 0x00000007U +#define ROGUE_CR_MTS_SCHEDULE_DM_DM_ALL 0x0000000FU + +/* Register ROGUE_CR_MTS_SCHEDULE1 */ +#define ROGUE_CR_MTS_SCHEDULE1 0x10B00U +#define ROGUE_CR_MTS_SCHEDULE1_MASKFULL 0x00000000000001FFULL +#define ROGUE_CR_MTS_SCHEDULE1_HOST_SHIFT 8U +#define ROGUE_CR_MTS_SCHEDULE1_HOST_CLRMSK 0xFFFFFEFFU +#define ROGUE_CR_MTS_SCHEDULE1_HOST_BG_TIMER 0x00000000U +#define ROGUE_CR_MTS_SCHEDULE1_HOST_HOST 0x00000100U +#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_SHIFT 6U +#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK 0xFFFFFF3FU +#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT0 0x00000000U +#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT1 0x00000040U +#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT2 0x00000080U +#define ROGUE_CR_MTS_SCHEDULE1_PRIORITY_PRT3 0x000000C0U +#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_SHIFT 5U +#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK 0xFFFFFFDFU +#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_BGCTX 0x00000000U +#define ROGUE_CR_MTS_SCHEDULE1_CONTEXT_INTCTX 0x00000020U +#define ROGUE_CR_MTS_SCHEDULE1_TASK_SHIFT 4U +#define ROGUE_CR_MTS_SCHEDULE1_TASK_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_MTS_SCHEDULE1_TASK_NON_COUNTED 0x00000000U +#define ROGUE_CR_MTS_SCHEDULE1_TASK_COUNTED 0x00000010U +#define ROGUE_CR_MTS_SCHEDULE1_DM_SHIFT 0U +#define ROGUE_CR_MTS_SCHEDULE1_DM_CLRMSK 0xFFFFFFF0U +#define ROGUE_CR_MTS_SCHEDULE1_DM_DM0 0x00000000U +#define ROGUE_CR_MTS_SCHEDULE1_DM_DM1 0x00000001U +#define ROGUE_CR_MTS_SCHEDULE1_DM_DM2 0x00000002U +#define ROGUE_CR_MTS_SCHEDULE1_DM_DM3 0x00000003U +#define ROGUE_CR_MTS_SCHEDULE1_DM_DM4 0x00000004U +#define ROGUE_CR_MTS_SCHEDULE1_DM_DM5 0x00000005U +#define ROGUE_CR_MTS_SCHEDULE1_DM_DM6 0x00000006U +#define ROGUE_CR_MTS_SCHEDULE1_DM_DM7 0x00000007U +#define ROGUE_CR_MTS_SCHEDULE1_DM_DM_ALL 0x0000000FU + +/* Register ROGUE_CR_MTS_SCHEDULE2 */ +#define ROGUE_CR_MTS_SCHEDULE2 0x20B00U +#define ROGUE_CR_MTS_SCHEDULE2_MASKFULL 0x00000000000001FFULL +#define ROGUE_CR_MTS_SCHEDULE2_HOST_SHIFT 8U +#define ROGUE_CR_MTS_SCHEDULE2_HOST_CLRMSK 0xFFFFFEFFU +#define ROGUE_CR_MTS_SCHEDULE2_HOST_BG_TIMER 0x00000000U +#define ROGUE_CR_MTS_SCHEDULE2_HOST_HOST 0x00000100U +#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_SHIFT 6U +#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK 0xFFFFFF3FU +#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT0 0x00000000U +#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT1 0x00000040U +#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT2 0x00000080U +#define ROGUE_CR_MTS_SCHEDULE2_PRIORITY_PRT3 0x000000C0U +#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_SHIFT 5U +#define ROGUE_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK 0xFFFFFFDFU 
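+/*
+ * ROGUE_CR_MTS_SCHEDULE1..7 repeat the ROGUE_CR_MTS_SCHEDULE layout at
+ * a fixed 0x10000 stride (0x0B00, 0x10B00, 0x20B00, ...), apparently
+ * one instance per OS ID, matching the stride of the ROGUE_CR_IRQ_OSx
+ * registers later in this file. An illustrative, hypothetical helper
+ * (not part of this header) could derive the per-instance offset
+ * instead of switching on the duplicated constants:
+ *
+ *   static inline u32 rogue_cr_mts_schedule_reg(u32 os_id)
+ *   {
+ *           return ROGUE_CR_MTS_SCHEDULE + os_id * 0x10000U;
+ *   }
+ */
+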
+/* Register ROGUE_CR_MTS_SCHEDULE3 */
+#define ROGUE_CR_MTS_SCHEDULE3 0x30B00U
+#define ROGUE_CR_MTS_SCHEDULE3_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE3_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE3_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE4 */
+#define ROGUE_CR_MTS_SCHEDULE4 0x40B00U
+#define ROGUE_CR_MTS_SCHEDULE4_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE4_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE4_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE5 */
+#define ROGUE_CR_MTS_SCHEDULE5 0x50B00U
+#define ROGUE_CR_MTS_SCHEDULE5_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE5_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE5_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE6 */
+#define ROGUE_CR_MTS_SCHEDULE6 0x60B00U
+#define ROGUE_CR_MTS_SCHEDULE6_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE6_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE6_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_SCHEDULE7 */
+#define ROGUE_CR_MTS_SCHEDULE7 0x70B00U
+#define ROGUE_CR_MTS_SCHEDULE7_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_SHIFT 8U
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_BG_TIMER 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_HOST_HOST 0x00000100U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_SHIFT 6U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT1 0x00000040U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT2 0x00000080U
+#define ROGUE_CR_MTS_SCHEDULE7_PRIORITY_PRT3 0x000000C0U
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_SHIFT 5U
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_BGCTX 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_CONTEXT_INTCTX 0x00000020U
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_SHIFT 4U
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_NON_COUNTED 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_TASK_COUNTED 0x00000010U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_CLRMSK 0xFFFFFFF0U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM0 0x00000000U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM1 0x00000001U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM2 0x00000002U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM3 0x00000003U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM4 0x00000004U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM5 0x00000005U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM6 0x00000006U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM7 0x00000007U
+#define ROGUE_CR_MTS_SCHEDULE7_DM_DM_ALL 0x0000000FU
+
+/* Register ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC */
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC 0x0B30U
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC */
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC 0x0B38U
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC */
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC 0x0B40U
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
+/* Register ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC */
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC 0x0B48U
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL 0x000000000000FFFFULL
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT 0U
+#define ROGUE_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK 0xFFFF0000U
+
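+/*
+ * All fields in these registers follow one convention: <FIELD>_CLRMSK
+ * is the register value with the field's bits cleared, <FIELD>_SHIFT
+ * positions the value, and single-bit fields additionally get a
+ * pre-shifted <FIELD>_EN constant. A minimal read-modify-write sketch,
+ * assuming hypothetical 32-bit accessors pvr_cr_read32/pvr_cr_write32:
+ *
+ *   u32 v = pvr_cr_read32(pvr_dev, ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC);
+ *   v &= ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK;
+ *   v |= dm_assoc << ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT;
+ *   pvr_cr_write32(pvr_dev, ROGUE_CR_MTS_INTCTX_THREAD0_DM_ASSOC, v);
+ */
+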
+/* Register ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG */
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG 0x0B50U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__MASKFULL 0x000FF0FFFFFFF701ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL 0x0000FFFFFFFFF001ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT 44U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK 0xFFFF0FFFFFFFFFFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT 44U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK 0xFFF00FFFFFFFFFFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT 40U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT 12U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_SHIFT 9U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_CLRMSK 0xFFFFFFFFFFFFF9FFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT 8U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN 0x0000000000000100ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT 0U
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META 0x0000000000000000ULL
+#define ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS 0x0000000000000001ULL
+
+/* Register ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE 0x0B58U
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE 0x0B60U
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE 0x0B68U
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE 0x0B70U
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE 0x0B78U
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE */
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE 0x0B80U
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT 0U
+#define ROGUE_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_INTCTX */
+#define ROGUE_CR_MTS_INTCTX 0x0B98U
+#define ROGUE_CR_MTS_INTCTX_MASKFULL 0x000000003FFFFFFFULL
+#define ROGUE_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT 22U
+#define ROGUE_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK 0xC03FFFFFU
+#define ROGUE_CR_MTS_INTCTX_DM_PTR_SHIFT 18U
+#define ROGUE_CR_MTS_INTCTX_DM_PTR_CLRMSK 0xFFC3FFFFU
+#define ROGUE_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT 16U
+#define ROGUE_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK 0xFFFCFFFFU
+#define ROGUE_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT 8U
+#define ROGUE_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK 0xFFFF00FFU
+#define ROGUE_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT 0U
+#define ROGUE_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MTS_BGCTX */
+#define ROGUE_CR_MTS_BGCTX 0x0BA0U
+#define ROGUE_CR_MTS_BGCTX_MASKFULL 0x0000000000003FFFULL
+#define ROGUE_CR_MTS_BGCTX_DM_PTR_SHIFT 10U
+#define ROGUE_CR_MTS_BGCTX_DM_PTR_CLRMSK 0xFFFFC3FFU
+#define ROGUE_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT 8U
+#define ROGUE_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK 0xFFFFFCFFU
+#define ROGUE_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE */
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE 0x0BA8U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT 56U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK 0x00FFFFFFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT 48U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK 0xFF00FFFFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT 40U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK 0xFFFF00FFFFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT 32U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK 0xFFFFFF00FFFFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT 24U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK 0xFFFFFFFF00FFFFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT 16U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK 0xFFFFFFFFFF00FFFFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT 8U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK 0xFFFFFFFFFFFF00FFULL
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT 0U
+#define ROGUE_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK 0xFFFFFFFFFFFFFF00ULL
+
+/* Register ROGUE_CR_MTS_GPU_INT_STATUS */
+#define ROGUE_CR_MTS_GPU_INT_STATUS 0x0BB0U
+#define ROGUE_CR_MTS_GPU_INT_STATUS_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT 0U
+#define ROGUE_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_MTS_SCHEDULE_ENABLE */
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE 0x0BC8U
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE_MASKFULL 0x00000000000000FFULL
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT 0U
+#define ROGUE_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK 0xFFFFFF00U
+
+/* Register ROGUE_CR_IRQ_OS0_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS 0x0BD8U
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS0_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR 0x0BE8U
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS1_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS 0x10BD8U
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS1_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS1_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR 0x10BE8U
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS2_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS 0x20BD8U
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS2_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS2_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR 0x20BE8U
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS3_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS 0x30BD8U
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS3_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS3_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR 0x30BE8U
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS4_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS 0x40BD8U
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS4_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS4_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR 0x40BE8U
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS5_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS 0x50BD8U
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS5_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS5_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR 0x50BE8U
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
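+/*
+ * The ROGUE_CR_IRQ_OSx_EVENT_STATUS/_CLEAR pairs repeat at the same
+ * 0x10000 per-OS stride as the MTS_SCHEDULEx instances. One plausible
+ * handling sequence, sketched with the same hypothetical accessors as
+ * above, is to test the status bit and write the matching clear bit:
+ *
+ *   if (pvr_cr_read32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_STATUS) &
+ *       ROGUE_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN)
+ *           pvr_cr_write32(pvr_dev, ROGUE_CR_IRQ_OS0_EVENT_CLEAR,
+ *                          ROGUE_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN);
+ */
+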
+/* Register ROGUE_CR_IRQ_OS6_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS 0x60BD8U
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS6_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS6_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR 0x60BE8U
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS7_EVENT_STATUS */
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS 0x70BD8U
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS7_EVENT_STATUS_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_IRQ_OS7_EVENT_CLEAR */
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR 0x70BE8U
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT 0U
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_EN 0x00000001U
+
+/* Register ROGUE_CR_META_BOOT */
+#define ROGUE_CR_META_BOOT 0x0BF8U
+#define ROGUE_CR_META_BOOT_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_META_BOOT_MODE_SHIFT 0U
+#define ROGUE_CR_META_BOOT_MODE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_META_BOOT_MODE_EN 0x00000001U
+
+/* Register ROGUE_CR_GARTEN_SLC */
+#define ROGUE_CR_GARTEN_SLC 0x0BB8U
+#define ROGUE_CR_GARTEN_SLC_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT 0U
+#define ROGUE_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_GARTEN_SLC_FORCE_COHERENCY_EN 0x00000001U
+
+/* Register ROGUE_CR_PPP */
+#define ROGUE_CR_PPP 0x0CD0U
+#define ROGUE_CR_PPP_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PPP_CHECKSUM_SHIFT 0U
+#define ROGUE_CR_PPP_CHECKSUM_CLRMSK 0x00000000U
+
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_MASK 0x00000003U
+/* Top-left to bottom-right */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_TL2BR 0x00000000U
+/* Top-right to bottom-left */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_TR2BL 0x00000001U
+/* Bottom-left to top-right */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_BL2TR 0x00000002U
+/* Bottom-right to top-left */
+#define ROGUE_CR_ISP_RENDER_DIR_TYPE_BR2TL 0x00000003U
+
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_MASK 0x00000003U
+/* Normal render */
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_NORM 0x00000000U
+/* Fast 2D render */
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_FAST_2D 0x00000002U
+/* Fast scale render */
+#define ROGUE_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE 0x00000003U
+
+/* Register ROGUE_CR_ISP_RENDER */
+#define ROGUE_CR_ISP_RENDER 0x0F08U
+#define ROGUE_CR_ISP_RENDER_MASKFULL 0x00000000000001FFULL
+#define ROGUE_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_SHIFT 8U
+#define ROGUE_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_EN 0x00000100U
+#define ROGUE_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT 7U
+#define ROGUE_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN 0x00000080U
+#define ROGUE_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT 6U
+#define ROGUE_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN 0x00000040U
+#define ROGUE_CR_ISP_RENDER_DISABLE_EOMT_SHIFT 5U
+#define ROGUE_CR_ISP_RENDER_DISABLE_EOMT_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_ISP_RENDER_DISABLE_EOMT_EN 0x00000020U
+#define ROGUE_CR_ISP_RENDER_RESUME_SHIFT 4U
+#define ROGUE_CR_ISP_RENDER_RESUME_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_ISP_RENDER_RESUME_EN 0x00000010U
+#define ROGUE_CR_ISP_RENDER_DIR_SHIFT 2U
+#define ROGUE_CR_ISP_RENDER_DIR_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_ISP_RENDER_DIR_TL2BR 0x00000000U
+#define ROGUE_CR_ISP_RENDER_DIR_TR2BL 0x00000004U
+#define ROGUE_CR_ISP_RENDER_DIR_BL2TR 0x00000008U
+#define ROGUE_CR_ISP_RENDER_DIR_BR2TL 0x0000000CU
+#define ROGUE_CR_ISP_RENDER_MODE_SHIFT 0U
+#define ROGUE_CR_ISP_RENDER_MODE_CLRMSK 0xFFFFFFFCU
+#define ROGUE_CR_ISP_RENDER_MODE_NORM 0x00000000U
+#define ROGUE_CR_ISP_RENDER_MODE_FAST_2D 0x00000002U
+#define ROGUE_CR_ISP_RENDER_MODE_FAST_SCALE 0x00000003U
+
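+/*
+ * Enumerated field values such as ROGUE_CR_ISP_RENDER_DIR_* and
+ * ROGUE_CR_ISP_RENDER_MODE_* above are already shifted into field
+ * position, so they are or-ed in directly after masking with the
+ * field's _CLRMSK; no additional _SHIFT is applied. A sketch, again
+ * using the hypothetical accessors shown earlier:
+ *
+ *   u32 v = pvr_cr_read32(pvr_dev, ROGUE_CR_ISP_RENDER);
+ *   v &= ROGUE_CR_ISP_RENDER_DIR_CLRMSK & ROGUE_CR_ISP_RENDER_MODE_CLRMSK;
+ *   v |= ROGUE_CR_ISP_RENDER_DIR_TL2BR | ROGUE_CR_ISP_RENDER_MODE_NORM;
+ *   pvr_cr_write32(pvr_dev, ROGUE_CR_ISP_RENDER, v);
+ */
+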
+/* Register ROGUE_CR_ISP_CTL */
+#define ROGUE_CR_ISP_CTL 0x0F38U
+#define ROGUE_CR_ISP_CTL_MASKFULL 0x00000000FFFFF3FFULL
+#define ROGUE_CR_ISP_CTL_SKIP_INIT_HDRS_SHIFT 31U
+#define ROGUE_CR_ISP_CTL_SKIP_INIT_HDRS_CLRMSK 0x7FFFFFFFU
+#define ROGUE_CR_ISP_CTL_SKIP_INIT_HDRS_EN 0x80000000U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_SHIFT 30U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_EN 0x40000000U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_PIX_SHIFT 29U
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_PIX_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_ISP_CTL_LINE_STYLE_PIX_EN 0x20000000U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_VERT_SHIFT 28U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_VERT_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_VERT_EN 0x10000000U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_SHIFT 27U
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_CLRMSK 0xF7FFFFFFU
+#define ROGUE_CR_ISP_CTL_PAIR_TILES_EN 0x08000000U
+#define ROGUE_CR_ISP_CTL_CREQ_BUF_EN_SHIFT 26U
+#define ROGUE_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK 0xFBFFFFFFU
+#define ROGUE_CR_ISP_CTL_CREQ_BUF_EN_EN 0x04000000U
+#define ROGUE_CR_ISP_CTL_TILE_AGE_EN_SHIFT 25U
+#define ROGUE_CR_ISP_CTL_TILE_AGE_EN_CLRMSK 0xFDFFFFFFU
+#define ROGUE_CR_ISP_CTL_TILE_AGE_EN_EN 0x02000000U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT 23U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK 0xFE7FFFFFU
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9 0x00000000U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10 0x00800000U
+#define ROGUE_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL 0x01000000U
+#define ROGUE_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT 21U
+#define ROGUE_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK 0xFF9FFFFFU
+#define ROGUE_CR_ISP_CTL_DBIAS_IS_INT_SHIFT 20U
+#define ROGUE_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_ISP_CTL_DBIAS_IS_INT_EN 0x00100000U
+#define ROGUE_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT 19U
+#define ROGUE_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN 0x00080000U
+#define ROGUE_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT 18U
+#define ROGUE_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN 0x00040000U
+#define ROGUE_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT 17U
+#define ROGUE_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN 0x00020000U
+#define ROGUE_CR_ISP_CTL_SAMPLE_POS_SHIFT 16U
+#define ROGUE_CR_ISP_CTL_SAMPLE_POS_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_ISP_CTL_SAMPLE_POS_EN 0x00010000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_SHIFT 12U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_CLRMSK 0xFFFF0FFFU
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE 0x00000000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO 0x00001000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE 0x00002000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR 0x00003000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE 0x00004000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX 0x00005000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN 0x00006000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT 0x00007000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE 0x00008000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN 0x00009000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN 0x0000A000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE 0x0000B000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN 0x0000C000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN 0x0000D000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN 0x0000E000U
+#define ROGUE_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN 0x0000F000U
+#define ROGUE_CR_ISP_CTL_VALID_ID_SHIFT 4U
+#define ROGUE_CR_ISP_CTL_VALID_ID_CLRMSK 0xFFFFFC0FU
+#define ROGUE_CR_ISP_CTL_UPASS_START_SHIFT 0U
+#define ROGUE_CR_ISP_CTL_UPASS_START_CLRMSK 0xFFFFFFF0U
+
+/* Register ROGUE_CR_ISP_STATUS */
+#define ROGUE_CR_ISP_STATUS 0x1038U
+#define ROGUE_CR_ISP_STATUS_MASKFULL 0x0000000000000007ULL
+#define ROGUE_CR_ISP_STATUS_SPLIT_MAX_SHIFT 2U
+#define ROGUE_CR_ISP_STATUS_SPLIT_MAX_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_ISP_STATUS_SPLIT_MAX_EN 0x00000004U
+#define ROGUE_CR_ISP_STATUS_ACTIVE_SHIFT 1U
+#define ROGUE_CR_ISP_STATUS_ACTIVE_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_ISP_STATUS_ACTIVE_EN 0x00000002U
+#define ROGUE_CR_ISP_STATUS_EOR_SHIFT 0U
+#define ROGUE_CR_ISP_STATUS_EOR_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_ISP_STATUS_EOR_EN 0x00000001U
+
+/* Register group: ROGUE_CR_ISP_XTP_RESUME, with 64 repeats */
+#define ROGUE_CR_ISP_XTP_RESUME_REPEATCOUNT 64U
+/* Register ROGUE_CR_ISP_XTP_RESUME0 */
+#define ROGUE_CR_ISP_XTP_RESUME0 0x3A00U
+#define ROGUE_CR_ISP_XTP_RESUME0_MASKFULL 0x00000000003FF3FFULL
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_X_SHIFT 12U
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK 0xFFC00FFFU
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT 0U
+#define ROGUE_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK 0xFFFFFC00U
+
+/* Register group: ROGUE_CR_ISP_XTP_STORE, with 32 repeats */
+#define ROGUE_CR_ISP_XTP_STORE_REPEATCOUNT 32U
+/* Register ROGUE_CR_ISP_XTP_STORE0 */
+#define ROGUE_CR_ISP_XTP_STORE0 0x3C00U
+#define ROGUE_CR_ISP_XTP_STORE0_MASKFULL 0x000000007F3FF3FFULL
+#define ROGUE_CR_ISP_XTP_STORE0_ACTIVE_SHIFT 30U
+#define ROGUE_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK 0xBFFFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_ACTIVE_EN 0x40000000U
+#define ROGUE_CR_ISP_XTP_STORE0_EOR_SHIFT 29U
+#define ROGUE_CR_ISP_XTP_STORE0_EOR_CLRMSK 0xDFFFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_EOR_EN 0x20000000U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT 28U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK 0xEFFFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_LAST_EN 0x10000000U
+#define ROGUE_CR_ISP_XTP_STORE0_MT_SHIFT 24U
+#define ROGUE_CR_ISP_XTP_STORE0_MT_CLRMSK 0xF0FFFFFFU
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_X_SHIFT 12U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_X_CLRMSK 0xFFC00FFFU
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_Y_SHIFT 0U
+#define ROGUE_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK 0xFFFFFC00U
+
+/* Register group: ROGUE_CR_BIF_CAT_BASE, with 8 repeats */
+#define ROGUE_CR_BIF_CAT_BASE_REPEATCOUNT 8U
+/* Register ROGUE_CR_BIF_CAT_BASE0 */
+#define ROGUE_CR_BIF_CAT_BASE0 0x1200U
+#define ROGUE_CR_BIF_CAT_BASE0_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE1 */
+#define ROGUE_CR_BIF_CAT_BASE1 0x1208U
+#define ROGUE_CR_BIF_CAT_BASE1_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE2 */
+#define ROGUE_CR_BIF_CAT_BASE2 0x1210U
+#define ROGUE_CR_BIF_CAT_BASE2_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE3 */
+#define ROGUE_CR_BIF_CAT_BASE3 0x1218U
+#define ROGUE_CR_BIF_CAT_BASE3_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE4 */
+#define ROGUE_CR_BIF_CAT_BASE4 0x1220U
+#define ROGUE_CR_BIF_CAT_BASE4_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE5 */
+#define ROGUE_CR_BIF_CAT_BASE5 0x1228U
+#define ROGUE_CR_BIF_CAT_BASE5_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE6 */
+#define ROGUE_CR_BIF_CAT_BASE6 0x1230U
+#define ROGUE_CR_BIF_CAT_BASE6_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE7 */
+#define ROGUE_CR_BIF_CAT_BASE7 0x1238U
+#define ROGUE_CR_BIF_CAT_BASE7_MASKFULL 0x000000FFFFFFF000ULL
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT 12U
+#define ROGUE_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE 4096U
+
+/* Register ROGUE_CR_BIF_CAT_BASE_INDEX */
+#define ROGUE_CR_BIF_CAT_BASE_INDEX 0x1240U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_MASKFULL 0x00070707073F0707ULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT 48U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK 0xFFF8FFFFFFFFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT 40U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK 0xFFFFF8FFFFFFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT 32U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK 0xFFFFFFF8FFFFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT 24U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK 0xFFFFFFFFF8FFFFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TDM_SHIFT 19U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TDM_CLRMSK 0xFFFFFFFFFFC7FFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT 16U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK 0xFFFFFFFFFFF8FFFFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT 8U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK 0xFFFFFFFFFFFFF8FFULL
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TA_SHIFT 0U
+#define ROGUE_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK 0xFFFFFFFFFFFFFFF8ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_VCE0 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0 0x1248U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_TE0 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0 0x1250U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_TE0_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_ALIST0 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0 0x1260U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT 1U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN 0x0000000000000002ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT 0U
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_BIF_PM_CAT_BASE_VCE1 */
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1 0x1268U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL 0x0FFFFFFFFFFFF003ULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT 40U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT 12U
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL
+#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT 1U
+#define
ROGUE_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL +#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN 0x0000000000000002ULL +#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT 0U +#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL +#define ROGUE_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN 0x0000000000000001ULL + +/* Register ROGUE_CR_BIF_PM_CAT_BASE_TE1 */ +#define ROGUE_CR_BIF_PM_CAT_BASE_TE1 0x1270U +#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_MASKFULL 0x0FFFFFFFFFFFF003ULL +#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT 40U +#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL +#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT 12U +#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT 1U +#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL +#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN 0x0000000000000002ULL +#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT 0U +#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL +#define ROGUE_CR_BIF_PM_CAT_BASE_TE1_VALID_EN 0x0000000000000001ULL + +/* Register ROGUE_CR_BIF_PM_CAT_BASE_ALIST1 */ +#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1 0x1280U +#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL 0x0FFFFFFFFFFFF003ULL +#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT 40U +#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK 0xF00000FFFFFFFFFFULL +#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT 12U +#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT 1U +#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK 0xFFFFFFFFFFFFFFFDULL +#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN 0x0000000000000002ULL +#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT 0U +#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK 0xFFFFFFFFFFFFFFFEULL +#define ROGUE_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN 0x0000000000000001ULL + +/* Register ROGUE_CR_BIF_MMU_ENTRY_STATUS */ +#define ROGUE_CR_BIF_MMU_ENTRY_STATUS 0x1288U +#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_MASKFULL 0x000000FFFFFFF0F3ULL +#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT 12U +#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT 4U +#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK 0xFFFFFFFFFFFFFF0FULL +#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT 0U +#define ROGUE_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK 0xFFFFFFFFFFFFFFFCULL + +/* Register ROGUE_CR_BIF_MMU_ENTRY */ +#define ROGUE_CR_BIF_MMU_ENTRY 0x1290U +#define ROGUE_CR_BIF_MMU_ENTRY_MASKFULL 0x0000000000000003ULL +#define ROGUE_CR_BIF_MMU_ENTRY_ENABLE_SHIFT 1U +#define ROGUE_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_BIF_MMU_ENTRY_ENABLE_EN 0x00000002U +#define ROGUE_CR_BIF_MMU_ENTRY_PENDING_SHIFT 0U +#define ROGUE_CR_BIF_MMU_ENTRY_PENDING_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_BIF_MMU_ENTRY_PENDING_EN 0x00000001U + +/* Register ROGUE_CR_BIF_CTRL_INVAL */ +#define ROGUE_CR_BIF_CTRL_INVAL 0x12A0U +#define ROGUE_CR_BIF_CTRL_INVAL_MASKFULL 0x000000000000000FULL +#define ROGUE_CR_BIF_CTRL_INVAL_TLB1_SHIFT 3U +#define ROGUE_CR_BIF_CTRL_INVAL_TLB1_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_BIF_CTRL_INVAL_TLB1_EN 0x00000008U +#define ROGUE_CR_BIF_CTRL_INVAL_PC_SHIFT 2U +#define ROGUE_CR_BIF_CTRL_INVAL_PC_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_BIF_CTRL_INVAL_PC_EN 0x00000004U +#define ROGUE_CR_BIF_CTRL_INVAL_PD_SHIFT 1U +#define 
ROGUE_CR_BIF_CTRL_INVAL_PD_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_BIF_CTRL_INVAL_PD_EN 0x00000002U +#define ROGUE_CR_BIF_CTRL_INVAL_PT_SHIFT 0U +#define ROGUE_CR_BIF_CTRL_INVAL_PT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_BIF_CTRL_INVAL_PT_EN 0x00000001U + +/* Register ROGUE_CR_BIF_CTRL */ +#define ROGUE_CR_BIF_CTRL 0x12A8U +#define ROGUE_CR_BIF_CTRL__XE_MEM__MASKFULL 0x000000000000033FULL +#define ROGUE_CR_BIF_CTRL_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_CPU_SHIFT 9U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_CPU_CLRMSK 0xFFFFFDFFU +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_CPU_EN 0x00000200U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF4_SHIFT 8U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF4_CLRMSK 0xFFFFFEFFU +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF4_EN 0x00000100U +#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT 7U +#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK 0xFFFFFF7FU +#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN 0x00000080U +#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT 6U +#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK 0xFFFFFFBFU +#define ROGUE_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN 0x00000040U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT 5U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK 0xFFFFFFDFU +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN 0x00000020U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT 4U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN 0x00000010U +#define ROGUE_CR_BIF_CTRL_PAUSE_BIF1_SHIFT 3U +#define ROGUE_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_BIF_CTRL_PAUSE_BIF1_EN 0x00000008U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT 2U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_PM_EN 0x00000004U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT 1U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN 0x00000002U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT 0U +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN 0x00000001U + +/* Register ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS */ +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS 0x12B0U +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL 0x000000000000F775ULL +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT 12U +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT 8U +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT 5U +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT 4U +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN 0x00000010U +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT 0U +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN 0x00000001U + +/* Register ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS */ +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS 0x12B8U +#define 
ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__MASKFULL 0x001FFFFFFFFFFFF0ULL +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL 0x0007FFFFFFFFFFF0ULL +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_SHIFT 52U +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_CLRMSK 0xFFEFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN 0x0010000000000000ULL +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT 50U +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK 0xFFFBFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN 0x0004000000000000ULL +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT 46U +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK 0xFFF03FFFFFFFFFFFULL +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT 44U +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK 0xFFFC0FFFFFFFFFFFULL +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT 40U +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT 40U +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK 0xFFFFC0FFFFFFFFFFULL +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT 4U +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U +#define ROGUE_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE 16U + +/* Register ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS */ +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS 0x12C0U +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL 0x000000000000F775ULL +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT 12U +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT 8U +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT 5U +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT 4U +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN 0x00000010U +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT 0U +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN 0x00000001U + +/* Register ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS */ +#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS 0x12C8U +#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL 0x0007FFFFFFFFFFF0ULL +#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT 50U +#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK 0xFFFBFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN 0x0004000000000000ULL +#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT 44U +#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK 0xFFFC0FFFFFFFFFFFULL +#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT 40U +#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL +#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT 4U +#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL +#define 
ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U +#define ROGUE_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE 16U + +/* Register ROGUE_CR_BIF_MMU_STATUS */ +#define ROGUE_CR_BIF_MMU_STATUS 0x12D0U +#define ROGUE_CR_BIF_MMU_STATUS__XE_MEM__MASKFULL 0x000000001FFFFFF7ULL +#define ROGUE_CR_BIF_MMU_STATUS_MASKFULL 0x000000001FFFFFF7ULL +#define ROGUE_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT 28U +#define ROGUE_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK 0xEFFFFFFFU +#define ROGUE_CR_BIF_MMU_STATUS_PM_FAULT_EN 0x10000000U +#define ROGUE_CR_BIF_MMU_STATUS_PC_DATA_SHIFT 20U +#define ROGUE_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK 0xF00FFFFFU +#define ROGUE_CR_BIF_MMU_STATUS_PD_DATA_SHIFT 12U +#define ROGUE_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK 0xFFF00FFFU +#define ROGUE_CR_BIF_MMU_STATUS_PT_DATA_SHIFT 4U +#define ROGUE_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK 0xFFFFF00FU +#define ROGUE_CR_BIF_MMU_STATUS_STALLED_SHIFT 2U +#define ROGUE_CR_BIF_MMU_STATUS_STALLED_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_BIF_MMU_STATUS_STALLED_EN 0x00000004U +#define ROGUE_CR_BIF_MMU_STATUS_PAUSED_SHIFT 1U +#define ROGUE_CR_BIF_MMU_STATUS_PAUSED_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_BIF_MMU_STATUS_PAUSED_EN 0x00000002U +#define ROGUE_CR_BIF_MMU_STATUS_BUSY_SHIFT 0U +#define ROGUE_CR_BIF_MMU_STATUS_BUSY_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_BIF_MMU_STATUS_BUSY_EN 0x00000001U + +/* Register group: ROGUE_CR_BIF_TILING_CFG, with 8 repeats */ +#define ROGUE_CR_BIF_TILING_CFG_REPEATCOUNT 8U +/* Register ROGUE_CR_BIF_TILING_CFG0 */ +#define ROGUE_CR_BIF_TILING_CFG0 0x12D8U +#define ROGUE_CR_BIF_TILING_CFG0_MASKFULL 0xFFFFFFFF0FFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG0_XSTRIDE_SHIFT 61U +#define ROGUE_CR_BIF_TILING_CFG0_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG0_ENABLE_SHIFT 60U +#define ROGUE_CR_BIF_TILING_CFG0_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG0_ENABLE_EN 0x1000000000000000ULL +#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_SHIFT 32U +#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE 4096U +#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_SHIFT 0U +#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL +#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE 4096U + +/* Register ROGUE_CR_BIF_TILING_CFG1 */ +#define ROGUE_CR_BIF_TILING_CFG1 0x12E0U +#define ROGUE_CR_BIF_TILING_CFG1_MASKFULL 0xFFFFFFFF0FFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG1_XSTRIDE_SHIFT 61U +#define ROGUE_CR_BIF_TILING_CFG1_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG1_ENABLE_SHIFT 60U +#define ROGUE_CR_BIF_TILING_CFG1_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG1_ENABLE_EN 0x1000000000000000ULL +#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_SHIFT 32U +#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE 4096U +#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_SHIFT 0U +#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL +#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE 4096U + +/* Register ROGUE_CR_BIF_TILING_CFG2 */ +#define ROGUE_CR_BIF_TILING_CFG2 0x12E8U +#define ROGUE_CR_BIF_TILING_CFG2_MASKFULL 0xFFFFFFFF0FFFFFFFULL +#define 
ROGUE_CR_BIF_TILING_CFG2_XSTRIDE_SHIFT 61U +#define ROGUE_CR_BIF_TILING_CFG2_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG2_ENABLE_SHIFT 60U +#define ROGUE_CR_BIF_TILING_CFG2_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG2_ENABLE_EN 0x1000000000000000ULL +#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_SHIFT 32U +#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE 4096U +#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_SHIFT 0U +#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL +#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE 4096U + +/* Register ROGUE_CR_BIF_TILING_CFG3 */ +#define ROGUE_CR_BIF_TILING_CFG3 0x12F0U +#define ROGUE_CR_BIF_TILING_CFG3_MASKFULL 0xFFFFFFFF0FFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG3_XSTRIDE_SHIFT 61U +#define ROGUE_CR_BIF_TILING_CFG3_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG3_ENABLE_SHIFT 60U +#define ROGUE_CR_BIF_TILING_CFG3_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG3_ENABLE_EN 0x1000000000000000ULL +#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_SHIFT 32U +#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE 4096U +#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_SHIFT 0U +#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL +#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE 4096U + +/* Register ROGUE_CR_BIF_TILING_CFG4 */ +#define ROGUE_CR_BIF_TILING_CFG4 0x12F8U +#define ROGUE_CR_BIF_TILING_CFG4_MASKFULL 0xFFFFFFFF0FFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG4_XSTRIDE_SHIFT 61U +#define ROGUE_CR_BIF_TILING_CFG4_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG4_ENABLE_SHIFT 60U +#define ROGUE_CR_BIF_TILING_CFG4_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG4_ENABLE_EN 0x1000000000000000ULL +#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_SHIFT 32U +#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE 4096U +#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_SHIFT 0U +#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL +#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE 4096U + +/* Register ROGUE_CR_BIF_TILING_CFG5 */ +#define ROGUE_CR_BIF_TILING_CFG5 0x1300U +#define ROGUE_CR_BIF_TILING_CFG5_MASKFULL 0xFFFFFFFF0FFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG5_XSTRIDE_SHIFT 61U +#define ROGUE_CR_BIF_TILING_CFG5_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG5_ENABLE_SHIFT 60U +#define ROGUE_CR_BIF_TILING_CFG5_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG5_ENABLE_EN 0x1000000000000000ULL +#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_SHIFT 32U +#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE 4096U +#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_SHIFT 0U +#define 
ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL +#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE 4096U + +/* Register ROGUE_CR_BIF_TILING_CFG6 */ +#define ROGUE_CR_BIF_TILING_CFG6 0x1308U +#define ROGUE_CR_BIF_TILING_CFG6_MASKFULL 0xFFFFFFFF0FFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG6_XSTRIDE_SHIFT 61U +#define ROGUE_CR_BIF_TILING_CFG6_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG6_ENABLE_SHIFT 60U +#define ROGUE_CR_BIF_TILING_CFG6_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG6_ENABLE_EN 0x1000000000000000ULL +#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_SHIFT 32U +#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE 4096U +#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_SHIFT 0U +#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL +#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE 4096U + +/* Register ROGUE_CR_BIF_TILING_CFG7 */ +#define ROGUE_CR_BIF_TILING_CFG7 0x1310U +#define ROGUE_CR_BIF_TILING_CFG7_MASKFULL 0xFFFFFFFF0FFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG7_XSTRIDE_SHIFT 61U +#define ROGUE_CR_BIF_TILING_CFG7_XSTRIDE_CLRMSK 0x1FFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG7_ENABLE_SHIFT 60U +#define ROGUE_CR_BIF_TILING_CFG7_ENABLE_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG7_ENABLE_EN 0x1000000000000000ULL +#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_SHIFT 32U +#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_CLRMSK 0xF0000000FFFFFFFFULL +#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE 4096U +#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_SHIFT 0U +#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_CLRMSK 0xFFFFFFFFF0000000ULL +#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT 12U +#define ROGUE_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE 4096U + +/* Register ROGUE_CR_BIF_READS_EXT_STATUS */ +#define ROGUE_CR_BIF_READS_EXT_STATUS 0x1320U +#define ROGUE_CR_BIF_READS_EXT_STATUS_MASKFULL 0x000000000FFFFFFFULL +#define ROGUE_CR_BIF_READS_EXT_STATUS_MMU_SHIFT 16U +#define ROGUE_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK 0xF000FFFFU +#define ROGUE_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT 0U +#define ROGUE_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_BIF_READS_INT_STATUS */ +#define ROGUE_CR_BIF_READS_INT_STATUS 0x1328U +#define ROGUE_CR_BIF_READS_INT_STATUS_MASKFULL 0x0000000007FFFFFFULL +#define ROGUE_CR_BIF_READS_INT_STATUS_MMU_SHIFT 16U +#define ROGUE_CR_BIF_READS_INT_STATUS_MMU_CLRMSK 0xF800FFFFU +#define ROGUE_CR_BIF_READS_INT_STATUS_BANK1_SHIFT 0U +#define ROGUE_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_BIFPM_READS_INT_STATUS */ +#define ROGUE_CR_BIFPM_READS_INT_STATUS 0x1330U +#define ROGUE_CR_BIFPM_READS_INT_STATUS_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT 0U +#define ROGUE_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_BIFPM_READS_EXT_STATUS */ +#define ROGUE_CR_BIFPM_READS_EXT_STATUS 0x1338U +#define ROGUE_CR_BIFPM_READS_EXT_STATUS_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT 0U +#define ROGUE_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_BIFPM_STATUS_MMU */ 
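/*
 * Illustrative sketch, not part of this patch: these headers follow one
 * convention throughout -- <FIELD>_CLRMSK is the mask that *clears* a
 * field (so ~CLRMSK selects it), <FIELD>_SHIFT positions it, and
 * single-bit fields also get a <FIELD>_EN constant equal to
 * (1 << SHIFT). Decoding a BIF fault status word therefore looks like
 * the hypothetical helper below (u32 and pr_err() are the usual
 * <linux/types.h>/<linux/printk.h> facilities; the function name is
 * made up for illustration).
 */
static inline void rogue_decode_bif_fault(u32 status)
{
	if (status & ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN) {
		u32 cat_base = (status & ~ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
			       ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
		u32 page_size = (status & ~ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >>
				ROGUE_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT;

		/* cat_base: faulting page-catalogue index (bits 15:12);
		 * page_size: encoded page size (bits 10:8).
		 */
		pr_err("BIF fault: cat_base=%u page_size=%u\n",
		       cat_base, page_size);
	}
}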
+#define ROGUE_CR_BIFPM_STATUS_MMU 0x1350U +#define ROGUE_CR_BIFPM_STATUS_MMU_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT 0U +#define ROGUE_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_BIF_STATUS_MMU */ +#define ROGUE_CR_BIF_STATUS_MMU 0x1358U +#define ROGUE_CR_BIF_STATUS_MMU_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_BIF_STATUS_MMU_REQUESTS_SHIFT 0U +#define ROGUE_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_BIF_FAULT_READ */ +#define ROGUE_CR_BIF_FAULT_READ 0x13E0U +#define ROGUE_CR_BIF_FAULT_READ_MASKFULL 0x000000FFFFFFFFF0ULL +#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_SHIFT 4U +#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_CLRMSK 0xFFFFFF000000000FULL +#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT 4U +#define ROGUE_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE 16U + +/* Register ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS */ +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS 0x1430U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL 0x000000000000F775ULL +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT 12U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT 8U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT 5U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT 4U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN 0x00000010U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT 0U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN 0x00000001U + +/* Register ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS */ +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS 0x1438U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL 0x0007FFFFFFFFFFF0ULL +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT 50U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK 0xFFFBFFFFFFFFFFFFULL +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN 0x0004000000000000ULL +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT 44U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK 0xFFFC0FFFFFFFFFFFULL +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT 40U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT 4U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U +#define ROGUE_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE 16U + +/* Register ROGUE_CR_MCU_FENCE */ +#define ROGUE_CR_MCU_FENCE 0x1740U +#define ROGUE_CR_MCU_FENCE_MASKFULL 0x000007FFFFFFFFE0ULL +#define ROGUE_CR_MCU_FENCE_DM_SHIFT 40U +#define ROGUE_CR_MCU_FENCE_DM_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_MCU_FENCE_DM_VERTEX 0x0000000000000000ULL +#define 
ROGUE_CR_MCU_FENCE_DM_PIXEL 0x0000010000000000ULL +#define ROGUE_CR_MCU_FENCE_DM_COMPUTE 0x0000020000000000ULL +#define ROGUE_CR_MCU_FENCE_DM_RAY_VERTEX 0x0000030000000000ULL +#define ROGUE_CR_MCU_FENCE_DM_RAY 0x0000040000000000ULL +#define ROGUE_CR_MCU_FENCE_DM_FASTRENDER 0x0000050000000000ULL +#define ROGUE_CR_MCU_FENCE_ADDR_SHIFT 5U +#define ROGUE_CR_MCU_FENCE_ADDR_CLRMSK 0xFFFFFF000000001FULL +#define ROGUE_CR_MCU_FENCE_ADDR_ALIGNSHIFT 5U +#define ROGUE_CR_MCU_FENCE_ADDR_ALIGNSIZE 32U + +/* Register group: ROGUE_CR_SCRATCH, with 16 repeats */ +#define ROGUE_CR_SCRATCH_REPEATCOUNT 16U +/* Register ROGUE_CR_SCRATCH0 */ +#define ROGUE_CR_SCRATCH0 0x1A00U +#define ROGUE_CR_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH0_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH0_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH1 */ +#define ROGUE_CR_SCRATCH1 0x1A08U +#define ROGUE_CR_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH1_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH1_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH2 */ +#define ROGUE_CR_SCRATCH2 0x1A10U +#define ROGUE_CR_SCRATCH2_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH2_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH2_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH3 */ +#define ROGUE_CR_SCRATCH3 0x1A18U +#define ROGUE_CR_SCRATCH3_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH3_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH3_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH4 */ +#define ROGUE_CR_SCRATCH4 0x1A20U +#define ROGUE_CR_SCRATCH4_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH4_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH4_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH5 */ +#define ROGUE_CR_SCRATCH5 0x1A28U +#define ROGUE_CR_SCRATCH5_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH5_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH5_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH6 */ +#define ROGUE_CR_SCRATCH6 0x1A30U +#define ROGUE_CR_SCRATCH6_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH6_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH6_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH7 */ +#define ROGUE_CR_SCRATCH7 0x1A38U +#define ROGUE_CR_SCRATCH7_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH7_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH7_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH8 */ +#define ROGUE_CR_SCRATCH8 0x1A40U +#define ROGUE_CR_SCRATCH8_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH8_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH8_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH9 */ +#define ROGUE_CR_SCRATCH9 0x1A48U +#define ROGUE_CR_SCRATCH9_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH9_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH9_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH10 */ +#define ROGUE_CR_SCRATCH10 0x1A50U +#define ROGUE_CR_SCRATCH10_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH10_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH10_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH11 */ +#define ROGUE_CR_SCRATCH11 0x1A58U +#define ROGUE_CR_SCRATCH11_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH11_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH11_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH12 */ +#define ROGUE_CR_SCRATCH12 0x1A60U +#define ROGUE_CR_SCRATCH12_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH12_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH12_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH13 */ +#define ROGUE_CR_SCRATCH13 0x1A68U 
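/*
 * Illustrative sketch, not part of this patch: registers in a "repeat"
 * group sit at a fixed 8-byte stride, which the offsets above confirm
 * (0x1A00, 0x1A08, ... 0x1A68). A hypothetical indexed accessor for
 * the scratch group could therefore be written as:
 */
#define ROGUE_CR_SCRATCH(n) (ROGUE_CR_SCRATCH0 + ((n) * 8U))
/* e.g. ROGUE_CR_SCRATCH(13) == 0x1A68 == ROGUE_CR_SCRATCH13, with n
 * bounded by ROGUE_CR_SCRATCH_REPEATCOUNT (16).
 */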
+#define ROGUE_CR_SCRATCH13_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH13_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH13_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH14 */ +#define ROGUE_CR_SCRATCH14 0x1A70U +#define ROGUE_CR_SCRATCH14_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH14_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH14_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SCRATCH15 */ +#define ROGUE_CR_SCRATCH15 0x1A78U +#define ROGUE_CR_SCRATCH15_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SCRATCH15_DATA_SHIFT 0U +#define ROGUE_CR_SCRATCH15_DATA_CLRMSK 0x00000000U + +/* Register group: ROGUE_CR_OS0_SCRATCH, with 2 repeats */ +#define ROGUE_CR_OS0_SCRATCH_REPEATCOUNT 2U +/* Register ROGUE_CR_OS0_SCRATCH0 */ +#define ROGUE_CR_OS0_SCRATCH0 0x1A80U +#define ROGUE_CR_OS0_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS0_SCRATCH0_DATA_SHIFT 0U +#define ROGUE_CR_OS0_SCRATCH0_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS0_SCRATCH1 */ +#define ROGUE_CR_OS0_SCRATCH1 0x1A88U +#define ROGUE_CR_OS0_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS0_SCRATCH1_DATA_SHIFT 0U +#define ROGUE_CR_OS0_SCRATCH1_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS0_SCRATCH2 */ +#define ROGUE_CR_OS0_SCRATCH2 0x1A90U +#define ROGUE_CR_OS0_SCRATCH2_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS0_SCRATCH2_DATA_SHIFT 0U +#define ROGUE_CR_OS0_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_OS0_SCRATCH3 */ +#define ROGUE_CR_OS0_SCRATCH3 0x1A98U +#define ROGUE_CR_OS0_SCRATCH3_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS0_SCRATCH3_DATA_SHIFT 0U +#define ROGUE_CR_OS0_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U + +/* Register group: ROGUE_CR_OS1_SCRATCH, with 2 repeats */ +#define ROGUE_CR_OS1_SCRATCH_REPEATCOUNT 2U +/* Register ROGUE_CR_OS1_SCRATCH0 */ +#define ROGUE_CR_OS1_SCRATCH0 0x11A80U +#define ROGUE_CR_OS1_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS1_SCRATCH0_DATA_SHIFT 0U +#define ROGUE_CR_OS1_SCRATCH0_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS1_SCRATCH1 */ +#define ROGUE_CR_OS1_SCRATCH1 0x11A88U +#define ROGUE_CR_OS1_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS1_SCRATCH1_DATA_SHIFT 0U +#define ROGUE_CR_OS1_SCRATCH1_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS1_SCRATCH2 */ +#define ROGUE_CR_OS1_SCRATCH2 0x11A90U +#define ROGUE_CR_OS1_SCRATCH2_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS1_SCRATCH2_DATA_SHIFT 0U +#define ROGUE_CR_OS1_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_OS1_SCRATCH3 */ +#define ROGUE_CR_OS1_SCRATCH3 0x11A98U +#define ROGUE_CR_OS1_SCRATCH3_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS1_SCRATCH3_DATA_SHIFT 0U +#define ROGUE_CR_OS1_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U + +/* Register group: ROGUE_CR_OS2_SCRATCH, with 2 repeats */ +#define ROGUE_CR_OS2_SCRATCH_REPEATCOUNT 2U +/* Register ROGUE_CR_OS2_SCRATCH0 */ +#define ROGUE_CR_OS2_SCRATCH0 0x21A80U +#define ROGUE_CR_OS2_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS2_SCRATCH0_DATA_SHIFT 0U +#define ROGUE_CR_OS2_SCRATCH0_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS2_SCRATCH1 */ +#define ROGUE_CR_OS2_SCRATCH1 0x21A88U +#define ROGUE_CR_OS2_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS2_SCRATCH1_DATA_SHIFT 0U +#define ROGUE_CR_OS2_SCRATCH1_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS2_SCRATCH2 */ +#define ROGUE_CR_OS2_SCRATCH2 0x21A90U +#define ROGUE_CR_OS2_SCRATCH2_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS2_SCRATCH2_DATA_SHIFT 0U +#define 
ROGUE_CR_OS2_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_OS2_SCRATCH3 */ +#define ROGUE_CR_OS2_SCRATCH3 0x21A98U +#define ROGUE_CR_OS2_SCRATCH3_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS2_SCRATCH3_DATA_SHIFT 0U +#define ROGUE_CR_OS2_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U + +/* Register group: ROGUE_CR_OS3_SCRATCH, with 2 repeats */ +#define ROGUE_CR_OS3_SCRATCH_REPEATCOUNT 2U +/* Register ROGUE_CR_OS3_SCRATCH0 */ +#define ROGUE_CR_OS3_SCRATCH0 0x31A80U +#define ROGUE_CR_OS3_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS3_SCRATCH0_DATA_SHIFT 0U +#define ROGUE_CR_OS3_SCRATCH0_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS3_SCRATCH1 */ +#define ROGUE_CR_OS3_SCRATCH1 0x31A88U +#define ROGUE_CR_OS3_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS3_SCRATCH1_DATA_SHIFT 0U +#define ROGUE_CR_OS3_SCRATCH1_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS3_SCRATCH2 */ +#define ROGUE_CR_OS3_SCRATCH2 0x31A90U +#define ROGUE_CR_OS3_SCRATCH2_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS3_SCRATCH2_DATA_SHIFT 0U +#define ROGUE_CR_OS3_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_OS3_SCRATCH3 */ +#define ROGUE_CR_OS3_SCRATCH3 0x31A98U +#define ROGUE_CR_OS3_SCRATCH3_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS3_SCRATCH3_DATA_SHIFT 0U +#define ROGUE_CR_OS3_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U + +/* Register group: ROGUE_CR_OS4_SCRATCH, with 2 repeats */ +#define ROGUE_CR_OS4_SCRATCH_REPEATCOUNT 2U +/* Register ROGUE_CR_OS4_SCRATCH0 */ +#define ROGUE_CR_OS4_SCRATCH0 0x41A80U +#define ROGUE_CR_OS4_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS4_SCRATCH0_DATA_SHIFT 0U +#define ROGUE_CR_OS4_SCRATCH0_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS4_SCRATCH1 */ +#define ROGUE_CR_OS4_SCRATCH1 0x41A88U +#define ROGUE_CR_OS4_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS4_SCRATCH1_DATA_SHIFT 0U +#define ROGUE_CR_OS4_SCRATCH1_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS4_SCRATCH2 */ +#define ROGUE_CR_OS4_SCRATCH2 0x41A90U +#define ROGUE_CR_OS4_SCRATCH2_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS4_SCRATCH2_DATA_SHIFT 0U +#define ROGUE_CR_OS4_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_OS4_SCRATCH3 */ +#define ROGUE_CR_OS4_SCRATCH3 0x41A98U +#define ROGUE_CR_OS4_SCRATCH3_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS4_SCRATCH3_DATA_SHIFT 0U +#define ROGUE_CR_OS4_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U + +/* Register group: ROGUE_CR_OS5_SCRATCH, with 2 repeats */ +#define ROGUE_CR_OS5_SCRATCH_REPEATCOUNT 2U +/* Register ROGUE_CR_OS5_SCRATCH0 */ +#define ROGUE_CR_OS5_SCRATCH0 0x51A80U +#define ROGUE_CR_OS5_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS5_SCRATCH0_DATA_SHIFT 0U +#define ROGUE_CR_OS5_SCRATCH0_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS5_SCRATCH1 */ +#define ROGUE_CR_OS5_SCRATCH1 0x51A88U +#define ROGUE_CR_OS5_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS5_SCRATCH1_DATA_SHIFT 0U +#define ROGUE_CR_OS5_SCRATCH1_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS5_SCRATCH2 */ +#define ROGUE_CR_OS5_SCRATCH2 0x51A90U +#define ROGUE_CR_OS5_SCRATCH2_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS5_SCRATCH2_DATA_SHIFT 0U +#define ROGUE_CR_OS5_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_OS5_SCRATCH3 */ +#define ROGUE_CR_OS5_SCRATCH3 0x51A98U +#define ROGUE_CR_OS5_SCRATCH3_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS5_SCRATCH3_DATA_SHIFT 0U +#define ROGUE_CR_OS5_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U + +/* Register group: ROGUE_CR_OS6_SCRATCH, 
with 2 repeats */ +#define ROGUE_CR_OS6_SCRATCH_REPEATCOUNT 2U +/* Register ROGUE_CR_OS6_SCRATCH0 */ +#define ROGUE_CR_OS6_SCRATCH0 0x61A80U +#define ROGUE_CR_OS6_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS6_SCRATCH0_DATA_SHIFT 0U +#define ROGUE_CR_OS6_SCRATCH0_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS6_SCRATCH1 */ +#define ROGUE_CR_OS6_SCRATCH1 0x61A88U +#define ROGUE_CR_OS6_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS6_SCRATCH1_DATA_SHIFT 0U +#define ROGUE_CR_OS6_SCRATCH1_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS6_SCRATCH2 */ +#define ROGUE_CR_OS6_SCRATCH2 0x61A90U +#define ROGUE_CR_OS6_SCRATCH2_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS6_SCRATCH2_DATA_SHIFT 0U +#define ROGUE_CR_OS6_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_OS6_SCRATCH3 */ +#define ROGUE_CR_OS6_SCRATCH3 0x61A98U +#define ROGUE_CR_OS6_SCRATCH3_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS6_SCRATCH3_DATA_SHIFT 0U +#define ROGUE_CR_OS6_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U + +/* Register group: ROGUE_CR_OS7_SCRATCH, with 2 repeats */ +#define ROGUE_CR_OS7_SCRATCH_REPEATCOUNT 2U +/* Register ROGUE_CR_OS7_SCRATCH0 */ +#define ROGUE_CR_OS7_SCRATCH0 0x71A80U +#define ROGUE_CR_OS7_SCRATCH0_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS7_SCRATCH0_DATA_SHIFT 0U +#define ROGUE_CR_OS7_SCRATCH0_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS7_SCRATCH1 */ +#define ROGUE_CR_OS7_SCRATCH1 0x71A88U +#define ROGUE_CR_OS7_SCRATCH1_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_OS7_SCRATCH1_DATA_SHIFT 0U +#define ROGUE_CR_OS7_SCRATCH1_DATA_CLRMSK 0x00000000U + +/* Register ROGUE_CR_OS7_SCRATCH2 */ +#define ROGUE_CR_OS7_SCRATCH2 0x71A90U +#define ROGUE_CR_OS7_SCRATCH2_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS7_SCRATCH2_DATA_SHIFT 0U +#define ROGUE_CR_OS7_SCRATCH2_DATA_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_OS7_SCRATCH3 */ +#define ROGUE_CR_OS7_SCRATCH3 0x71A98U +#define ROGUE_CR_OS7_SCRATCH3_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_OS7_SCRATCH3_DATA_SHIFT 0U +#define ROGUE_CR_OS7_SCRATCH3_DATA_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_SPFILTER_SIGNAL_DESCR */ +#define ROGUE_CR_SPFILTER_SIGNAL_DESCR 0x2700U +#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT 0U +#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK 0xFFFF0000U +#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT 4U +#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE 16U + +/* Register ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN */ +#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN 0x2708U +#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL 0x000000FFFFFFFFF0ULL +#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT 4U +#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK 0xFFFFFF000000000FULL +#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT 4U +#define ROGUE_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE 16U + +/* Register group: ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG, with 16 repeats */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT 16U +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0 0x3000U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT 61U 
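/*
 * Illustrative sketch, not part of this patch: single-bit control
 * fields are updated read-modify-write with the _CLRMSK/_EN pair, as
 * in this hypothetical helper for the BIF_CTRL pause bit defined
 * earlier (readl()/writel() are the standard <linux/io.h> accessors;
 * 'regs' is an assumed ioremapped register base).
 */
static void rogue_bif_pause_mmu_bif0(void __iomem *regs, bool pause)
{
	u32 ctrl = readl(regs + ROGUE_CR_BIF_CTRL);

	ctrl &= ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK;	/* clear bit 0 */
	if (pause)
		ctrl |= ROGUE_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN;	/* set bit 0 */
	writel(ctrl, regs + ROGUE_CR_BIF_CTRL);
}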
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1 0x3008U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2 0x3010U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define 
ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3 0x3018U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4 0x3020U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5 0x3028U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_EN 0x4000000000000000ULL +#define 
ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6 0x3030U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7 0x3038U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_CLRMSK 
0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8 0x3040U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9 0x3048U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10 0x3050U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_EN 0x4000000000000000ULL 
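/*
 * Illustrative sketch, not part of this patch: a 64-bit remap window
 * value is composed by masking each field into place. DEVVADDR_SHIFT
 * equals DEVVADDR_ALIGNSHIFT (12), so a 4 KiB-aligned device-virtual
 * address already sits in the right bit positions and only needs
 * masking. The helper name and the interpretation of the SIZE field's
 * encoding are assumptions, not taken from this patch.
 */
static u64 rogue_fwcore_remap_cfg(u64 devvaddr, u64 size_encoding)
{
	u64 cfg = devvaddr & ~ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK;

	cfg |= (size_encoding << ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT) &
	       ~ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK;
	cfg |= ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN |
	       ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN;

	return cfg;
}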
+#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11 0x3058U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12 0x3060U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_SHIFT 40U +#define 
ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13 0x3068U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14 0x3070U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_CLRMSK 0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15 */ +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15 0x3078U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL 0x7FFFF7FFFFFFF000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_SHIFT 62U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_CLRMSK 
0xBFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_EN 0x4000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT 61U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK 0xDFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN 0x2000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT 60U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK 0xEFFFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN 0x1000000000000000ULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT 44U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK 0xF0000FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_SHIFT 40U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_CLRMSK 0xFFFFF8FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_BOOT */ +#define ROGUE_CR_FWCORE_BOOT 0x3090U +#define ROGUE_CR_FWCORE_BOOT_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_FWCORE_BOOT_ENABLE_SHIFT 0U +#define ROGUE_CR_FWCORE_BOOT_ENABLE_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_FWCORE_BOOT_ENABLE_EN 0x00000001U + +/* Register ROGUE_CR_FWCORE_RESET_ADDR */ +#define ROGUE_CR_FWCORE_RESET_ADDR 0x3098U +#define ROGUE_CR_FWCORE_RESET_ADDR_MASKFULL 0x00000000FFFFFFFEULL +#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_SHIFT 1U +#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK 0x00000001U +#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSHIFT 1U +#define ROGUE_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSIZE 2U + +/* Register ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR */ +#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR 0x30A0U +#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_MASKFULL 0x00000000FFFFFFFEULL +#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_SHIFT 1U +#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_CLRMSK 0x00000001U +#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSHIFT 1U +#define ROGUE_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSIZE 2U + +/* Register ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT */ +#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT 0x30A8U +#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT 0U +#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN 0x00000001U + +/* Register ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS */ +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS 0x30B0U +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_MASKFULL 0x000000000000F771ULL +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_SHIFT 12U +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT 8U +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_SHIFT 5U +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_SHIFT 4U +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_EN 0x00000010U +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_SHIFT 0U +#define ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU +#define 
ROGUE_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_EN 0x00000001U + +/* Register ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS */ +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS 0x30B8U +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_MASKFULL 0x001FFFFFFFFFFFF0ULL +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_SHIFT 52U +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_CLRMSK 0xFFEFFFFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN 0x0010000000000000ULL +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT 46U +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK 0xFFF03FFFFFFFFFFFULL +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT 40U +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFC0FFFFFFFFFFULL +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT 4U +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U +#define ROGUE_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSIZE 16U + +/* Register ROGUE_CR_FWCORE_MEM_CTRL_INVAL */ +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL 0x30C0U +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_MASKFULL 0x000000000000000FULL +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_SHIFT 3U +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_TLB_EN 0x00000008U +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_SHIFT 2U +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PC_EN 0x00000004U +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_SHIFT 1U +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PD_EN 0x00000002U +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_SHIFT 0U +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_FWCORE_MEM_CTRL_INVAL_PT_EN 0x00000001U + +/* Register ROGUE_CR_FWCORE_MEM_MMU_STATUS */ +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS 0x30C8U +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_MASKFULL 0x000000000FFFFFF7ULL +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_SHIFT 20U +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_CLRMSK 0xF00FFFFFU +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_SHIFT 12U +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_CLRMSK 0xFFF00FFFU +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_SHIFT 4U +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_CLRMSK 0xFFFFF00FU +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_STALLED_SHIFT 2U +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_STALLED_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_STALLED_EN 0x00000004U +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PAUSED_SHIFT 1U +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PAUSED_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_PAUSED_EN 0x00000002U +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_BUSY_SHIFT 0U +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_BUSY_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_FWCORE_MEM_MMU_STATUS_BUSY_EN 0x00000001U + +/* Register ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS */ +#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS 0x30D8U +#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS_MASKFULL 0x0000000000000FFFULL +#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_SHIFT 0U +#define ROGUE_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_CLRMSK 0xFFFFF000U + +/* Register ROGUE_CR_FWCORE_MEM_READS_INT_STATUS */ +#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS 0x30E0U +#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS_MASKFULL 0x00000000000007FFULL +#define ROGUE_CR_FWCORE_MEM_READS_INT_STATUS_MMU_SHIFT 0U +#define 
ROGUE_CR_FWCORE_MEM_READS_INT_STATUS_MMU_CLRMSK 0xFFFFF800U + +/* Register ROGUE_CR_FWCORE_WRAPPER_FENCE */ +#define ROGUE_CR_FWCORE_WRAPPER_FENCE 0x30E8U +#define ROGUE_CR_FWCORE_WRAPPER_FENCE_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_FWCORE_WRAPPER_FENCE_ID_SHIFT 0U +#define ROGUE_CR_FWCORE_WRAPPER_FENCE_ID_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_FWCORE_WRAPPER_FENCE_ID_EN 0x00000001U + +/* Register group: ROGUE_CR_FWCORE_MEM_CAT_BASE, with 8 repeats */ +#define ROGUE_CR_FWCORE_MEM_CAT_BASE_REPEATCOUNT 8U +/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE0 */ +#define ROGUE_CR_FWCORE_MEM_CAT_BASE0 0x30F0U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_MASKFULL 0x000000FFFFFFF000ULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE1 */ +#define ROGUE_CR_FWCORE_MEM_CAT_BASE1 0x30F8U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_MASKFULL 0x000000FFFFFFF000ULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE2 */ +#define ROGUE_CR_FWCORE_MEM_CAT_BASE2 0x3100U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_MASKFULL 0x000000FFFFFFF000ULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE3 */ +#define ROGUE_CR_FWCORE_MEM_CAT_BASE3 0x3108U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_MASKFULL 0x000000FFFFFFF000ULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE4 */ +#define ROGUE_CR_FWCORE_MEM_CAT_BASE4 0x3110U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_MASKFULL 0x000000FFFFFFF000ULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE5 */ +#define ROGUE_CR_FWCORE_MEM_CAT_BASE5 0x3118U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_MASKFULL 0x000000FFFFFFF000ULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE6 */ +#define ROGUE_CR_FWCORE_MEM_CAT_BASE6 0x3120U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_MASKFULL 0x000000FFFFFFF000ULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_MEM_CAT_BASE7 */ +#define ROGUE_CR_FWCORE_MEM_CAT_BASE7 0x3128U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_MASKFULL 
0x000000FFFFFFF000ULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_SHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_CLRMSK 0xFFFFFF0000000FFFULL +#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSHIFT 12U +#define ROGUE_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_FWCORE_WDT_RESET */ +#define ROGUE_CR_FWCORE_WDT_RESET 0x3130U +#define ROGUE_CR_FWCORE_WDT_RESET_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_FWCORE_WDT_RESET_EN_SHIFT 0U +#define ROGUE_CR_FWCORE_WDT_RESET_EN_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_FWCORE_WDT_RESET_EN_EN 0x00000001U + +/* Register ROGUE_CR_FWCORE_WDT_CTRL */ +#define ROGUE_CR_FWCORE_WDT_CTRL 0x3138U +#define ROGUE_CR_FWCORE_WDT_CTRL_MASKFULL 0x00000000FFFF1F01ULL +#define ROGUE_CR_FWCORE_WDT_CTRL_PROT_SHIFT 16U +#define ROGUE_CR_FWCORE_WDT_CTRL_PROT_CLRMSK 0x0000FFFFU +#define ROGUE_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT 8U +#define ROGUE_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK 0xFFFFE0FFU +#define ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT 0U +#define ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_FWCORE_WDT_CTRL_ENABLE_EN 0x00000001U + +/* Register ROGUE_CR_FWCORE_WDT_COUNT */ +#define ROGUE_CR_FWCORE_WDT_COUNT 0x3140U +#define ROGUE_CR_FWCORE_WDT_COUNT_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_FWCORE_WDT_COUNT_VALUE_SHIFT 0U +#define ROGUE_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK 0x00000000U + +/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED0, with 4 repeats */ +#define ROGUE_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT 4U +/* Register ROGUE_CR_FWCORE_DMI_RESERVED00 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED00 0x3400U +#define ROGUE_CR_FWCORE_DMI_RESERVED00_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_RESERVED01 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED01 0x3408U +#define ROGUE_CR_FWCORE_DMI_RESERVED01_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_RESERVED02 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED02 0x3410U +#define ROGUE_CR_FWCORE_DMI_RESERVED02_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_RESERVED03 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED03 0x3418U +#define ROGUE_CR_FWCORE_DMI_RESERVED03_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_DATA0 */ +#define ROGUE_CR_FWCORE_DMI_DATA0 0x3420U +#define ROGUE_CR_FWCORE_DMI_DATA0_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_DATA1 */ +#define ROGUE_CR_FWCORE_DMI_DATA1 0x3428U +#define ROGUE_CR_FWCORE_DMI_DATA1_MASKFULL 0x0000000000000000ULL + +/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED1, with 5 repeats */ +#define ROGUE_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT 5U +/* Register ROGUE_CR_FWCORE_DMI_RESERVED10 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED10 0x3430U +#define ROGUE_CR_FWCORE_DMI_RESERVED10_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_RESERVED11 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED11 0x3438U +#define ROGUE_CR_FWCORE_DMI_RESERVED11_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_RESERVED12 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED12 0x3440U +#define ROGUE_CR_FWCORE_DMI_RESERVED12_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_RESERVED13 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED13 0x3448U +#define ROGUE_CR_FWCORE_DMI_RESERVED13_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_RESERVED14 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED14 0x3450U +#define ROGUE_CR_FWCORE_DMI_RESERVED14_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_DMCONTROL */ +#define ROGUE_CR_FWCORE_DMI_DMCONTROL 0x3480U +#define 
ROGUE_CR_FWCORE_DMI_DMCONTROL_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_DMSTATUS */ +#define ROGUE_CR_FWCORE_DMI_DMSTATUS 0x3488U +#define ROGUE_CR_FWCORE_DMI_DMSTATUS_MASKFULL 0x0000000000000000ULL + +/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED2, with 4 repeats */ +#define ROGUE_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT 4U +/* Register ROGUE_CR_FWCORE_DMI_RESERVED20 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED20 0x3490U +#define ROGUE_CR_FWCORE_DMI_RESERVED20_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_RESERVED21 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED21 0x3498U +#define ROGUE_CR_FWCORE_DMI_RESERVED21_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_RESERVED22 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED22 0x34A0U +#define ROGUE_CR_FWCORE_DMI_RESERVED22_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_RESERVED23 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED23 0x34A8U +#define ROGUE_CR_FWCORE_DMI_RESERVED23_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_ABSTRACTCS */ +#define ROGUE_CR_FWCORE_DMI_ABSTRACTCS 0x34B0U +#define ROGUE_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_COMMAND */ +#define ROGUE_CR_FWCORE_DMI_COMMAND 0x34B8U +#define ROGUE_CR_FWCORE_DMI_COMMAND_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_SBCS */ +#define ROGUE_CR_FWCORE_DMI_SBCS 0x35C0U +#define ROGUE_CR_FWCORE_DMI_SBCS_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_SBADDRESS0 */ +#define ROGUE_CR_FWCORE_DMI_SBADDRESS0 0x35C8U +#define ROGUE_CR_FWCORE_DMI_SBADDRESS0_MASKFULL 0x0000000000000000ULL + +/* Register group: ROGUE_CR_FWCORE_DMI_RESERVED3, with 2 repeats */ +#define ROGUE_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT 2U +/* Register ROGUE_CR_FWCORE_DMI_RESERVED30 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED30 0x34D0U +#define ROGUE_CR_FWCORE_DMI_RESERVED30_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_RESERVED31 */ +#define ROGUE_CR_FWCORE_DMI_RESERVED31 0x34D8U +#define ROGUE_CR_FWCORE_DMI_RESERVED31_MASKFULL 0x0000000000000000ULL + +/* Register group: ROGUE_CR_FWCORE_DMI_SBDATA, with 4 repeats */ +#define ROGUE_CR_FWCORE_DMI_SBDATA_REPEATCOUNT 4U +/* Register ROGUE_CR_FWCORE_DMI_SBDATA0 */ +#define ROGUE_CR_FWCORE_DMI_SBDATA0 0x35E0U +#define ROGUE_CR_FWCORE_DMI_SBDATA0_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_SBDATA1 */ +#define ROGUE_CR_FWCORE_DMI_SBDATA1 0x35E8U +#define ROGUE_CR_FWCORE_DMI_SBDATA1_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_SBDATA2 */ +#define ROGUE_CR_FWCORE_DMI_SBDATA2 0x35F0U +#define ROGUE_CR_FWCORE_DMI_SBDATA2_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_SBDATA3 */ +#define ROGUE_CR_FWCORE_DMI_SBDATA3 0x35F8U +#define ROGUE_CR_FWCORE_DMI_SBDATA3_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_FWCORE_DMI_HALTSUM0 */ +#define ROGUE_CR_FWCORE_DMI_HALTSUM0 0x3600U +#define ROGUE_CR_FWCORE_DMI_HALTSUM0_MASKFULL 0x0000000000000000ULL + +/* Register ROGUE_CR_SLC_CTRL_MISC */ +#define ROGUE_CR_SLC_CTRL_MISC 0x3800U +#define ROGUE_CR_SLC_CTRL_MISC_MASKFULL 0xFFFFFFFF01FF010FULL +#define ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT 32U +#define ROGUE_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK 0x00000000FFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_SHIFT 24U +#define ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_CLRMSK 0xFFFFFFFFFEFFFFFFULL +#define ROGUE_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN 0x0000000001000000ULL +#define 
ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT 16U +#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK 0xFFFFFFFFFF00FFFFULL +#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE 0x0000000000000000ULL +#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE 0x0000000000010000ULL +#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 0x0000000000100000ULL +#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 0x0000000000110000ULL +#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1 0x0000000000200000ULL +#define ROGUE_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE 0x0000000000210000ULL +#define ROGUE_CR_SLC_CTRL_MISC_PAUSE_SHIFT 8U +#define ROGUE_CR_SLC_CTRL_MISC_PAUSE_CLRMSK 0xFFFFFFFFFFFFFEFFULL +#define ROGUE_CR_SLC_CTRL_MISC_PAUSE_EN 0x0000000000000100ULL +#define ROGUE_CR_SLC_CTRL_MISC_RESP_PRIORITY_SHIFT 3U +#define ROGUE_CR_SLC_CTRL_MISC_RESP_PRIORITY_CLRMSK 0xFFFFFFFFFFFFFFF7ULL +#define ROGUE_CR_SLC_CTRL_MISC_RESP_PRIORITY_EN 0x0000000000000008ULL +#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT 2U +#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK 0xFFFFFFFFFFFFFFFBULL +#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN 0x0000000000000004ULL +#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT 1U +#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK 0xFFFFFFFFFFFFFFFDULL +#define ROGUE_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN 0x0000000000000002ULL +#define ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT 0U +#define ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK 0xFFFFFFFFFFFFFFFEULL +#define ROGUE_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN 0x0000000000000001ULL + +/* Register ROGUE_CR_SLC_CTRL_FLUSH_INVAL */ +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL 0x3818U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL 0x0000000080000FFFULL +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT 31U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK 0x7FFFFFFFU +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN 0x80000000U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_SHIFT 11U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_CLRMSK 0xFFFFF7FFU +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_EN 0x00000800U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT 10U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK 0xFFFFFBFFU +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN 0x00000400U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT 9U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK 0xFFFFFDFFU +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN 0x00000200U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT 8U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK 0xFFFFFEFFU +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN 0x00000100U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT 7U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK 0xFFFFFF7FU +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN 0x00000080U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT 6U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK 0xFFFFFFBFU +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN 0x00000040U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT 5U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK 0xFFFFFFDFU +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN 0x00000020U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT 4U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN 
0x00000010U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT 3U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN 0x00000008U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT 2U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN 0x00000004U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT 1U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN 0x00000002U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT 0U +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN 0x00000001U + +/* Register ROGUE_CR_SLC_STATUS0 */ +#define ROGUE_CR_SLC_STATUS0 0x3820U +#define ROGUE_CR_SLC_STATUS0_MASKFULL 0x0000000000000007ULL +#define ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT 2U +#define ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN 0x00000004U +#define ROGUE_CR_SLC_STATUS0_INVAL_PENDING_SHIFT 1U +#define ROGUE_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_SLC_STATUS0_INVAL_PENDING_EN 0x00000002U +#define ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT 0U +#define ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_SLC_STATUS0_FLUSH_PENDING_EN 0x00000001U + +/* Register ROGUE_CR_SLC_CTRL_BYPASS */ +#define ROGUE_CR_SLC_CTRL_BYPASS 0x3828U +#define ROGUE_CR_SLC_CTRL_BYPASS__XE_MEM__MASKFULL 0x0FFFFFFFFFFF7FFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_MASKFULL 0x000000000FFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_SHIFT 59U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_CLRMSK 0xF7FFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_EN 0x0800000000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_SHIFT 58U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_CLRMSK 0xFBFFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_EN 0x0400000000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_SHIFT 57U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_CLRMSK 0xFDFFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_EN 0x0200000000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_SHIFT 56U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_CLRMSK 0xFEFFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_EN 0x0100000000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_SHIFT 55U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_CLRMSK 0xFF7FFFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_EN 0x0080000000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_SHIFT 54U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_CLRMSK 0xFFBFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_EN 0x0040000000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_SHIFT 53U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_CLRMSK 0xFFDFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_EN 0x0020000000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_SHIFT 52U +#define ROGUE_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_CLRMSK 0xFFEFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_EN 0x0010000000000000ULL +#define 
ROGUE_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_SHIFT 51U +#define ROGUE_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_CLRMSK 0xFFF7FFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_EN 0x0008000000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBC_SHIFT 50U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBC_CLRMSK 0xFFFBFFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBC_EN 0x0004000000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_SHIFT 49U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_CLRMSK 0xFFFDFFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_EN 0x0002000000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_SHIFT 48U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_CLRMSK 0xFFFEFFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_EN 0x0001000000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_SHIFT 47U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_CLRMSK 0xFFFF7FFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_EN 0x0000800000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_SHIFT 46U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_CLRMSK 0xFFFFBFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_EN 0x0000400000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_SHIFT 45U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_CLRMSK 0xFFFFDFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TCU_EN 0x0000200000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PBE_SHIFT 44U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PBE_CLRMSK 0xFFFFEFFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PBE_EN 0x0000100000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_ISP_SHIFT 43U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_ISP_CLRMSK 0xFFFFF7FFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_ISP_EN 0x0000080000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PM_SHIFT 42U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PM_CLRMSK 0xFFFFFBFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PM_EN 0x0000040000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TDM_SHIFT 41U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TDM_CLRMSK 0xFFFFFDFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TDM_EN 0x0000020000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_CDM_SHIFT 40U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_CDM_CLRMSK 0xFFFFFEFFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_CDM_EN 0x0000010000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_SHIFT 39U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_CLRMSK 0xFFFFFF7FFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_EN 0x0000008000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_SHIFT 38U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_CLRMSK 0xFFFFFFBFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_EN 0x0000004000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_SHIFT 37U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_CLRMSK 0xFFFFFFDFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_EN 0x0000002000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_VDM_SHIFT 36U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_VDM_CLRMSK 0xFFFFFFEFFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_VDM_EN 0x0000001000000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_SHIFT 35U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_CLRMSK 0xFFFFFFF7FFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_EN 0x0000000800000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_SHIFT 34U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_CLRMSK 0xFFFFFFFBFFFFFFFFULL +#define 
ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_EN 0x0000000400000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_SHIFT 33U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_CLRMSK 0xFFFFFFFDFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_EN 0x0000000200000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_SHIFT 32U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_CLRMSK 0xFFFFFFFEFFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_EN 0x0000000100000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_SHIFT 31U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_CLRMSK 0xFFFFFFFF7FFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_EN 0x0000000080000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_SHIFT 30U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_CLRMSK 0xFFFFFFFFBFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_EN 0x0000000040000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_TE_SHIFT 29U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_TE_CLRMSK 0xFFFFFFFFDFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_TE_EN 0x0000000020000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_VCE_SHIFT 28U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_VCE_CLRMSK 0xFFFFFFFFEFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PB_VCE_EN 0x0000000010000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT 27U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK 0xFFFFFFFFF7FFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN 0x0000000008000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT 26U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK 0xFFFFFFFFFBFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_RAY_EN 0x0000000004000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT 25U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK 0xFFFFFFFFFDFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN 0x0000000002000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT 24U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK 0xFFFFFFFFFEFFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPU_EN 0x0000000001000000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT 23U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK 0xFFFFFFFFFF7FFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN 0x0000000000800000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT 22U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK 0xFFFFFFFFFFBFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TLA_EN 0x0000000000400000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT 21U +#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK 0xFFFFFFFFFFDFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN 0x0000000000200000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT 20U +#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK 0xFFFFFFFFFFEFFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_BYP_CC_EN 0x0000000000100000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT 19U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK 0xFFFFFFFFFFF7FFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MCU_EN 0x0000000000080000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT 18U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK 0xFFFFFFFFFFFBFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_PDS_EN 0x0000000000040000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT 17U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK 0xFFFFFFFFFFFDFFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TPF_EN 0x0000000000020000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT 16U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK 0xFFFFFFFFFFFEFFFFULL +#define 
ROGUE_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN 0x0000000000010000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT 15U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK 0xFFFFFFFFFFFF7FFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN 0x0000000000008000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT 14U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK 0xFFFFFFFFFFFFBFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_USC_EN 0x0000000000004000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT 13U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK 0xFFFFFFFFFFFFDFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_META_EN 0x0000000000002000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT 12U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK 0xFFFFFFFFFFFFEFFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_HOST_EN 0x0000000000001000ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT 11U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK 0xFFFFFFFFFFFFF7FFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN 0x0000000000000800ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT 10U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK 0xFFFFFFFFFFFFFBFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN 0x0000000000000400ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT 9U +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK 0xFFFFFFFFFFFFFDFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN 0x0000000000000200ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT 8U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK 0xFFFFFFFFFFFFFEFFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_FRC_EN 0x0000000000000100ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT 7U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK 0xFFFFFFFFFFFFFF7FULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXE_EN 0x0000000000000080ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT 6U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK 0xFFFFFFFFFFFFFFBFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_VXD_EN 0x0000000000000040ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT 5U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK 0xFFFFFFFFFFFFFFDFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN 0x0000000000000020ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT 4U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK 0xFFFFFFFFFFFFFFEFULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_MMU_EN 0x0000000000000010ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT 3U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK 0xFFFFFFFFFFFFFFF7ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN 0x0000000000000008ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT 2U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK 0xFFFFFFFFFFFFFFFBULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN 0x0000000000000004ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT 1U +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK 0xFFFFFFFFFFFFFFFDULL +#define ROGUE_CR_SLC_CTRL_BYPASS_DM_TA_EN 0x0000000000000002ULL +#define ROGUE_CR_SLC_CTRL_BYPASS_ALL_SHIFT 0U +#define ROGUE_CR_SLC_CTRL_BYPASS_ALL_CLRMSK 0xFFFFFFFFFFFFFFFEULL +#define ROGUE_CR_SLC_CTRL_BYPASS_ALL_EN 0x0000000000000001ULL + +/* Register ROGUE_CR_SLC_STATUS1 */ +#define ROGUE_CR_SLC_STATUS1 0x3870U +#define ROGUE_CR_SLC_STATUS1_MASKFULL 0x800003FF03FFFFFFULL +#define ROGUE_CR_SLC_STATUS1_PAUSED_SHIFT 63U +#define ROGUE_CR_SLC_STATUS1_PAUSED_CLRMSK 0x7FFFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC_STATUS1_PAUSED_EN 0x8000000000000000ULL +#define ROGUE_CR_SLC_STATUS1_READS1_SHIFT 32U +#define ROGUE_CR_SLC_STATUS1_READS1_CLRMSK 
0xFFFFFC00FFFFFFFFULL +#define ROGUE_CR_SLC_STATUS1_READS0_SHIFT 16U +#define ROGUE_CR_SLC_STATUS1_READS0_CLRMSK 0xFFFFFFFFFC00FFFFULL +#define ROGUE_CR_SLC_STATUS1_READS1_EXT_SHIFT 8U +#define ROGUE_CR_SLC_STATUS1_READS1_EXT_CLRMSK 0xFFFFFFFFFFFF00FFULL +#define ROGUE_CR_SLC_STATUS1_READS0_EXT_SHIFT 0U +#define ROGUE_CR_SLC_STATUS1_READS0_EXT_CLRMSK 0xFFFFFFFFFFFFFF00ULL + +/* Register ROGUE_CR_SLC_IDLE */ +#define ROGUE_CR_SLC_IDLE 0x3898U +#define ROGUE_CR_SLC_IDLE__XE_MEM__MASKFULL 0x00000000000003FFULL +#define ROGUE_CR_SLC_IDLE_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_SLC_IDLE_MH_SYSARB1_SHIFT 9U +#define ROGUE_CR_SLC_IDLE_MH_SYSARB1_CLRMSK 0xFFFFFDFFU +#define ROGUE_CR_SLC_IDLE_MH_SYSARB1_EN 0x00000200U +#define ROGUE_CR_SLC_IDLE_MH_SYSARB0_SHIFT 8U +#define ROGUE_CR_SLC_IDLE_MH_SYSARB0_CLRMSK 0xFFFFFEFFU +#define ROGUE_CR_SLC_IDLE_MH_SYSARB0_EN 0x00000100U +#define ROGUE_CR_SLC_IDLE_IMGBV4_SHIFT 7U +#define ROGUE_CR_SLC_IDLE_IMGBV4_CLRMSK 0xFFFFFF7FU +#define ROGUE_CR_SLC_IDLE_IMGBV4_EN 0x00000080U +#define ROGUE_CR_SLC_IDLE_CACHE_BANKS_SHIFT 6U +#define ROGUE_CR_SLC_IDLE_CACHE_BANKS_CLRMSK 0xFFFFFFBFU +#define ROGUE_CR_SLC_IDLE_CACHE_BANKS_EN 0x00000040U +#define ROGUE_CR_SLC_IDLE_RBOFIFO_SHIFT 5U +#define ROGUE_CR_SLC_IDLE_RBOFIFO_CLRMSK 0xFFFFFFDFU +#define ROGUE_CR_SLC_IDLE_RBOFIFO_EN 0x00000020U +#define ROGUE_CR_SLC_IDLE_FRC_CONV_SHIFT 4U +#define ROGUE_CR_SLC_IDLE_FRC_CONV_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_SLC_IDLE_FRC_CONV_EN 0x00000010U +#define ROGUE_CR_SLC_IDLE_VXE_CONV_SHIFT 3U +#define ROGUE_CR_SLC_IDLE_VXE_CONV_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_SLC_IDLE_VXE_CONV_EN 0x00000008U +#define ROGUE_CR_SLC_IDLE_VXD_CONV_SHIFT 2U +#define ROGUE_CR_SLC_IDLE_VXD_CONV_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_SLC_IDLE_VXD_CONV_EN 0x00000004U +#define ROGUE_CR_SLC_IDLE_BIF1_CONV_SHIFT 1U +#define ROGUE_CR_SLC_IDLE_BIF1_CONV_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_SLC_IDLE_BIF1_CONV_EN 0x00000002U +#define ROGUE_CR_SLC_IDLE_CBAR_SHIFT 0U +#define ROGUE_CR_SLC_IDLE_CBAR_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_SLC_IDLE_CBAR_EN 0x00000001U + +/* Register ROGUE_CR_SLC_STATUS2 */ +#define ROGUE_CR_SLC_STATUS2 0x3908U +#define ROGUE_CR_SLC_STATUS2_MASKFULL 0x000003FF03FFFFFFULL +#define ROGUE_CR_SLC_STATUS2_READS3_SHIFT 32U +#define ROGUE_CR_SLC_STATUS2_READS3_CLRMSK 0xFFFFFC00FFFFFFFFULL +#define ROGUE_CR_SLC_STATUS2_READS2_SHIFT 16U +#define ROGUE_CR_SLC_STATUS2_READS2_CLRMSK 0xFFFFFFFFFC00FFFFULL +#define ROGUE_CR_SLC_STATUS2_READS3_EXT_SHIFT 8U +#define ROGUE_CR_SLC_STATUS2_READS3_EXT_CLRMSK 0xFFFFFFFFFFFF00FFULL +#define ROGUE_CR_SLC_STATUS2_READS2_EXT_SHIFT 0U +#define ROGUE_CR_SLC_STATUS2_READS2_EXT_CLRMSK 0xFFFFFFFFFFFFFF00ULL + +/* Register ROGUE_CR_SLC_CTRL_MISC2 */ +#define ROGUE_CR_SLC_CTRL_MISC2 0x3930U +#define ROGUE_CR_SLC_CTRL_MISC2_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT 0U +#define ROGUE_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK 0x00000000U + +/* Register ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE */ +#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE 0x3938U +#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL 0x0000000000000001ULL +#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT 0U +#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN 0x00000001U + +/* Register ROGUE_CR_USC_UVS0_CHECKSUM */ +#define ROGUE_CR_USC_UVS0_CHECKSUM 0x5000U +#define ROGUE_CR_USC_UVS0_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT 0U +#define 
ROGUE_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_USC_UVS1_CHECKSUM */ +#define ROGUE_CR_USC_UVS1_CHECKSUM 0x5008U +#define ROGUE_CR_USC_UVS1_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_USC_UVS2_CHECKSUM */ +#define ROGUE_CR_USC_UVS2_CHECKSUM 0x5010U +#define ROGUE_CR_USC_UVS2_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_USC_UVS3_CHECKSUM */ +#define ROGUE_CR_USC_UVS3_CHECKSUM 0x5018U +#define ROGUE_CR_USC_UVS3_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PPP_SIGNATURE */ +#define ROGUE_CR_PPP_SIGNATURE 0x5020U +#define ROGUE_CR_PPP_SIGNATURE_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_PPP_SIGNATURE_VALUE_SHIFT 0U +#define ROGUE_CR_PPP_SIGNATURE_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_TE_SIGNATURE */ +#define ROGUE_CR_TE_SIGNATURE 0x5028U +#define ROGUE_CR_TE_SIGNATURE_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_TE_SIGNATURE_VALUE_SHIFT 0U +#define ROGUE_CR_TE_SIGNATURE_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_TE_CHECKSUM */ +#define ROGUE_CR_TE_CHECKSUM 0x5110U +#define ROGUE_CR_TE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_TE_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_TE_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_USC_UVB_CHECKSUM */ +#define ROGUE_CR_USC_UVB_CHECKSUM 0x5118U +#define ROGUE_CR_USC_UVB_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_USC_UVB_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_VCE_CHECKSUM */ +#define ROGUE_CR_VCE_CHECKSUM 0x5030U +#define ROGUE_CR_VCE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_VCE_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_VCE_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_ISP_PDS_CHECKSUM */ +#define ROGUE_CR_ISP_PDS_CHECKSUM 0x5038U +#define ROGUE_CR_ISP_PDS_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_ISP_TPF_CHECKSUM */ +#define ROGUE_CR_ISP_TPF_CHECKSUM 0x5040U +#define ROGUE_CR_ISP_TPF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_TFPU_PLANE0_CHECKSUM */ +#define ROGUE_CR_TFPU_PLANE0_CHECKSUM 0x5048U +#define ROGUE_CR_TFPU_PLANE0_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_TFPU_PLANE1_CHECKSUM */ +#define ROGUE_CR_TFPU_PLANE1_CHECKSUM 0x5050U +#define ROGUE_CR_TFPU_PLANE1_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PBE_CHECKSUM */ +#define ROGUE_CR_PBE_CHECKSUM 0x5058U +#define ROGUE_CR_PBE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_PBE_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_PBE_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PDS_DOUTM_STM_SIGNATURE */ +#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE 0x5060U +#define 
ROGUE_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT 0U +#define ROGUE_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_IFPU_ISP_CHECKSUM */ +#define ROGUE_CR_IFPU_ISP_CHECKSUM 0x5068U +#define ROGUE_CR_IFPU_ISP_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_USC_UVS4_CHECKSUM */ +#define ROGUE_CR_USC_UVS4_CHECKSUM 0x5100U +#define ROGUE_CR_USC_UVS4_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_USC_UVS5_CHECKSUM */ +#define ROGUE_CR_USC_UVS5_CHECKSUM 0x5108U +#define ROGUE_CR_USC_UVS5_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PPP_CLIP_CHECKSUM */ +#define ROGUE_CR_PPP_CLIP_CHECKSUM 0x5120U +#define ROGUE_CR_PPP_CLIP_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT 0U +#define ROGUE_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PERF_TA_PHASE */ +#define ROGUE_CR_PERF_TA_PHASE 0x6008U +#define ROGUE_CR_PERF_TA_PHASE_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_PERF_TA_PHASE_COUNT_SHIFT 0U +#define ROGUE_CR_PERF_TA_PHASE_COUNT_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PERF_3D_PHASE */ +#define ROGUE_CR_PERF_3D_PHASE 0x6010U +#define ROGUE_CR_PERF_3D_PHASE_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_PERF_3D_PHASE_COUNT_SHIFT 0U +#define ROGUE_CR_PERF_3D_PHASE_COUNT_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PERF_COMPUTE_PHASE */ +#define ROGUE_CR_PERF_COMPUTE_PHASE 0x6018U +#define ROGUE_CR_PERF_COMPUTE_PHASE_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT 0U +#define ROGUE_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PERF_TA_CYCLE */ +#define ROGUE_CR_PERF_TA_CYCLE 0x6020U +#define ROGUE_CR_PERF_TA_CYCLE_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_PERF_TA_CYCLE_COUNT_SHIFT 0U +#define ROGUE_CR_PERF_TA_CYCLE_COUNT_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PERF_3D_CYCLE */ +#define ROGUE_CR_PERF_3D_CYCLE 0x6028U +#define ROGUE_CR_PERF_3D_CYCLE_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_PERF_3D_CYCLE_COUNT_SHIFT 0U +#define ROGUE_CR_PERF_3D_CYCLE_COUNT_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PERF_COMPUTE_CYCLE */ +#define ROGUE_CR_PERF_COMPUTE_CYCLE 0x6030U +#define ROGUE_CR_PERF_COMPUTE_CYCLE_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT 0U +#define ROGUE_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PERF_TA_OR_3D_CYCLE */ +#define ROGUE_CR_PERF_TA_OR_3D_CYCLE 0x6038U +#define ROGUE_CR_PERF_TA_OR_3D_CYCLE_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT 0U +#define ROGUE_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PERF_INITIAL_TA_CYCLE */ +#define ROGUE_CR_PERF_INITIAL_TA_CYCLE 0x6040U +#define ROGUE_CR_PERF_INITIAL_TA_CYCLE_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT 0U +#define ROGUE_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK 0x00000000U + +/* Register ROGUE_CR_PERF_SLC0_READ_STALL */ +#define ROGUE_CR_PERF_SLC0_READ_STALL 0x60B8U +#define ROGUE_CR_PERF_SLC0_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL +#define 
ROGUE_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC0_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL 0x60C0U
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC1_READ_STALL */
+#define ROGUE_CR_PERF_SLC1_READ_STALL 0x60E0U
+#define ROGUE_CR_PERF_SLC1_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC1_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL 0x60E8U
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC2_READ_STALL */
+#define ROGUE_CR_PERF_SLC2_READ_STALL 0x6158U
+#define ROGUE_CR_PERF_SLC2_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC2_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL 0x6160U
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC3_READ_STALL */
+#define ROGUE_CR_PERF_SLC3_READ_STALL 0x6180U
+#define ROGUE_CR_PERF_SLC3_READ_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_SLC3_WRITE_STALL */
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL 0x6188U
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT 0U
+#define ROGUE_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PERF_3D_SPINUP */
+#define ROGUE_CR_PERF_3D_SPINUP 0x6220U
+#define ROGUE_CR_PERF_3D_SPINUP_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PERF_3D_SPINUP_CYCLES_SHIFT 0U
+#define ROGUE_CR_PERF_3D_SPINUP_CYCLES_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_AXI_ACE_LITE_CONFIGURATION */
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION 0x38C0U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL 0x00003FFFFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT 45U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_CLRMSK 0xFFFFDFFFFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN 0x0000200000000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT 37U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK 0xFFFFE01FFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT 36U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK \
+	0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN \
+	0x0000001000000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT 35U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK 0xFFFFFFF7FFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN 0x0000000800000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT 34U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN 0x0000000400000000ULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT 30U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK 0xFFFFFFFC3FFFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT 26U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK 0xFFFFFFFFC3FFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT 22U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK 0xFFFFFFFFFC3FFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT 20U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK 0xFFFFFFFFFFCFFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT 18U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK 0xFFFFFFFFFFF3FFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT 16U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT 14U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK 0xFFFFFFFFFFFF3FFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT 12U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK 0xFFFFFFFFFFFFCFFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT 10U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFF3FFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT 8U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT 4U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFFF0FULL
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT 0U
+#define ROGUE_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK 0xFFFFFFFFFFFFFFF0ULL
+
+/* Register ROGUE_CR_POWER_ESTIMATE_RESULT */
+#define ROGUE_CR_POWER_ESTIMATE_RESULT 0x6328U
+#define ROGUE_CR_POWER_ESTIMATE_RESULT_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT 0U
+#define ROGUE_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF */
+#define ROGUE_CR_TA_PERF 0x7600U
+#define ROGUE_CR_TA_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_TA_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TA_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TA_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TA_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TA_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TA_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TA_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TA_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TA_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TA_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TA_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TA_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TA_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TA_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TA_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TA_PERF_SELECT0 */
+#define ROGUE_CR_TA_PERF_SELECT0 0x7608U
+#define ROGUE_CR_TA_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECT1 */
+#define ROGUE_CR_TA_PERF_SELECT1 0x7610U
+#define ROGUE_CR_TA_PERF_SELECT1_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT1_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECT2 */
+#define ROGUE_CR_TA_PERF_SELECT2 0x7618U
+#define ROGUE_CR_TA_PERF_SELECT2_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT2_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECT3 */
+#define ROGUE_CR_TA_PERF_SELECT3 0x7620U
+#define ROGUE_CR_TA_PERF_SELECT3_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_MODE_SHIFT 21U
+#define ROGUE_CR_TA_PERF_SELECT3_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_SELECTED_BITS */
+#define ROGUE_CR_TA_PERF_SELECTED_BITS 0x7648U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_MASKFULL 0xFFFFFFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT 48U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK 0x0000FFFFFFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT 32U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK 0xFFFF0000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT 16U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK 0xFFFFFFFF0000FFFFULL
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT 0U
+#define ROGUE_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_0 */
+#define ROGUE_CR_TA_PERF_COUNTER_0 0x7650U
+#define ROGUE_CR_TA_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_1 */
+#define ROGUE_CR_TA_PERF_COUNTER_1 0x7658U
+#define ROGUE_CR_TA_PERF_COUNTER_1_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_1_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_1_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_2 */
+#define ROGUE_CR_TA_PERF_COUNTER_2 0x7660U
+#define ROGUE_CR_TA_PERF_COUNTER_2_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_2_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_2_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TA_PERF_COUNTER_3 */
+#define ROGUE_CR_TA_PERF_COUNTER_3 0x7668U
+#define ROGUE_CR_TA_PERF_COUNTER_3_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TA_PERF_COUNTER_3_REG_SHIFT 0U
+#define ROGUE_CR_TA_PERF_COUNTER_3_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_RASTERISATION_PERF */
+#define ROGUE_CR_RASTERISATION_PERF 0x7700U
+#define ROGUE_CR_RASTERISATION_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_RASTERISATION_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_RASTERISATION_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_RASTERISATION_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_RASTERISATION_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_RASTERISATION_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_RASTERISATION_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_RASTERISATION_PERF_SELECT0 */
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0 0x7708U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_RASTERISATION_PERF_COUNTER_0 */
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0 0x7750U
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_HUB_BIFPMCACHE_PERF */
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF 0x7800U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0 */
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0 0x7808U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 */
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 0x7850U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF */
+#define ROGUE_CR_TPU_MCU_L0_PERF 0x7900U
+#define ROGUE_CR_TPU_MCU_L0_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF_SELECT0 */
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0 0x7908U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0 */
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0 0x7950U
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_USC_PERF */
+#define ROGUE_CR_USC_PERF 0x8100U
+#define ROGUE_CR_USC_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_USC_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_USC_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_USC_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_USC_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_USC_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_USC_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_USC_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_USC_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_USC_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_USC_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_USC_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_USC_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_USC_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_USC_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_USC_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_USC_PERF_SELECT0 */
+#define ROGUE_CR_USC_PERF_SELECT0 0x8108U
+#define ROGUE_CR_USC_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_USC_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_USC_PERF_COUNTER_0 */
+#define ROGUE_CR_USC_PERF_COUNTER_0 0x8150U
+#define ROGUE_CR_USC_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_USC_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_USC_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_JONES_IDLE */
+#define ROGUE_CR_JONES_IDLE 0x8328U
+#define ROGUE_CR_JONES_IDLE_MASKFULL 0x0000000000007FFFULL
+#define ROGUE_CR_JONES_IDLE_TDM_SHIFT 14U
+#define ROGUE_CR_JONES_IDLE_TDM_CLRMSK 0xFFFFBFFFU
+#define ROGUE_CR_JONES_IDLE_TDM_EN 0x00004000U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_TLA_SHIFT 13U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK 0xFFFFDFFFU
+#define ROGUE_CR_JONES_IDLE_FB_CDC_TLA_EN 0x00002000U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_SHIFT 12U
+#define ROGUE_CR_JONES_IDLE_FB_CDC_CLRMSK 0xFFFFEFFFU
+#define ROGUE_CR_JONES_IDLE_FB_CDC_EN 0x00001000U
+#define ROGUE_CR_JONES_IDLE_MMU_SHIFT 11U
+#define ROGUE_CR_JONES_IDLE_MMU_CLRMSK 0xFFFFF7FFU
+#define ROGUE_CR_JONES_IDLE_MMU_EN 0x00000800U
+#define ROGUE_CR_JONES_IDLE_TLA_SHIFT 10U
+#define ROGUE_CR_JONES_IDLE_TLA_CLRMSK 0xFFFFFBFFU
+#define ROGUE_CR_JONES_IDLE_TLA_EN 0x00000400U
+#define ROGUE_CR_JONES_IDLE_GARTEN_SHIFT 9U
+#define ROGUE_CR_JONES_IDLE_GARTEN_CLRMSK 0xFFFFFDFFU
+#define ROGUE_CR_JONES_IDLE_GARTEN_EN 0x00000200U
+#define ROGUE_CR_JONES_IDLE_HOSTIF_SHIFT 8U
+#define ROGUE_CR_JONES_IDLE_HOSTIF_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_JONES_IDLE_HOSTIF_EN 0x00000100U
+#define ROGUE_CR_JONES_IDLE_SOCIF_SHIFT 7U
+#define ROGUE_CR_JONES_IDLE_SOCIF_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_JONES_IDLE_SOCIF_EN 0x00000080U
+#define ROGUE_CR_JONES_IDLE_TILING_SHIFT 6U
+#define ROGUE_CR_JONES_IDLE_TILING_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_JONES_IDLE_TILING_EN 0x00000040U
+#define ROGUE_CR_JONES_IDLE_IPP_SHIFT 5U
+#define ROGUE_CR_JONES_IDLE_IPP_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_JONES_IDLE_IPP_EN 0x00000020U
+#define ROGUE_CR_JONES_IDLE_USCS_SHIFT 4U
+#define ROGUE_CR_JONES_IDLE_USCS_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_JONES_IDLE_USCS_EN 0x00000010U
+#define ROGUE_CR_JONES_IDLE_PM_SHIFT 3U
+#define ROGUE_CR_JONES_IDLE_PM_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_JONES_IDLE_PM_EN 0x00000008U
+#define ROGUE_CR_JONES_IDLE_CDM_SHIFT 2U
+#define ROGUE_CR_JONES_IDLE_CDM_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_JONES_IDLE_CDM_EN 0x00000004U
+#define ROGUE_CR_JONES_IDLE_VDM_SHIFT 1U
+#define ROGUE_CR_JONES_IDLE_VDM_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_JONES_IDLE_VDM_EN 0x00000002U
+#define ROGUE_CR_JONES_IDLE_BIF_SHIFT 0U
+#define ROGUE_CR_JONES_IDLE_BIF_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_JONES_IDLE_BIF_EN 0x00000001U
+
+/* Register ROGUE_CR_TORNADO_PERF */
+#define ROGUE_CR_TORNADO_PERF 0x8228U
+#define ROGUE_CR_TORNADO_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_TORNADO_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TORNADO_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TORNADO_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TORNADO_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TORNADO_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TORNADO_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TORNADO_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TORNADO_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TORNADO_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TORNADO_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TORNADO_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TORNADO_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TORNADO_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TORNADO_PERF_SELECT0 */
+#define ROGUE_CR_TORNADO_PERF_SELECT0 0x8230U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TORNADO_PERF_COUNTER_0 */
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0 0x8268U
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_TEXAS_PERF */
+#define ROGUE_CR_TEXAS_PERF 0x8290U
+#define ROGUE_CR_TEXAS_PERF_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_TEXAS_PERF_CLR_5_SHIFT 6U
+#define ROGUE_CR_TEXAS_PERF_CLR_5_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_TEXAS_PERF_CLR_5_EN 0x00000040U
+#define ROGUE_CR_TEXAS_PERF_CLR_4_SHIFT 5U
+#define ROGUE_CR_TEXAS_PERF_CLR_4_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_TEXAS_PERF_CLR_4_EN 0x00000020U
+#define ROGUE_CR_TEXAS_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_TEXAS_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_TEXAS_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_TEXAS_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_TEXAS_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_TEXAS_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_TEXAS_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_TEXAS_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_TEXAS_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_TEXAS_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_TEXAS_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_TEXAS_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_TEXAS_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_TEXAS_PERF_SELECT0 */
+#define ROGUE_CR_TEXAS_PERF_SELECT0 0x8298U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MASKFULL 0x3FFF3FFF803FFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MODE_SHIFT 31U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_MODE_EN 0x0000000080000000ULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFC0FFFFULL
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_TEXAS_PERF_COUNTER_0 */
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0 0x82D8U
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_JONES_PERF */
+#define ROGUE_CR_JONES_PERF 0x8330U
+#define ROGUE_CR_JONES_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_JONES_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_JONES_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_JONES_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_JONES_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_JONES_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_JONES_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_JONES_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_JONES_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_JONES_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_JONES_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_JONES_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_JONES_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_JONES_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_JONES_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_JONES_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_JONES_PERF_SELECT0 */
+#define ROGUE_CR_JONES_PERF_SELECT0 0x8338U
+#define ROGUE_CR_JONES_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_JONES_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_JONES_PERF_COUNTER_0 */
+#define ROGUE_CR_JONES_PERF_COUNTER_0 0x8368U
+#define ROGUE_CR_JONES_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_JONES_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_JONES_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_BLACKPEARL_PERF */
+#define ROGUE_CR_BLACKPEARL_PERF 0x8400U
+#define ROGUE_CR_BLACKPEARL_PERF_MASKFULL 0x000000000000007FULL
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_5_SHIFT 6U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_5_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_5_EN 0x00000040U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_4_SHIFT 5U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_4_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_4_EN 0x00000020U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BLACKPEARL_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_BLACKPEARL_PERF_SELECT0 */
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0 0x8408U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MASKFULL 0x3FFF3FFF803FFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT 31U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_MODE_EN 0x0000000080000000ULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFC0FFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_BLACKPEARL_PERF_COUNTER_0 */
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0 0x8448U
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_PBE_PERF */
+#define ROGUE_CR_PBE_PERF 0x8478U
+#define ROGUE_CR_PBE_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_PBE_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_PBE_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_PBE_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_PBE_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_PBE_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_PBE_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_PBE_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_PBE_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_PBE_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_PBE_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_PBE_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_PBE_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_PBE_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_PBE_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_PBE_PERF_SELECT0 */
+#define ROGUE_CR_PBE_PERF_SELECT0 0x8480U
+#define ROGUE_CR_PBE_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_PBE_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_PBE_PERF_COUNTER_0 */
+#define ROGUE_CR_PBE_PERF_COUNTER_0 0x84B0U
+#define ROGUE_CR_PBE_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_PBE_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_PBE_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_OCP_REVINFO */
+#define ROGUE_CR_OCP_REVINFO 0x9000U
+#define ROGUE_CR_OCP_REVINFO_MASKFULL 0x00000007FFFFFFFFULL
+#define ROGUE_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT 33U
+#define ROGUE_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK 0xFFFFFFF9FFFFFFFFULL
+#define ROGUE_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT 32U
+#define ROGUE_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_OCP_REVINFO_HWINFO_MEMBUS_EN 0x0000000100000000ULL
+#define ROGUE_CR_OCP_REVINFO_REVISION_SHIFT 0U
+#define ROGUE_CR_OCP_REVINFO_REVISION_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register ROGUE_CR_OCP_SYSCONFIG */
+#define ROGUE_CR_OCP_SYSCONFIG 0x9010U
+#define ROGUE_CR_OCP_SYSCONFIG_MASKFULL 0x0000000000000FFFULL
+#define ROGUE_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT 10U
+#define ROGUE_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK 0xFFFFF3FFU
+#define ROGUE_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT 8U
+#define ROGUE_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK 0xFFFFFCFFU
+#define ROGUE_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT 6U
+#define ROGUE_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK 0xFFFFFF3FU
+#define ROGUE_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT 4U
+#define ROGUE_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK 0xFFFFFFCFU
+#define ROGUE_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT 2U
+#define ROGUE_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK 0xFFFFFFF3U
+#define ROGUE_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT 0U
+#define ROGUE_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK 0xFFFFFFFCU
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_RAW_0 */
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0 0x9020U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_RAW_1 */
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1 0x9028U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_RAW_2 */
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2 0x9030U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_0 */
+#define ROGUE_CR_OCP_IRQSTATUS_0 0x9038U
+#define ROGUE_CR_OCP_IRQSTATUS_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_1 */
+#define ROGUE_CR_OCP_IRQSTATUS_1 0x9040U
+#define ROGUE_CR_OCP_IRQSTATUS_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQSTATUS_2 */
+#define ROGUE_CR_OCP_IRQSTATUS_2 0x9048U
+#define ROGUE_CR_OCP_IRQSTATUS_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT 0U
+#define ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_SET_0 */
+#define ROGUE_CR_OCP_IRQENABLE_SET_0 0x9050U
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_SET_1 */
+#define ROGUE_CR_OCP_IRQENABLE_SET_1 0x9058U
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_SET_2 */
+#define ROGUE_CR_OCP_IRQENABLE_SET_2 0x9060U
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_CLR_0 */
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0 0x9068U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_CLR_1 */
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1 0x9070U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQENABLE_CLR_2 */
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2 0x9078U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT 0U
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_IRQ_EVENT */
+#define ROGUE_CR_OCP_IRQ_EVENT 0x9080U
+#define ROGUE_CR_OCP_IRQ_EVENT_MASKFULL 0x00000000000FFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT 19U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK 0xFFFFFFFFFFF7FFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN 0x0000000000080000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT 18U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN 0x0000000000040000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT 17U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK 0xFFFFFFFFFFFDFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN 0x0000000000020000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT 16U
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK 0xFFFFFFFFFFFEFFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN 0x0000000000010000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT 15U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000008000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT 14U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN 0x0000000000004000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT 13U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN 0x0000000000002000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT 12U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFEFFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN 0x0000000000001000ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT 11U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFFF7FFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000000800ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT 10U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN 0x0000000000000400ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT 9U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFFDFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN 0x0000000000000200ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT 8U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFFEFFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN 0x0000000000000100ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT 7U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000000080ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT 6U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN 0x0000000000000040ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT 5U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN 0x0000000000000020ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT 4U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN 0x0000000000000010ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT 3U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK 0xFFFFFFFFFFFFFFF7ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN 0x0000000000000008ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT 2U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN 0x0000000000000004ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT 1U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK 0xFFFFFFFFFFFFFFFDULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN 0x0000000000000002ULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT 0U
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN 0x0000000000000001ULL
+
+/* Register ROGUE_CR_OCP_DEBUG_CONFIG */
+#define ROGUE_CR_OCP_DEBUG_CONFIG 0x9088U
+#define ROGUE_CR_OCP_DEBUG_CONFIG_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_OCP_DEBUG_CONFIG_REG_SHIFT 0U
+#define ROGUE_CR_OCP_DEBUG_CONFIG_REG_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_OCP_DEBUG_CONFIG_REG_EN 0x00000001U
+
+/* Register ROGUE_CR_OCP_DEBUG_STATUS */
+#define ROGUE_CR_OCP_DEBUG_STATUS 0x9090U
+#define ROGUE_CR_OCP_DEBUG_STATUS_MASKFULL 0x001F1F77FFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT 51U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK 0xFFE7FFFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT 50U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK 0xFFFBFFFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN 0x0004000000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT 48U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK 0xFFFCFFFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT 43U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK 0xFFFFE7FFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT 42U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK 0xFFFFFBFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN 0x0000040000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT 40U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK 0xFFFFFCFFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT 38U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK 0xFFFFFFBFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN 0x0000004000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT 37U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK 0xFFFFFFDFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN 0x0000002000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT 36U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK 0xFFFFFFEFFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN 0x0000001000000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT 34U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK 0xFFFFFFFBFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN 0x0000000400000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT 33U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK 0xFFFFFFFDFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN 0x0000000200000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT 32U
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK 0xFFFFFFFEFFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN 0x0000000100000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT 31U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK 0xFFFFFFFF7FFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN 0x0000000080000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT 30U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK 0xFFFFFFFFBFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN 0x0000000040000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT 29U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK 0xFFFFFFFFDFFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN 0x0000000020000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT 27U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK 0xFFFFFFFFE7FFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT 26U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK 0xFFFFFFFFFBFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN 0x0000000004000000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT 24U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK 0xFFFFFFFFFCFFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT 23U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK 0xFFFFFFFFFF7FFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN 0x0000000000800000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT 22U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK 0xFFFFFFFFFFBFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN 0x0000000000400000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT 21U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN 0x0000000000200000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT 19U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK 0xFFFFFFFFFFE7FFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT 18U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK 0xFFFFFFFFFFFBFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN 0x0000000000040000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT 16U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK 0xFFFFFFFFFFFCFFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT 15U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK 0xFFFFFFFFFFFF7FFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN 0x0000000000008000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT 14U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK 0xFFFFFFFFFFFFBFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN 0x0000000000004000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT 13U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK 0xFFFFFFFFFFFFDFFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN 0x0000000000002000ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT 11U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK 0xFFFFFFFFFFFFE7FFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT 10U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK 0xFFFFFFFFFFFFFBFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN 0x0000000000000400ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT 8U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT 7U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK 0xFFFFFFFFFFFFFF7FULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN 0x0000000000000080ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT 6U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK 0xFFFFFFFFFFFFFFBFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN 0x0000000000000040ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT 5U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK 0xFFFFFFFFFFFFFFDFULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN 0x0000000000000020ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT 3U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK 0xFFFFFFFFFFFFFFE7ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT 2U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN 0x0000000000000004ULL
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT 0U
+#define ROGUE_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT 6U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN 0x00000040U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT 5U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_HOST_EN 0x00000020U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_META_SHIFT 4U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_META_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_META_EN 0x00000010U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT 3U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN 0x00000008U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT 2U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_TE_EN 0x00000004U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT 1U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN 0x00000002U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT 0U
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_TRUST_DM_TYPE_TLA_EN 0x00000001U
+
+#define ROGUE_CR_BIF_TRUST_DM_MASK 0x0000007FU
+
+/* Register ROGUE_CR_BIF_TRUST */
+#define ROGUE_CR_BIF_TRUST 0xA000U
+#define ROGUE_CR_BIF_TRUST_MASKFULL 0x00000000001FFFFFULL
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT 20U
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK 0xFFEFFFFFU
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN 0x00100000U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT 19U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK 0xFFF7FFFFU
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN 0x00080000U
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT 18U
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK 0xFFFBFFFFU
+#define ROGUE_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN 0x00040000U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT 17U
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK 0xFFFDFFFFU
+#define ROGUE_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN 0x00020000U
+#define ROGUE_CR_BIF_TRUST_ENABLE_SHIFT 16U
+#define ROGUE_CR_BIF_TRUST_ENABLE_CLRMSK 0xFFFEFFFFU
+#define ROGUE_CR_BIF_TRUST_ENABLE_EN 0x00010000U
+#define ROGUE_CR_BIF_TRUST_DM_TRUSTED_SHIFT 9U
+#define ROGUE_CR_BIF_TRUST_DM_TRUSTED_CLRMSK 0xFFFF01FFU
+#define ROGUE_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT 8U
+#define ROGUE_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK 0xFFFFFEFFU
+#define ROGUE_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN 0x00000100U
+#define ROGUE_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT 7U
+#define ROGUE_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK 0xFFFFFF7FU
+#define ROGUE_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN 0x00000080U
+#define ROGUE_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT 6U
+#define ROGUE_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK 0xFFFFFFBFU
+#define ROGUE_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN 0x00000040U
+#define ROGUE_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT 5U
+#define ROGUE_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK 0xFFFFFFDFU
+#define ROGUE_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN 0x00000020U
+#define ROGUE_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT 4U
+#define ROGUE_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN 0x00000010U
+#define ROGUE_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT 3U
+#define ROGUE_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN 0x00000008U
+#define ROGUE_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT 2U
+#define ROGUE_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN 0x00000004U
+#define ROGUE_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT 1U
+#define ROGUE_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN 0x00000002U
+#define ROGUE_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT 0U
+#define ROGUE_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN 0x00000001U
+
+/* Register ROGUE_CR_SYS_BUS_SECURE */
+#define ROGUE_CR_SYS_BUS_SECURE 0xA100U
+#define ROGUE_CR_SYS_BUS_SECURE__SECR__MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_SYS_BUS_SECURE_MASKFULL 0x0000000000000001ULL
+#define ROGUE_CR_SYS_BUS_SECURE_ENABLE_SHIFT 0U
+#define ROGUE_CR_SYS_BUS_SECURE_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SYS_BUS_SECURE_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_FBA_FC0_CHECKSUM */
+#define ROGUE_CR_FBA_FC0_CHECKSUM 0xD170U
+#define ROGUE_CR_FBA_FC0_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_FBA_FC1_CHECKSUM */
+#define ROGUE_CR_FBA_FC1_CHECKSUM 0xD178U
+#define ROGUE_CR_FBA_FC1_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_FBA_FC2_CHECKSUM */
+#define ROGUE_CR_FBA_FC2_CHECKSUM 0xD180U
+#define ROGUE_CR_FBA_FC2_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_FBA_FC3_CHECKSUM */
+#define ROGUE_CR_FBA_FC3_CHECKSUM 0xD188U
+#define ROGUE_CR_FBA_FC3_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_CLK_CTRL2 */
+#define ROGUE_CR_CLK_CTRL2 0xD200U
+#define ROGUE_CR_CLK_CTRL2_MASKFULL 0x0000000000000F33ULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_SHIFT 10U
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_CLRMSK 0xFFFFFFFFFFFFF3FFULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_ON 0x0000000000000400ULL
+#define ROGUE_CR_CLK_CTRL2_MCU_FBTC_AUTO 0x0000000000000800ULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_SHIFT 8U
+#define ROGUE_CR_CLK_CTRL2_VRDM_CLRMSK 0xFFFFFFFFFFFFFCFFULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_ON 0x0000000000000100ULL
+#define ROGUE_CR_CLK_CTRL2_VRDM_AUTO 0x0000000000000200ULL
+#define ROGUE_CR_CLK_CTRL2_SH_SHIFT 4U
+#define ROGUE_CR_CLK_CTRL2_SH_CLRMSK 0xFFFFFFFFFFFFFFCFULL
+#define ROGUE_CR_CLK_CTRL2_SH_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_SH_ON 0x0000000000000010ULL
+#define ROGUE_CR_CLK_CTRL2_SH_AUTO 0x0000000000000020ULL
+#define ROGUE_CR_CLK_CTRL2_FBA_SHIFT 0U
+#define ROGUE_CR_CLK_CTRL2_FBA_CLRMSK 0xFFFFFFFFFFFFFFFCULL
+#define ROGUE_CR_CLK_CTRL2_FBA_OFF 0x0000000000000000ULL
+#define ROGUE_CR_CLK_CTRL2_FBA_ON 0x0000000000000001ULL
+#define ROGUE_CR_CLK_CTRL2_FBA_AUTO 0x0000000000000002ULL
+
+/* Register ROGUE_CR_CLK_STATUS2 */
+#define ROGUE_CR_CLK_STATUS2 0xD208U
+#define ROGUE_CR_CLK_STATUS2_MASKFULL 0x0000000000000015ULL
+#define ROGUE_CR_CLK_STATUS2_VRDM_SHIFT 4U
+#define ROGUE_CR_CLK_STATUS2_VRDM_CLRMSK 0xFFFFFFFFFFFFFFEFULL
+#define ROGUE_CR_CLK_STATUS2_VRDM_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS2_VRDM_RUNNING 0x0000000000000010ULL
+#define ROGUE_CR_CLK_STATUS2_SH_SHIFT 2U
+#define ROGUE_CR_CLK_STATUS2_SH_CLRMSK 0xFFFFFFFFFFFFFFFBULL
+#define ROGUE_CR_CLK_STATUS2_SH_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS2_SH_RUNNING 0x0000000000000004ULL
+#define ROGUE_CR_CLK_STATUS2_FBA_SHIFT 0U
+#define ROGUE_CR_CLK_STATUS2_FBA_CLRMSK 0xFFFFFFFFFFFFFFFEULL
+#define ROGUE_CR_CLK_STATUS2_FBA_GATED 0x0000000000000000ULL
+#define ROGUE_CR_CLK_STATUS2_FBA_RUNNING 0x0000000000000001ULL
+
+/* Register ROGUE_CR_RPM_SHF_FPL */
+#define ROGUE_CR_RPM_SHF_FPL 0xD520U
+#define ROGUE_CR_RPM_SHF_FPL_MASKFULL 0x3FFFFFFFFFFFFFFCULL
+#define ROGUE_CR_RPM_SHF_FPL_SIZE_SHIFT 40U
+#define ROGUE_CR_RPM_SHF_FPL_SIZE_CLRMSK 0xC00000FFFFFFFFFFULL
+#define ROGUE_CR_RPM_SHF_FPL_BASE_SHIFT 2U
+#define ROGUE_CR_RPM_SHF_FPL_BASE_CLRMSK 0xFFFFFF0000000003ULL
+#define ROGUE_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT 2U
+#define ROGUE_CR_RPM_SHF_FPL_BASE_ALIGNSIZE 4U
+
+/* Register ROGUE_CR_RPM_SHF_FPL_READ */
+#define ROGUE_CR_RPM_SHF_FPL_READ 0xD528U
+#define ROGUE_CR_RPM_SHF_FPL_READ_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHF_FPL_READ_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_RPM_SHF_FPL_WRITE */
+#define ROGUE_CR_RPM_SHF_FPL_WRITE 0xD530U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_RPM_SHG_FPL */
+#define ROGUE_CR_RPM_SHG_FPL 0xD538U
+#define ROGUE_CR_RPM_SHG_FPL_MASKFULL 0x3FFFFFFFFFFFFFFCULL
+#define ROGUE_CR_RPM_SHG_FPL_SIZE_SHIFT 40U
+#define ROGUE_CR_RPM_SHG_FPL_SIZE_CLRMSK 0xC00000FFFFFFFFFFULL
+#define ROGUE_CR_RPM_SHG_FPL_BASE_SHIFT 2U
+#define ROGUE_CR_RPM_SHG_FPL_BASE_CLRMSK 0xFFFFFF0000000003ULL
+#define ROGUE_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT 2U
+#define ROGUE_CR_RPM_SHG_FPL_BASE_ALIGNSIZE 4U
+
+/* Register ROGUE_CR_RPM_SHG_FPL_READ */
+#define ROGUE_CR_RPM_SHG_FPL_READ 0xD540U
+#define ROGUE_CR_RPM_SHG_FPL_READ_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHG_FPL_READ_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_RPM_SHG_FPL_WRITE */
+#define ROGUE_CR_RPM_SHG_FPL_WRITE 0xD548U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_MASKFULL 0x00000000007FFFFFULL
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT 22U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK 0xFFBFFFFFU
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN 0x00400000U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT 0U
+#define ROGUE_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK 0xFFC00000U
+
+/* Register ROGUE_CR_SH_PERF */
+#define ROGUE_CR_SH_PERF 0xD5F8U
+#define ROGUE_CR_SH_PERF_MASKFULL 0x000000000000001FULL
+#define ROGUE_CR_SH_PERF_CLR_3_SHIFT 4U
+#define ROGUE_CR_SH_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define ROGUE_CR_SH_PERF_CLR_3_EN 0x00000010U
+#define ROGUE_CR_SH_PERF_CLR_2_SHIFT 3U
+#define ROGUE_CR_SH_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define ROGUE_CR_SH_PERF_CLR_2_EN 0x00000008U
+#define ROGUE_CR_SH_PERF_CLR_1_SHIFT 2U
+#define ROGUE_CR_SH_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define ROGUE_CR_SH_PERF_CLR_1_EN 0x00000004U
+#define ROGUE_CR_SH_PERF_CLR_0_SHIFT 1U
+#define ROGUE_CR_SH_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define ROGUE_CR_SH_PERF_CLR_0_EN 0x00000002U
+#define ROGUE_CR_SH_PERF_CTRL_ENABLE_SHIFT 0U
+#define ROGUE_CR_SH_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_SH_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register ROGUE_CR_SH_PERF_SELECT0 */
+#define ROGUE_CR_SH_PERF_SELECT0 0xD600U
+#define ROGUE_CR_SH_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define ROGUE_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_MODE_SHIFT 21U
+#define ROGUE_CR_SH_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define ROGUE_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define ROGUE_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define ROGUE_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define ROGUE_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register ROGUE_CR_SH_PERF_COUNTER_0 */
+#define ROGUE_CR_SH_PERF_COUNTER_0 0xD628U
+#define ROGUE_CR_SH_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SH_PERF_COUNTER_0_REG_SHIFT 0U
+#define ROGUE_CR_SH_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHF_SHG_CHECKSUM */
+#define ROGUE_CR_SHF_SHG_CHECKSUM 0xD1C0U
+#define ROGUE_CR_SHF_SHG_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM */
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM 0xD1C8U
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHF_VARY_BIF_CHECKSUM */
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM 0xD1D0U
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_RPM_BIF_CHECKSUM */
+#define ROGUE_CR_RPM_BIF_CHECKSUM 0xD1D8U
+#define ROGUE_CR_RPM_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHG_BIF_CHECKSUM */
+#define ROGUE_CR_SHG_BIF_CHECKSUM 0xD1E0U
+#define ROGUE_CR_SHG_BIF_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register ROGUE_CR_SHG_FE_BE_CHECKSUM */
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM 0xD1E8U
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT 0U
+#define ROGUE_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK 0x00000000U
+
+/* Register DPX_CR_BF_PERF */
+#define DPX_CR_BF_PERF 0xC458U
+#define DPX_CR_BF_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_BF_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_BF_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BF_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_BF_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_BF_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_BF_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_BF_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_BF_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BF_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_BF_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_BF_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BF_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BF_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_BF_PERF_SELECT0 */
+#define DPX_CR_BF_PERF_SELECT0 0xC460U
+#define DPX_CR_BF_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_BF_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_BF_PERF_COUNTER_0 */
+#define DPX_CR_BF_PERF_COUNTER_0 0xC488U
+#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_BT_PERF */
+#define DPX_CR_BT_PERF 0xC3D0U
+#define DPX_CR_BT_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_BT_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_BT_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BT_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_BT_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_BT_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_BT_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_BT_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_BT_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BT_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_BT_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_BT_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BT_PERF_CLR_0_EN 0x00000002U
+#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT 0U
+#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BT_PERF_CTRL_ENABLE_EN 0x00000001U
+
+/* Register DPX_CR_BT_PERF_SELECT0 */
+#define DPX_CR_BT_PERF_SELECT0 0xC3D8U
+#define DPX_CR_BT_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT 48U
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT 32U
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT 21U
+#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL
+#define DPX_CR_BT_PERF_SELECT0_MODE_EN 0x0000000000200000ULL
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT 16U
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT 0U
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL
+
+/* Register DPX_CR_BT_PERF_COUNTER_0 */
+#define DPX_CR_BT_PERF_COUNTER_0 0xC420U
+#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT 0U
+#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK 0x00000000U
+
+/* Register DPX_CR_RQ_USC_DEBUG */
+#define DPX_CR_RQ_USC_DEBUG 0xC110U
+#define DPX_CR_RQ_USC_DEBUG_MASKFULL 0x00000000FFFFFFFFULL
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT 0U
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK 0xFFFFFFFF00000000ULL
+
+/* Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS */
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS 0xC5C8U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL 0x000000000000F775ULL
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT 12U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK 0xFFFF0FFFU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT 8U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK 0xFFFFF8FFU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT 5U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK 0xFFFFFF9FU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT 4U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN 0x00000010U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT 2U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN 0x00000004U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT 0U
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN 0x00000001U
+
+/* Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS */
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS 0xC5D0U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL 0x03FFFFFFFFFFFFF0ULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT 57U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK 0xFDFFFFFFFFFFFFFFULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN 0x0200000000000000ULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT 44U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK 0xFE000FFFFFFFFFFFULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT 40U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK 0xFFFFF0FFFFFFFFFFULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT 4U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK 0xFFFFFF000000000FULL
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT 4U
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE 16U
+
+/* Register DPX_CR_BIF_MMU_STATUS */
+#define DPX_CR_BIF_MMU_STATUS 0xC5D8U
+#define DPX_CR_BIF_MMU_STATUS_MASKFULL 0x000000000FFFFFF7ULL
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT 20U
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK 0xF00FFFFFU
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT 12U
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK 0xFFF00FFFU
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT 4U
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK 0xFFFFF00FU
+#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT 2U
+#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_BIF_MMU_STATUS_STALLED_EN 0x00000004U
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT 1U
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN 0x00000002U
+#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT 0U
+#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK 0xFFFFFFFEU
+#define DPX_CR_BIF_MMU_STATUS_BUSY_EN 0x00000001U
+
+/* Register DPX_CR_RT_PERF */
+#define DPX_CR_RT_PERF 0xC700U
+#define DPX_CR_RT_PERF_MASKFULL 0x000000000000001FULL
+#define DPX_CR_RT_PERF_CLR_3_SHIFT 4U
+#define DPX_CR_RT_PERF_CLR_3_CLRMSK 0xFFFFFFEFU
+#define DPX_CR_RT_PERF_CLR_3_EN 0x00000010U
+#define DPX_CR_RT_PERF_CLR_2_SHIFT 3U
+#define DPX_CR_RT_PERF_CLR_2_CLRMSK 0xFFFFFFF7U
+#define DPX_CR_RT_PERF_CLR_2_EN 0x00000008U
+#define DPX_CR_RT_PERF_CLR_1_SHIFT 2U
+#define DPX_CR_RT_PERF_CLR_1_CLRMSK 0xFFFFFFFBU
+#define DPX_CR_RT_PERF_CLR_1_EN 0x00000004U
+#define DPX_CR_RT_PERF_CLR_0_SHIFT 1U
+#define DPX_CR_RT_PERF_CLR_0_CLRMSK 0xFFFFFFFDU
+#define DPX_CR_RT_PERF_CLR_0_EN 0x00000002U
+#define
DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT 0U +#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU +#define DPX_CR_RT_PERF_CTRL_ENABLE_EN 0x00000001U + +/* Register DPX_CR_RT_PERF_SELECT0 */ +#define DPX_CR_RT_PERF_SELECT0 0xC708U +#define DPX_CR_RT_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL +#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT 48U +#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL +#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT 32U +#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL +#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT 21U +#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL +#define DPX_CR_RT_PERF_SELECT0_MODE_EN 0x0000000000200000ULL +#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT 16U +#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL +#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT 0U +#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL + +/* Register DPX_CR_RT_PERF_COUNTER_0 */ +#define DPX_CR_RT_PERF_COUNTER_0 0xC730U +#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL +#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT 0U +#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK 0x00000000U + +/* Register DPX_CR_BX_TU_PERF */ +#define DPX_CR_BX_TU_PERF 0xC908U +#define DPX_CR_BX_TU_PERF_MASKFULL 0x000000000000001FULL +#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT 4U +#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK 0xFFFFFFEFU +#define DPX_CR_BX_TU_PERF_CLR_3_EN 0x00000010U +#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT 3U +#define DPX_CR_BX_TU_PERF_CLR_2_CLRMSK 0xFFFFFFF7U +#define DPX_CR_BX_TU_PERF_CLR_2_EN 0x00000008U +#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT 2U +#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK 0xFFFFFFFBU +#define DPX_CR_BX_TU_PERF_CLR_1_EN 0x00000004U +#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT 1U +#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK 0xFFFFFFFDU +#define DPX_CR_BX_TU_PERF_CLR_0_EN 0x00000002U +#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT 0U +#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK 0xFFFFFFFEU +#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN 0x00000001U + +/* Register DPX_CR_BX_TU_PERF_SELECT0 */ +#define DPX_CR_BX_TU_PERF_SELECT0 0xC910U +#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL 0x3FFF3FFF003FFFFFULL +#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT 48U +#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK 0xC000FFFFFFFFFFFFULL +#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT 32U +#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK 0xFFFFC000FFFFFFFFULL +#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT 21U +#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK 0xFFFFFFFFFFDFFFFFULL +#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN 0x0000000000200000ULL +#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT 16U +#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK 0xFFFFFFFFFFE0FFFFULL +#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT 0U +#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK 0xFFFFFFFFFFFF0000ULL + +/* Register DPX_CR_BX_TU_PERF_COUNTER_0 */ +#define DPX_CR_BX_TU_PERF_COUNTER_0 0xC938U +#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL 0x00000000FFFFFFFFULL +#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT 0U +#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK 0x00000000U + +/* Register DPX_CR_RS_PDS_RR_CHECKSUM */ +#define DPX_CR_RS_PDS_RR_CHECKSUM 0xC0F0U +#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL 0x00000000FFFFFFFFULL +#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT 0U +#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK 0xFFFFFFFF00000000ULL + +/* Register ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT */ +#define 
ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT 0xE140U +#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL 0x00000000000000FFULL +#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT 0U +#define ROGUE_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_MMU_CBASE_MAPPING */ +#define ROGUE_CR_MMU_CBASE_MAPPING 0xE148U +#define ROGUE_CR_MMU_CBASE_MAPPING_MASKFULL 0x000000000FFFFFFFULL +#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT 0U +#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK 0xF0000000U +#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT 12U +#define ROGUE_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE 4096U + +/* Register ROGUE_CR_MMU_FAULT_STATUS */ +#define ROGUE_CR_MMU_FAULT_STATUS 0xE150U +#define ROGUE_CR_MMU_FAULT_STATUS_MASKFULL 0xFFFFFFFFFFFFFFFFULL +#define ROGUE_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT 28U +#define ROGUE_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK 0x000000000FFFFFFFULL +#define ROGUE_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT 20U +#define ROGUE_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK 0xFFFFFFFFF00FFFFFULL +#define ROGUE_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT 12U +#define ROGUE_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK 0xFFFFFFFFFFF00FFFULL +#define ROGUE_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT 6U +#define ROGUE_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK 0xFFFFFFFFFFFFF03FULL +#define ROGUE_CR_MMU_FAULT_STATUS_LEVEL_SHIFT 4U +#define ROGUE_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK 0xFFFFFFFFFFFFFFCFULL +#define ROGUE_CR_MMU_FAULT_STATUS_RNW_SHIFT 3U +#define ROGUE_CR_MMU_FAULT_STATUS_RNW_CLRMSK 0xFFFFFFFFFFFFFFF7ULL +#define ROGUE_CR_MMU_FAULT_STATUS_RNW_EN 0x0000000000000008ULL +#define ROGUE_CR_MMU_FAULT_STATUS_TYPE_SHIFT 1U +#define ROGUE_CR_MMU_FAULT_STATUS_TYPE_CLRMSK 0xFFFFFFFFFFFFFFF9ULL +#define ROGUE_CR_MMU_FAULT_STATUS_FAULT_SHIFT 0U +#define ROGUE_CR_MMU_FAULT_STATUS_FAULT_CLRMSK 0xFFFFFFFFFFFFFFFEULL +#define ROGUE_CR_MMU_FAULT_STATUS_FAULT_EN 0x0000000000000001ULL + +/* Register ROGUE_CR_MMU_FAULT_STATUS_META */ +#define ROGUE_CR_MMU_FAULT_STATUS_META 0xE158U +#define ROGUE_CR_MMU_FAULT_STATUS_META_MASKFULL 0xFFFFFFFFFFFFFFFFULL +#define ROGUE_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT 28U +#define ROGUE_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK 0x000000000FFFFFFFULL +#define ROGUE_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT 20U +#define ROGUE_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK 0xFFFFFFFFF00FFFFFULL +#define ROGUE_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT 12U +#define ROGUE_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK 0xFFFFFFFFFFF00FFFULL +#define ROGUE_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT 6U +#define ROGUE_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK 0xFFFFFFFFFFFFF03FULL +#define ROGUE_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT 4U +#define ROGUE_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK 0xFFFFFFFFFFFFFFCFULL +#define ROGUE_CR_MMU_FAULT_STATUS_META_RNW_SHIFT 3U +#define ROGUE_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK 0xFFFFFFFFFFFFFFF7ULL +#define ROGUE_CR_MMU_FAULT_STATUS_META_RNW_EN 0x0000000000000008ULL +#define ROGUE_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT 1U +#define ROGUE_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK 0xFFFFFFFFFFFFFFF9ULL +#define ROGUE_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT 0U +#define ROGUE_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK 0xFFFFFFFFFFFFFFFEULL +#define ROGUE_CR_MMU_FAULT_STATUS_META_FAULT_EN 0x0000000000000001ULL + +/* Register ROGUE_CR_SLC3_CTRL_MISC */ +#define ROGUE_CR_SLC3_CTRL_MISC 0xE200U +#define ROGUE_CR_SLC3_CTRL_MISC_MASKFULL 0x0000000000000107ULL +#define ROGUE_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT 8U +#define ROGUE_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK 0xFFFFFEFFU +#define 
ROGUE_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN 0x00000100U +#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT 0U +#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK 0xFFFFFFF8U +#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR 0x00000000U +#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH 0x00000001U +#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH 0x00000002U +#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH 0x00000003U +#define ROGUE_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH 0x00000004U + +/* Register ROGUE_CR_SLC3_SCRAMBLE */ +#define ROGUE_CR_SLC3_SCRAMBLE 0xE208U +#define ROGUE_CR_SLC3_SCRAMBLE_MASKFULL 0xFFFFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC3_SCRAMBLE_BITS_SHIFT 0U +#define ROGUE_CR_SLC3_SCRAMBLE_BITS_CLRMSK 0x0000000000000000ULL + +/* Register ROGUE_CR_SLC3_SCRAMBLE2 */ +#define ROGUE_CR_SLC3_SCRAMBLE2 0xE210U +#define ROGUE_CR_SLC3_SCRAMBLE2_MASKFULL 0xFFFFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC3_SCRAMBLE2_BITS_SHIFT 0U +#define ROGUE_CR_SLC3_SCRAMBLE2_BITS_CLRMSK 0x0000000000000000ULL + +/* Register ROGUE_CR_SLC3_SCRAMBLE3 */ +#define ROGUE_CR_SLC3_SCRAMBLE3 0xE218U +#define ROGUE_CR_SLC3_SCRAMBLE3_MASKFULL 0xFFFFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC3_SCRAMBLE3_BITS_SHIFT 0U +#define ROGUE_CR_SLC3_SCRAMBLE3_BITS_CLRMSK 0x0000000000000000ULL + +/* Register ROGUE_CR_SLC3_SCRAMBLE4 */ +#define ROGUE_CR_SLC3_SCRAMBLE4 0xE260U +#define ROGUE_CR_SLC3_SCRAMBLE4_MASKFULL 0xFFFFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC3_SCRAMBLE4_BITS_SHIFT 0U +#define ROGUE_CR_SLC3_SCRAMBLE4_BITS_CLRMSK 0x0000000000000000ULL + +/* Register ROGUE_CR_SLC3_STATUS */ +#define ROGUE_CR_SLC3_STATUS 0xE220U +#define ROGUE_CR_SLC3_STATUS_MASKFULL 0xFFFFFFFFFFFFFFFFULL +#define ROGUE_CR_SLC3_STATUS_WRITES1_SHIFT 48U +#define ROGUE_CR_SLC3_STATUS_WRITES1_CLRMSK 0x0000FFFFFFFFFFFFULL +#define ROGUE_CR_SLC3_STATUS_WRITES0_SHIFT 32U +#define ROGUE_CR_SLC3_STATUS_WRITES0_CLRMSK 0xFFFF0000FFFFFFFFULL +#define ROGUE_CR_SLC3_STATUS_READS1_SHIFT 16U +#define ROGUE_CR_SLC3_STATUS_READS1_CLRMSK 0xFFFFFFFF0000FFFFULL +#define ROGUE_CR_SLC3_STATUS_READS0_SHIFT 0U +#define ROGUE_CR_SLC3_STATUS_READS0_CLRMSK 0xFFFFFFFFFFFF0000ULL + +/* Register ROGUE_CR_SLC3_IDLE */ +#define ROGUE_CR_SLC3_IDLE 0xE228U +#define ROGUE_CR_SLC3_IDLE_MASKFULL 0x00000000000FFFFFULL +#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT 18U +#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK 0xFFF3FFFFU +#define ROGUE_CR_SLC3_IDLE_MMU_SHIFT 17U +#define ROGUE_CR_SLC3_IDLE_MMU_CLRMSK 0xFFFDFFFFU +#define ROGUE_CR_SLC3_IDLE_MMU_EN 0x00020000U +#define ROGUE_CR_SLC3_IDLE_RDI_SHIFT 16U +#define ROGUE_CR_SLC3_IDLE_RDI_CLRMSK 0xFFFEFFFFU +#define ROGUE_CR_SLC3_IDLE_RDI_EN 0x00010000U +#define ROGUE_CR_SLC3_IDLE_IMGBV4_SHIFT 12U +#define ROGUE_CR_SLC3_IDLE_IMGBV4_CLRMSK 0xFFFF0FFFU +#define ROGUE_CR_SLC3_IDLE_CACHE_BANKS_SHIFT 4U +#define ROGUE_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK 0xFFFFF00FU +#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT 2U +#define ROGUE_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK 0xFFFFFFF3U +#define ROGUE_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT 1U +#define ROGUE_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_SLC3_IDLE_ORDERQ_JONES_EN 0x00000002U +#define ROGUE_CR_SLC3_IDLE_XBAR_SHIFT 0U +#define ROGUE_CR_SLC3_IDLE_XBAR_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_SLC3_IDLE_XBAR_EN 0x00000001U + +/* Register ROGUE_CR_SLC3_FAULT_STOP_STATUS */ +#define ROGUE_CR_SLC3_FAULT_STOP_STATUS 0xE248U +#define ROGUE_CR_SLC3_FAULT_STOP_STATUS_MASKFULL 0x0000000000001FFFULL +#define 
ROGUE_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT 0U +#define ROGUE_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK 0xFFFFE000U + +/* Register ROGUE_CR_VDM_CONTEXT_STORE_MODE */ +#define ROGUE_CR_VDM_CONTEXT_STORE_MODE 0xF048U +#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MASKFULL 0x0000000000000003ULL +#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT 0U +#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK 0xFFFFFFFCU +#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX 0x00000000U +#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE 0x00000001U +#define ROGUE_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST 0x00000002U + +/* Register ROGUE_CR_CONTEXT_MAPPING0 */ +#define ROGUE_CR_CONTEXT_MAPPING0 0xF078U +#define ROGUE_CR_CONTEXT_MAPPING0_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_CONTEXT_MAPPING0_2D_SHIFT 24U +#define ROGUE_CR_CONTEXT_MAPPING0_2D_CLRMSK 0x00FFFFFFU +#define ROGUE_CR_CONTEXT_MAPPING0_CDM_SHIFT 16U +#define ROGUE_CR_CONTEXT_MAPPING0_CDM_CLRMSK 0xFF00FFFFU +#define ROGUE_CR_CONTEXT_MAPPING0_3D_SHIFT 8U +#define ROGUE_CR_CONTEXT_MAPPING0_3D_CLRMSK 0xFFFF00FFU +#define ROGUE_CR_CONTEXT_MAPPING0_TA_SHIFT 0U +#define ROGUE_CR_CONTEXT_MAPPING0_TA_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_CONTEXT_MAPPING1 */ +#define ROGUE_CR_CONTEXT_MAPPING1 0xF080U +#define ROGUE_CR_CONTEXT_MAPPING1_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_CONTEXT_MAPPING1_HOST_SHIFT 8U +#define ROGUE_CR_CONTEXT_MAPPING1_HOST_CLRMSK 0xFFFF00FFU +#define ROGUE_CR_CONTEXT_MAPPING1_TLA_SHIFT 0U +#define ROGUE_CR_CONTEXT_MAPPING1_TLA_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_CONTEXT_MAPPING2 */ +#define ROGUE_CR_CONTEXT_MAPPING2 0xF088U +#define ROGUE_CR_CONTEXT_MAPPING2_MASKFULL 0x0000000000FFFFFFULL +#define ROGUE_CR_CONTEXT_MAPPING2_ALIST0_SHIFT 16U +#define ROGUE_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK 0xFF00FFFFU +#define ROGUE_CR_CONTEXT_MAPPING2_TE0_SHIFT 8U +#define ROGUE_CR_CONTEXT_MAPPING2_TE0_CLRMSK 0xFFFF00FFU +#define ROGUE_CR_CONTEXT_MAPPING2_VCE0_SHIFT 0U +#define ROGUE_CR_CONTEXT_MAPPING2_VCE0_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_CONTEXT_MAPPING3 */ +#define ROGUE_CR_CONTEXT_MAPPING3 0xF090U +#define ROGUE_CR_CONTEXT_MAPPING3_MASKFULL 0x0000000000FFFFFFULL +#define ROGUE_CR_CONTEXT_MAPPING3_ALIST1_SHIFT 16U +#define ROGUE_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK 0xFF00FFFFU +#define ROGUE_CR_CONTEXT_MAPPING3_TE1_SHIFT 8U +#define ROGUE_CR_CONTEXT_MAPPING3_TE1_CLRMSK 0xFFFF00FFU +#define ROGUE_CR_CONTEXT_MAPPING3_VCE1_SHIFT 0U +#define ROGUE_CR_CONTEXT_MAPPING3_VCE1_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_BIF_JONES_OUTSTANDING_READ */ +#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ 0xF098U +#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT 0U +#define ROGUE_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ */ +#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ 0xF0A0U +#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT 0U +#define ROGUE_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_BIF_DUST_OUTSTANDING_READ */ +#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ 0xF0A8U +#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL 0x000000000000FFFFULL +#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT 0U +#define ROGUE_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK 0xFFFF0000U + +/* Register ROGUE_CR_CONTEXT_MAPPING4 */ +#define ROGUE_CR_CONTEXT_MAPPING4 0xF210U 
+#define ROGUE_CR_CONTEXT_MAPPING4_MASKFULL 0x0000FFFFFFFFFFFFULL +#define ROGUE_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT 40U +#define ROGUE_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK 0xFFFF00FFFFFFFFFFULL +#define ROGUE_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT 32U +#define ROGUE_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK 0xFFFFFF00FFFFFFFFULL +#define ROGUE_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT 24U +#define ROGUE_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK 0xFFFFFFFF00FFFFFFULL +#define ROGUE_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT 16U +#define ROGUE_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK 0xFFFFFFFFFF00FFFFULL +#define ROGUE_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT 8U +#define ROGUE_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK 0xFFFFFFFFFFFF00FFULL +#define ROGUE_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT 0U +#define ROGUE_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK 0xFFFFFFFFFFFFFF00ULL + +/* Register ROGUE_CR_MULTICORE_GPU */ +#define ROGUE_CR_MULTICORE_GPU 0xF300U +#define ROGUE_CR_MULTICORE_GPU_MASKFULL 0x000000000000007FULL +#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_SHIFT 6U +#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_CLRMSK 0xFFFFFFBFU +#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN 0x00000040U +#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_SHIFT 5U +#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_CLRMSK 0xFFFFFFDFU +#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN 0x00000020U +#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_SHIFT 4U +#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN 0x00000010U +#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_SHIFT 3U +#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN 0x00000008U +#define ROGUE_CR_MULTICORE_GPU_ID_SHIFT 0U +#define ROGUE_CR_MULTICORE_GPU_ID_CLRMSK 0xFFFFFFF8U + +/* Register ROGUE_CR_MULTICORE_SYSTEM */ +#define ROGUE_CR_MULTICORE_SYSTEM 0xF308U +#define ROGUE_CR_MULTICORE_SYSTEM_MASKFULL 0x000000000000000FULL +#define ROGUE_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT 0U +#define ROGUE_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK 0xFFFFFFF0U + +/* Register ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON */ +#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON 0xF310U +#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 30U +#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK 0x3FFFFFFFU +#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT 8U +#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK 0xC00000FFU +#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT 0U +#define ROGUE_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON */ +#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON 0xF320U +#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 30U +#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK 0x3FFFFFFFU +#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT 8U +#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK 0xC00000FFU +#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_SHIFT 0U +#define ROGUE_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON */ 
+#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON 0xF330U +#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL 0x00000000FFFFFFFFULL +#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT 30U +#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK 0x3FFFFFFFU +#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT 8U +#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK 0xC00000FFU +#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT 0U +#define ROGUE_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK 0xFFFFFF00U + +/* Register ROGUE_CR_ECC_RAM_ERR_INJ */ +#define ROGUE_CR_ECC_RAM_ERR_INJ 0xF340U +#define ROGUE_CR_ECC_RAM_ERR_INJ_MASKFULL 0x000000000000001FULL +#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT 4U +#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN 0x00000010U +#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_SHIFT 3U +#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_ECC_RAM_ERR_INJ_USC_EN 0x00000008U +#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT 2U +#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN 0x00000004U +#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT 1U +#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_ECC_RAM_ERR_INJ_RASCAL_EN 0x00000002U +#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_SHIFT 0U +#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_ECC_RAM_ERR_INJ_MARS_EN 0x00000001U + +/* Register ROGUE_CR_ECC_RAM_INIT_KICK */ +#define ROGUE_CR_ECC_RAM_INIT_KICK 0xF348U +#define ROGUE_CR_ECC_RAM_INIT_KICK_MASKFULL 0x000000000000001FULL +#define ROGUE_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_SHIFT 4U +#define ROGUE_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_EN 0x00000010U +#define ROGUE_CR_ECC_RAM_INIT_KICK_USC_SHIFT 3U +#define ROGUE_CR_ECC_RAM_INIT_KICK_USC_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_ECC_RAM_INIT_KICK_USC_EN 0x00000008U +#define ROGUE_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_SHIFT 2U +#define ROGUE_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_EN 0x00000004U +#define ROGUE_CR_ECC_RAM_INIT_KICK_RASCAL_SHIFT 1U +#define ROGUE_CR_ECC_RAM_INIT_KICK_RASCAL_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_ECC_RAM_INIT_KICK_RASCAL_EN 0x00000002U +#define ROGUE_CR_ECC_RAM_INIT_KICK_MARS_SHIFT 0U +#define ROGUE_CR_ECC_RAM_INIT_KICK_MARS_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_ECC_RAM_INIT_KICK_MARS_EN 0x00000001U + +/* Register ROGUE_CR_ECC_RAM_INIT_DONE */ +#define ROGUE_CR_ECC_RAM_INIT_DONE 0xF350U +#define ROGUE_CR_ECC_RAM_INIT_DONE_MASKFULL 0x000000000000001FULL +#define ROGUE_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_SHIFT 4U +#define ROGUE_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_EN 0x00000010U +#define ROGUE_CR_ECC_RAM_INIT_DONE_USC_SHIFT 3U +#define ROGUE_CR_ECC_RAM_INIT_DONE_USC_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_ECC_RAM_INIT_DONE_USC_EN 0x00000008U +#define ROGUE_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_SHIFT 2U +#define ROGUE_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_EN 0x00000004U +#define ROGUE_CR_ECC_RAM_INIT_DONE_RASCAL_SHIFT 1U +#define ROGUE_CR_ECC_RAM_INIT_DONE_RASCAL_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_ECC_RAM_INIT_DONE_RASCAL_EN 0x00000002U +#define ROGUE_CR_ECC_RAM_INIT_DONE_MARS_SHIFT 0U +#define 
ROGUE_CR_ECC_RAM_INIT_DONE_MARS_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_ECC_RAM_INIT_DONE_MARS_EN 0x00000001U + +/* Register ROGUE_CR_SAFETY_EVENT_ENABLE */ +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE 0xF390U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT 3U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN 0x00000008U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT 2U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN 0x00000004U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT 1U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN 0x00000002U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U + +/* Register ROGUE_CR_SAFETY_EVENT_STATUS */ +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE 0xF398U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__MASKFULL 0x000000000000007FULL +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_SHIFT 3U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_EN 0x00000008U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT 2U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN 0x00000004U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_SHIFT 1U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_EN 0x00000002U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 
0xFFFFFFFEU +#define ROGUE_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U + +/* Register ROGUE_CR_SAFETY_EVENT_CLEAR */ +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE 0xF3A0U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__MASKFULL 0x000000000000007FULL +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_SHIFT 3U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_EN 0x00000008U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_SHIFT 2U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_EN 0x00000004U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_SHIFT 1U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_EN 0x00000002U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU +#define ROGUE_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U + +/* Register ROGUE_CR_MTS_SAFETY_EVENT_ENABLE */ +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE 0xF3D8U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL 0x000000000000007FULL +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT 6U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK 0xFFFFFFBFU +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN 0x00000040U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT 5U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK 0xFFFFFFDFU +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN 0x00000020U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT 4U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK 0xFFFFFFEFU +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN 0x00000010U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT 3U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK 0xFFFFFFF7U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN 0x00000008U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT 2U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK 0xFFFFFFFBU +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN 0x00000004U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT 1U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK 0xFFFFFFFDU +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN 0x00000002U +#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT 0U +#define 
ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK 0xFFFFFFFEU
+#define ROGUE_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN 0x00000001U
+
+/* clang-format on */
+
+#endif /* PVR_ROGUE_CR_DEFS_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h
new file mode 100644
index 0000000000..46186b56ef
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_cr_defs_client.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_CR_DEFS_CLIENT_H
+#define PVR_ROGUE_CR_DEFS_CLIENT_H
+
+/* clang-format off */
+
+/*
+ * This register controls the anti-aliasing mode of the Tiling Co-Processor; independent control is
+ * provided in both the X & Y axes.
+ * This register needs to be set based on the ISP Samples Per Pixel a core supports.
+ *
+ * When ISP Samples Per Pixel = 1:
+ * 2xmsaa is achieved by enabling Y - TE does AA on Y plane only
+ * 4xmsaa is achieved by enabling Y and X - TE does AA on X and Y plane
+ * 8xmsaa not supported by XE cores
+ *
+ * When ISP Samples Per Pixel = 2:
+ * 2xmsaa is achieved by enabling X2 - does not affect TE
+ * 4xmsaa is achieved by enabling Y and X2 - TE does AA on Y plane only
+ * 8xmsaa is achieved by enabling Y, X and X2 - TE does AA on X and Y plane
+ * (8xmsaa is not supported by XE cores)
+ *
+ * When ISP Samples Per Pixel = 4:
+ * 2xmsaa is achieved by enabling X2 - does not affect TE
+ * 4xmsaa is achieved by enabling Y2 and X2 - TE does AA on Y plane only
+ * 8xmsaa not supported by XE cores
+ */
+/* Register ROGUE_CR_TE_AA */
+#define ROGUE_CR_TE_AA 0x0C00U
+#define ROGUE_CR_TE_AA_MASKFULL 0x000000000000000Full
+/* Y2
+ * Indicates 4xmsaa when X2 and Y2 are set to 1. This does not affect TE and is only used within
+ * TPW.
+ */
+#define ROGUE_CR_TE_AA_Y2_SHIFT 3
+#define ROGUE_CR_TE_AA_Y2_CLRMSK 0xFFFFFFF7
+#define ROGUE_CR_TE_AA_Y2_EN 0x00000008
+/* Y
+ * Anti-Aliasing in Y Plane Enabled
+ */
+#define ROGUE_CR_TE_AA_Y_SHIFT 2
+#define ROGUE_CR_TE_AA_Y_CLRMSK 0xFFFFFFFB
+#define ROGUE_CR_TE_AA_Y_EN 0x00000004
+/* X
+ * Anti-Aliasing in X Plane Enabled
+ */
+#define ROGUE_CR_TE_AA_X_SHIFT 1
+#define ROGUE_CR_TE_AA_X_CLRMSK 0xFFFFFFFD
+#define ROGUE_CR_TE_AA_X_EN 0x00000002
+/* X2
+ * 2x Anti-Aliasing Enabled, affects PPP only
+ */
+#define ROGUE_CR_TE_AA_X2_SHIFT (0U)
+#define ROGUE_CR_TE_AA_X2_CLRMSK (0xFFFFFFFEU)
+#define ROGUE_CR_TE_AA_X2_EN (0x00000001U)
+
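For driver writers, the MSAA table in the comment above reduces to a small lookup. The sketch below is illustrative only; the helper name and the -EINVAL convention are ours, not part of this patch, and it covers just the 1-sample-per-pixel case from the first block of the table.

/* Illustrative only: compose a ROGUE_CR_TE_AA value for a core with
 * ISP Samples Per Pixel = 1, following the table above. Assumes
 * <linux/errno.h> and <linux/types.h>.
 */
static int rogue_te_aa_value_spp1(unsigned int samples, u32 *value)
{
	switch (samples) {
	case 1:
		*value = 0; /* no anti-aliasing bits set */
		return 0;
	case 2:
		*value = ROGUE_CR_TE_AA_Y_EN; /* TE does AA on Y plane only */
		return 0;
	case 4:
		/* TE does AA on both X and Y planes */
		*value = ROGUE_CR_TE_AA_Y_EN | ROGUE_CR_TE_AA_X_EN;
		return 0;
	default:
		return -EINVAL; /* 8xmsaa not supported by XE cores */
	}
}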
+/* MacroTile Boundaries X Plane */
+/* Register ROGUE_CR_TE_MTILE1 */
+#define ROGUE_CR_TE_MTILE1 0x0C08
+#define ROGUE_CR_TE_MTILE1_MASKFULL 0x0000000007FFFFFFull
+/* X1 default: 0x00000004
+ * X1 MacroTile boundary, left tile X for second column of macrotiles (16MT mode) - 32 pixels across
+ * tile
+ */
+#define ROGUE_CR_TE_MTILE1_X1_SHIFT 18
+#define ROGUE_CR_TE_MTILE1_X1_CLRMSK 0xF803FFFF
+/* X2 default: 0x00000008
+ * X2 MacroTile boundary, left tile X for third (16MT) column of macrotiles - 32 pixels across tile
+ */
+#define ROGUE_CR_TE_MTILE1_X2_SHIFT 9U
+#define ROGUE_CR_TE_MTILE1_X2_CLRMSK 0xFFFC01FF
+/* X3 default: 0x0000000c
+ * X3 MacroTile boundary, left tile X for fourth column of macrotiles (16MT) - 32 pixels across tile
+ */
+#define ROGUE_CR_TE_MTILE1_X3_SHIFT 0
+#define ROGUE_CR_TE_MTILE1_X3_CLRMSK 0xFFFFFE00
+
+/* MacroTile Boundaries Y Plane. */
+/* Register ROGUE_CR_TE_MTILE2 */
+#define ROGUE_CR_TE_MTILE2 0x0C10
+#define ROGUE_CR_TE_MTILE2_MASKFULL 0x0000000007FFFFFFull
+/* Y1 default: 0x00000004
+ * Y1 MacroTile boundary, top tile Y for second row of macrotiles (16MT mode) - 32 pixels tile
+ * height
+ */
+#define ROGUE_CR_TE_MTILE2_Y1_SHIFT 18
+#define ROGUE_CR_TE_MTILE2_Y1_CLRMSK 0xF803FFFF
+/* Y2 default: 0x00000008
+ * Y2 MacroTile boundary, top tile Y for third (16MT) row of macrotiles - 32 pixels tile height
+ */
+#define ROGUE_CR_TE_MTILE2_Y2_SHIFT 9
+#define ROGUE_CR_TE_MTILE2_Y2_CLRMSK 0xFFFC01FF
+/* Y3 default: 0x0000000c
+ * Y3 MacroTile boundary, top tile Y for fourth row of macrotiles (16MT) - 32 pixels tile height
+ */
+#define ROGUE_CR_TE_MTILE2_Y3_SHIFT 0
+#define ROGUE_CR_TE_MTILE2_Y3_CLRMSK 0xFFFFFE00
+
+/*
+ * In order to perform the tiling operation and generate the display list, the maximum screen size
+ * must be configured in terms of the number of tiles in the X & Y axes.
+ */
+
+/* Register ROGUE_CR_TE_SCREEN */
+#define ROGUE_CR_TE_SCREEN 0x0C18U
+#define ROGUE_CR_TE_SCREEN_MASKFULL 0x00000000001FF1FFull
+/* YMAX default: 0x00000010
+ * Maximum Y tile address visible on screen, 32 pixel tile height, 16Kx16K max screen size
+ */
+#define ROGUE_CR_TE_SCREEN_YMAX_SHIFT 12
+#define ROGUE_CR_TE_SCREEN_YMAX_CLRMSK 0xFFE00FFF
+/* XMAX default: 0x00000010
+ * Maximum X tile address visible on screen, 32 pixel tile width, 16Kx16K max screen size
+ */
+#define ROGUE_CR_TE_SCREEN_XMAX_SHIFT 0
+#define ROGUE_CR_TE_SCREEN_XMAX_CLRMSK 0xFFFFFE00
+
+/*
+ * In order to perform the tiling operation and generate the display list, the maximum screen size
+ * must also be configured in terms of the number of pixels in the X & Y axes, since this may not be
+ * the same as the number of tiles defined in the ROGUE_CR_TE_SCREEN register.
+ */
+/* Register ROGUE_CR_PPP_SCREEN */
+#define ROGUE_CR_PPP_SCREEN 0x0C98
+#define ROGUE_CR_PPP_SCREEN_MASKFULL 0x000000007FFF7FFFull
+/* PIXYMAX
+ * Screen height in pixels. (16K x 16K max screen size)
+ */
+#define ROGUE_CR_PPP_SCREEN_PIXYMAX_SHIFT 16
+#define ROGUE_CR_PPP_SCREEN_PIXYMAX_CLRMSK 0x8000FFFF
+/* PIXXMAX
+ * Screen width in pixels. (16K x 16K max screen size)
+ */
+#define ROGUE_CR_PPP_SCREEN_PIXXMAX_SHIFT 0
+#define ROGUE_CR_PPP_SCREEN_PIXXMAX_CLRMSK 0xFFFF8000
+
+/* Register ROGUE_CR_ISP_MTILE_SIZE */
+#define ROGUE_CR_ISP_MTILE_SIZE 0x0F18
+#define ROGUE_CR_ISP_MTILE_SIZE_MASKFULL 0x0000000003FF03FFull
+/* X
+ * Macrotile width, in tiles. A value of zero corresponds to the maximum size
+ */
+#define ROGUE_CR_ISP_MTILE_SIZE_X_SHIFT 16
+#define ROGUE_CR_ISP_MTILE_SIZE_X_CLRMSK 0xFC00FFFF
+#define ROGUE_CR_ISP_MTILE_SIZE_X_ALIGNSHIFT 0
+#define ROGUE_CR_ISP_MTILE_SIZE_X_ALIGNSIZE 1
+/* Y
+ * Macrotile height, in tiles. A value of zero corresponds to the maximum size
+ */
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_SHIFT 0
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_CLRMSK 0xFFFFFC00
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_ALIGNSHIFT 0
+#define ROGUE_CR_ISP_MTILE_SIZE_Y_ALIGNSIZE 1
+
+/* clang-format on */
+
+#endif /* PVR_ROGUE_CR_DEFS_CLIENT_H */
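ROGUE_CR_TE_SCREEN and ROGUE_CR_PPP_SCREEN above carry related but distinct limits (tiles vs. pixels). A hedged sketch of how the two values might be derived for a width-by-height render target follows; the helper names and the "maximum address = count - 1" reading of the MAX fields are our assumptions, not stated by the patch.

/* Illustrative sketch, assuming 32x32-pixel tiles (per the comments
 * above) and that each MAX field holds the largest visible address.
 * DIV_ROUND_UP comes from <linux/math.h>; helper names are ours.
 */
static inline u32 rogue_te_screen_value(u32 width, u32 height)
{
	u32 xtiles = DIV_ROUND_UP(width, 32);
	u32 ytiles = DIV_ROUND_UP(height, 32);

	return ((xtiles - 1) << ROGUE_CR_TE_SCREEN_XMAX_SHIFT) |
	       ((ytiles - 1) << ROGUE_CR_TE_SCREEN_YMAX_SHIFT);
}

static inline u32 rogue_ppp_screen_value(u32 width, u32 height)
{
	return ((width - 1) << ROGUE_CR_PPP_SCREEN_PIXXMAX_SHIFT) |
	       ((height - 1) << ROGUE_CR_PPP_SCREEN_PIXYMAX_SHIFT);
}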
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_defs.h
new file mode 100644
index 0000000000..932b016860
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_defs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_DEFS_H
+#define PVR_ROGUE_DEFS_H
+
+#include "pvr_rogue_cr_defs.h"
+
+#include <linux/bits.h>
+
+/*
+ ******************************************************************************
+ * ROGUE Defines
+ ******************************************************************************
+ */
+
+#define ROGUE_FW_MAX_NUM_OS (8U)
+#define ROGUE_FW_HOST_OS (0U)
+#define ROGUE_FW_GUEST_OSID_START (1U)
+
+#define ROGUE_FW_THREAD_0 (0U)
+#define ROGUE_FW_THREAD_1 (1U)
+
+#define GET_ROGUE_CACHE_LINE_SIZE(x) ((((s32)(x)) > 0) ? ((x) / 8) : (0))
+
+#define MAX_HW_GEOM_FRAG_CONTEXTS 2U
+
+#define ROGUE_CR_CLK_CTRL_ALL_ON \
+	(0x5555555555555555ull & ROGUE_CR_CLK_CTRL_MASKFULL)
+#define ROGUE_CR_CLK_CTRL_ALL_AUTO \
+	(0xaaaaaaaaaaaaaaaaull & ROGUE_CR_CLK_CTRL_MASKFULL)
+#define ROGUE_CR_CLK_CTRL2_ALL_ON \
+	(0x5555555555555555ull & ROGUE_CR_CLK_CTRL2_MASKFULL)
+#define ROGUE_CR_CLK_CTRL2_ALL_AUTO \
+	(0xaaaaaaaaaaaaaaaaull & ROGUE_CR_CLK_CTRL2_MASKFULL)
+
+#define ROGUE_CR_SOFT_RESET_DUST_n_CORE_EN \
+	(ROGUE_CR_SOFT_RESET_DUST_A_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_B_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_C_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_D_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_E_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_F_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_G_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_H_CORE_EN)
+
+/* SOFT_RESET Rascal and DUSTs bits */
+#define ROGUE_CR_SOFT_RESET_RASCALDUSTS_EN \
+	(ROGUE_CR_SOFT_RESET_RASCAL_CORE_EN | \
+	 ROGUE_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+/* SOFT_RESET steps as defined in the TRM */
+#define ROGUE_S7_SOFT_RESET_DUSTS (ROGUE_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+#define ROGUE_S7_SOFT_RESET_JONES \
+	(ROGUE_CR_SOFT_RESET_PM_EN | ROGUE_CR_SOFT_RESET_VDM_EN | \
+	 ROGUE_CR_SOFT_RESET_ISP_EN)
+
+#define ROGUE_S7_SOFT_RESET_JONES_ALL \
+	(ROGUE_S7_SOFT_RESET_JONES | ROGUE_CR_SOFT_RESET_BIF_EN | \
+	 ROGUE_CR_SOFT_RESET_SLC_EN | ROGUE_CR_SOFT_RESET_GARTEN_EN)
+
+#define ROGUE_S7_SOFT_RESET2 \
+	(ROGUE_CR_SOFT_RESET2_BLACKPEARL_EN | ROGUE_CR_SOFT_RESET2_PIXEL_EN | \
+	 ROGUE_CR_SOFT_RESET2_CDM_EN | ROGUE_CR_SOFT_RESET2_VERTEX_EN)
+
+#define ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12U)
+#define ROGUE_BIF_PM_PHYSICAL_PAGE_SIZE \
+	BIT(ROGUE_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+
+#define ROGUE_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT (14U)
+#define ROGUE_BIF_PM_VIRTUAL_PAGE_SIZE BIT(ROGUE_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT)
+
+#define ROGUE_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE (16U)
+
+/*
+ * To get the number of required Dusts, divide the number of
+ * clusters by 2 and round up.
+ */
+#define ROGUE_REQ_NUM_DUSTS(CLUSTERS) (((CLUSTERS) + 1U) / 2U)
+
+/*
+ * To get the number of required Bernado/Phantom(s), divide
+ * the number of clusters by 4 and round up.
+ */
+#define ROGUE_REQ_NUM_PHANTOMS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+#define ROGUE_REQ_NUM_BERNADOS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+#define ROGUE_REQ_NUM_BLACKPEARLS(CLUSTERS) (((CLUSTERS) + 3U) / 4U)
+
+/*
+ * FW MMU contexts
+ */
+#define MMU_CONTEXT_MAPPING_FWPRIV (0x0) /* FW code/private data */
+#define MMU_CONTEXT_MAPPING_FWIF (0x0) /* Host/FW data */
+
+/*
+ * Utility macros to calculate CAT_BASE register addresses
+ */
+#define BIF_CAT_BASEX(n) \
+	(ROGUE_CR_BIF_CAT_BASE0 + \
+	 (n) * (ROGUE_CR_BIF_CAT_BASE1 - ROGUE_CR_BIF_CAT_BASE0))
+
+#define FWCORE_MEM_CAT_BASEX(n) \
+	(ROGUE_CR_FWCORE_MEM_CAT_BASE0 + \
+	 (n) * (ROGUE_CR_FWCORE_MEM_CAT_BASE1 - \
+		ROGUE_CR_FWCORE_MEM_CAT_BASE0))
+
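The divide-and-round-up macros above are easy to sanity-check at compile time; the cluster counts below are our own example inputs, not values from the patch.

/* Worked examples for the round-up helpers above (inputs are ours).
 * static_assert comes from <linux/build_bug.h>.
 */
static_assert(ROGUE_REQ_NUM_DUSTS(1) == 1);    /* (1 + 1) / 2 */
static_assert(ROGUE_REQ_NUM_DUSTS(6) == 3);    /* (6 + 1) / 2 */
static_assert(ROGUE_REQ_NUM_PHANTOMS(6) == 2); /* (6 + 3) / 4 */
static_assert(ROGUE_REQ_NUM_PHANTOMS(9) == 3); /* (9 + 3) / 4 */

The CAT_BASE helpers in the same block follow the usual stride pattern: BIF_CAT_BASEX(n) starts at ROGUE_CR_BIF_CAT_BASE0 and steps by the pitch between consecutive CAT_BASE registers, so BIF_CAT_BASEX(0) is BASE0 itself.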
+/*
+ * FWCORE wrapper register defines
+ */
+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT \
+	ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT
+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK \
+	ROGUE_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK
+#define FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT (12U)
+
+#define ROGUE_MAX_COMPUTE_SHARED_REGISTERS (2 * 1024)
+#define ROGUE_MAX_VERTEX_SHARED_REGISTERS 1024
+#define ROGUE_MAX_PIXEL_SHARED_REGISTERS 1024
+#define ROGUE_CSRM_LINE_SIZE_IN_DWORDS (64 * 4 * 4)
+
+#define ROGUE_CDMCTRL_USC_COMMON_SIZE_ALIGNSIZE 64
+#define ROGUE_CDMCTRL_USC_COMMON_SIZE_UPPER 256
+
+/*
+ * The maximum amount of local memory which can be allocated by a single kernel
+ * (in dwords/32-bit registers).
+ *
+ * ROGUE_CDMCTRL_USC_COMMON_SIZE_ALIGNSIZE is in bytes so we divide by four.
+ */
+#define ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS ((ROGUE_CDMCTRL_USC_COMMON_SIZE_ALIGNSIZE * \
+						   ROGUE_CDMCTRL_USC_COMMON_SIZE_UPPER) >> 2)
+
+/*
+ ******************************************************************************
+ * WA HWBRNs
+ ******************************************************************************
+ */
+
+/* GPU CR timer tick in GPU cycles */
+#define ROGUE_CRTIME_TICK_IN_CYCLES (256U)
+
+/* For nohw multicore, return the maximum number of cores possible to the client. */
+#define ROGUE_MULTICORE_MAX_NOHW_CORES (4U)
+
+/*
+ * If the size of the SLC is less than this value then the TPU bypasses the SLC.
+ */
+#define ROGUE_TPU_CACHED_SLC_SIZE_THRESHOLD (128U * 1024U)
+
+/*
+ * If the size of the SLC is bigger than this value then the TCU must not be
+ * bypassed in the SLC.
+ * In XE_MEMORY_HIERARCHY cores, the TCU is bypassed by default.
+ */
+#define ROGUE_TCU_CACHED_SLC_SIZE_THRESHOLD (32U * 1024U)
+
+/*
+ * Register used by the FW to track the current boot stage (not used in MIPS)
+ */
+#define ROGUE_FW_BOOT_STAGE_REGISTER (ROGUE_CR_POWER_ESTIMATE_RESULT)
+
+/*
+ * Virtualisation definitions
+ */
+#define ROGUE_VIRTUALISATION_REG_SIZE_PER_OS \
+	(ROGUE_CR_MTS_SCHEDULE1 - ROGUE_CR_MTS_SCHEDULE)
+
+/*
+ * Macro used to indicate which version of HWPerf is active
+ */
+#define ROGUE_FEATURE_HWPERF_ROGUE
+
+/*
+ * Maximum number of cores supported by TRP
+ */
+#define ROGUE_TRP_MAX_NUM_CORES (4U)
+
+#endif /* PVR_ROGUE_DEFS_H */
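Expanding the per-kernel local-memory bound defined above: 64 bytes x 256 = 16384 bytes, and dividing by four bytes per dword gives 4096 32-bit registers. A compile-time restatement of that arithmetic (ours, not from the patch):

/* (64 * 256) bytes >> 2 = 4096 dwords of per-kernel local memory. */
static_assert(ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS == 4096);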
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif.h
new file mode 100644
index 0000000000..172886be4c
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif.h
@@ -0,0 +1,2188 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_H
+#define PVR_ROGUE_FWIF_H
+
+#include <linux/bits.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_defs.h"
+#include "pvr_rogue_fwif_common.h"
+#include "pvr_rogue_fwif_shared.h"
+
+/*
+ ****************************************************************************
+ * Logging type
+ ****************************************************************************
+ */
+#define ROGUE_FWIF_LOG_TYPE_NONE 0x00000000U
+#define ROGUE_FWIF_LOG_TYPE_TRACE 0x00000001U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MAIN 0x00000002U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MTS 0x00000004U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_CSW 0x00000010U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_BIF 0x00000020U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_PM 0x00000040U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_RTD 0x00000080U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_SPM 0x00000100U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_POW 0x00000200U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_HWR 0x00000400U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_HWP 0x00000800U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_RPM 0x00001000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_DMA 0x00002000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MISC 0x00004000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U
+#define ROGUE_FWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU
+#define ROGUE_FWIF_LOG_TYPE_MASK 0x80007FFFU
+
+/* String used in pvrdebug -h output */
+#define ROGUE_FWIF_LOG_GROUPS_STRING_LIST \
+	"main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug"
+
+/* Table entry to map log group strings to log type value */
+struct rogue_fwif_log_group_map_entry {
+	const char *log_group_name;
+	u32 log_group_type;
+};
+
+/*
+ ****************************************************************************
+ * ROGUE FW signature checks
+ ****************************************************************************
+ */
+#define ROGUE_FW_SIG_BUFFER_SIZE_MIN (8192)
+
+#define ROGUE_FWIF_TIMEDIFF_ID ((0x1UL << 28) | ROGUE_CR_TIMER)
+
+/*
+ ****************************************************************************
+ * Trace Buffer
+ ****************************************************************************
+ */
+
+/* Default size of ROGUE_FWIF_TRACEBUF_SPACE in DWords */
+#define ROGUE_FW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U
+#define ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE 200U
+#define ROGUE_FW_THREAD_NUM 1U
+#define ROGUE_FW_THREAD_MAX 2U
+
+#define ROGUE_FW_POLL_TYPE_SET 0x80000000U
+
+struct rogue_fwif_file_info_buf {
+	char path[ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE];
+	char info[ROGUE_FW_TRACE_BUFFER_ASSERT_SIZE];
+	u32 line_num;
+	u32 padding;
+} __aligned(8);
+
+struct rogue_fwif_tracebuf_space {
+	u32 trace_pointer;
+
+	u32 trace_buffer_fw_addr;
+
+	/* To be used by host when reading from trace buffer */
+	u32 *trace_buffer;
+
+	struct rogue_fwif_file_info_buf assert_buf;
+} __aligned(8);
+
+/* Total number of FW fault logs stored */
+#define ROGUE_FWIF_FWFAULTINFO_MAX (8U)
+
+struct rogue_fw_fault_info {
+	aligned_u64 cr_timer;
+	aligned_u64 os_timer;
+
+	u32 data __aligned(8);
+	u32 reserved;
+	struct rogue_fwif_file_info_buf fault_buf;
+} __aligned(8);
+
+enum rogue_fwif_pow_state {
+	ROGUE_FWIF_POW_OFF, /* idle and ready for full power down */
+	ROGUE_FWIF_POW_ON, /* running HW commands */
+	ROGUE_FWIF_POW_FORCED_IDLE, /* forced idle */
+	ROGUE_FWIF_POW_IDLE, /* idle waiting for host handshake */
+};
+
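Looking back at the logging-type bits and the rogue_fwif_log_group_map_entry table type declared near the top of this header, a consumer might build a log_type mask from a comma-separated group list like the ROGUE_FWIF_LOG_GROUPS_STRING_LIST string. A rough sketch follows; the table contents, helper name, and naive strstr() matching are all ours, not part of the patch.

/* Illustrative only: a partial map table and a crude mask builder.
 * Only the struct and the LOG_TYPE_* bits come from this patch.
 * strstr() matching would need real tokenising in production
 * ("pm" also matches "spm"/"rpm"). Assumes <linux/string.h> and
 * ARRAY_SIZE from <linux/kernel.h>.
 */
static const struct rogue_fwif_log_group_map_entry log_group_map[] = {
	{ "main", ROGUE_FWIF_LOG_TYPE_GROUP_MAIN },
	{ "pow",  ROGUE_FWIF_LOG_TYPE_GROUP_POW },
	{ "hwr",  ROGUE_FWIF_LOG_TYPE_GROUP_HWR },
};

static u32 log_groups_to_mask(const char *groups)
{
	u32 mask = ROGUE_FWIF_LOG_TYPE_TRACE; /* tracing implied by any group */
	size_t i;

	for (i = 0; i < ARRAY_SIZE(log_group_map); i++)
		if (strstr(groups, log_group_map[i].log_group_name))
			mask |= log_group_map[i].log_group_type;

	return mask & ROGUE_FWIF_LOG_TYPE_MASK;
}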
+/* Firmware HWR states */
+/* The HW state is ok or locked up */
+#define ROGUE_FWIF_HWR_HARDWARE_OK BIT(0)
+/* Tells if a HWR reset is in progress */
+#define ROGUE_FWIF_HWR_RESET_IN_PROGRESS BIT(1)
+/* A DM-unrelated lockup has been detected */
+#define ROGUE_FWIF_HWR_GENERAL_LOCKUP BIT(3)
+/* At least one DM is running without being close to a lockup */
+#define ROGUE_FWIF_HWR_DM_RUNNING_OK BIT(4)
+/* At least one DM is close to lockup */
+#define ROGUE_FWIF_HWR_DM_STALLING BIT(5)
+/* The FW has faulted and needs to restart */
+#define ROGUE_FWIF_HWR_FW_FAULT BIT(6)
+/* The FW has requested the host to restart it */
+#define ROGUE_FWIF_HWR_RESTART_REQUESTED BIT(7)
+
+#define ROGUE_FWIF_PHR_STATE_SHIFT (8U)
+/* The FW has requested the host to restart it, per PHR configuration */
+#define ROGUE_FWIF_PHR_RESTART_REQUESTED ((1) << ROGUE_FWIF_PHR_STATE_SHIFT)
+/* A PHR triggered GPU reset has just finished */
+#define ROGUE_FWIF_PHR_RESTART_FINISHED ((2) << ROGUE_FWIF_PHR_STATE_SHIFT)
+#define ROGUE_FWIF_PHR_RESTART_MASK \
+	(ROGUE_FWIF_PHR_RESTART_REQUESTED | ROGUE_FWIF_PHR_RESTART_FINISHED)
+
+#define ROGUE_FWIF_PHR_MODE_OFF (0UL)
+#define ROGUE_FWIF_PHR_MODE_RD_RESET (1UL)
+#define ROGUE_FWIF_PHR_MODE_FULL_RESET (2UL)
+
+/* Firmware per-DM HWR states */
+/* DM is working if all flags are cleared */
+#define ROGUE_FWIF_DM_STATE_WORKING (0)
+/* DM is idle and ready for HWR */
+#define ROGUE_FWIF_DM_STATE_READY_FOR_HWR BIT(0)
+/* DM needs to skip to the next cmd before resuming processing */
+#define ROGUE_FWIF_DM_STATE_NEEDS_SKIP BIT(2)
+/* DM needs partial render cleanup before resuming processing */
+#define ROGUE_FWIF_DM_STATE_NEEDS_PR_CLEANUP BIT(3)
+/* DM needs to increment Recovery Count once fully recovered */
+#define ROGUE_FWIF_DM_STATE_NEEDS_TRACE_CLEAR BIT(4)
+/* DM was identified as locking up and causing HWR */
+#define ROGUE_FWIF_DM_STATE_GUILTY_LOCKUP BIT(5)
+/* DM was innocently affected by another lockup which caused HWR */
+#define ROGUE_FWIF_DM_STATE_INNOCENT_LOCKUP BIT(6)
+/* DM was identified as over-running and causing HWR */
+#define ROGUE_FWIF_DM_STATE_GUILTY_OVERRUNING BIT(7)
+/* DM was innocently affected by another DM over-running which caused HWR */
+#define ROGUE_FWIF_DM_STATE_INNOCENT_OVERRUNING BIT(8)
+/* DM was forced into HWR as it delayed more important workloads */
+#define ROGUE_FWIF_DM_STATE_HARD_CONTEXT_SWITCH BIT(9)
+/* DM was forced into HWR due to an uncorrected GPU ECC error */
+#define ROGUE_FWIF_DM_STATE_GPU_ECC_HWR BIT(10)
+
+/* Firmware's connection state */
+enum rogue_fwif_connection_fw_state {
+	/* Firmware is offline */
+	ROGUE_FW_CONNECTION_FW_OFFLINE = 0,
+	/* Firmware is initialised */
+	ROGUE_FW_CONNECTION_FW_READY,
+	/* Firmware connection is fully established */
+	ROGUE_FW_CONNECTION_FW_ACTIVE,
+	/* Firmware is clearing up connection data */
+	ROGUE_FW_CONNECTION_FW_OFFLOADING,
+	ROGUE_FW_CONNECTION_FW_STATE_COUNT
+};
+
+/* OS' connection state */
+enum rogue_fwif_connection_os_state {
+	/* OS is offline */
+	ROGUE_FW_CONNECTION_OS_OFFLINE = 0,
+	/* OS's KM driver is set up and waiting */
+	ROGUE_FW_CONNECTION_OS_READY,
+	/* OS connection is fully established */
+	ROGUE_FW_CONNECTION_OS_ACTIVE,
+	ROGUE_FW_CONNECTION_OS_STATE_COUNT
+};
+
+struct rogue_fwif_os_runtime_flags {
+	unsigned int os_state : 3;
+	unsigned int fl_ok : 1;
+	unsigned int fl_grow_pending : 1;
+	unsigned int isolated_os : 1;
+	unsigned int reserved : 26;
+};
+
+#define PVR_SLR_LOG_ENTRIES 10
+/* MAX_CLIENT_CCB_NAME not visible to this header */
+#define PVR_SLR_LOG_STRLEN 30
+
+struct rogue_fwif_slr_entry {
+	aligned_u64 timestamp;
+	u32 fw_ctx_addr;
+	u32 num_ufos;
+	char ccb_name[PVR_SLR_LOG_STRLEN];
+	char padding[2];
+} __aligned(8);
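The HWR and PHR bits above are designed to be tested independently; a hedged example of how a host-side consumer might report them (the function name and messages are ours, and hwr_state_flags refers to the field declared further down in rogue_fwif_sysdata):

/* Illustrative decode of a hwr_state_flags word; only the flag
 * names come from this patch. Assumes <linux/printk.h>.
 */
static void rogue_report_hwr_state(u32 flags)
{
	if (!(flags & ROGUE_FWIF_HWR_HARDWARE_OK))
		pr_warn("GPU hardware not OK\n");
	if (flags & ROGUE_FWIF_HWR_RESET_IN_PROGRESS)
		pr_info("HWR reset in progress\n");
	if (flags & ROGUE_FWIF_HWR_FW_FAULT)
		pr_warn("firmware fault, restart needed\n");
	if ((flags & ROGUE_FWIF_PHR_RESTART_MASK) ==
	    ROGUE_FWIF_PHR_RESTART_FINISHED)
		pr_info("PHR-triggered GPU reset finished\n");
}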
+
+#define MAX_THREAD_NUM 2
+
+/* Firmware trace control data */
+struct rogue_fwif_tracebuf {
+	u32 log_type;
+	struct rogue_fwif_tracebuf_space tracebuf[MAX_THREAD_NUM];
+	/*
+	 * Member initialised only when sTraceBuf is actually allocated (in
+	 * ROGUETraceBufferInitOnDemandResources)
+	 */
+	u32 tracebuf_size_in_dwords;
+	/* Compatibility and other flags */
+	u32 tracebuf_flags;
+} __aligned(8);
+
+/* Firmware system data shared with the Host driver */
+struct rogue_fwif_sysdata {
+	/* Configuration flags from host */
+	u32 config_flags;
+	/* Extended configuration flags from host */
+	u32 config_flags_ext;
+	enum rogue_fwif_pow_state pow_state;
+	u32 hw_perf_ridx;
+	u32 hw_perf_widx;
+	u32 hw_perf_wrap_count;
+	/* Constant after setup, needed in FW */
+	u32 hw_perf_size;
+	/* The number of times the FW drops a packet due to a full buffer */
+	u32 hw_perf_drop_count;
+
+	/*
+	 * ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid
+	 * when FW is built with ROGUE_HWPERF_UTILIZATION &
+	 * ROGUE_HWPERF_DROP_TRACKING defined in rogue_fw_hwperf.c
+	 */
+	/* Buffer utilisation, high watermark of bytes in use */
+	u32 hw_perf_ut;
+	/* The ordinal of the first packet the FW dropped */
+	u32 first_drop_ordinal;
+	/* The ordinal of the last packet the FW dropped */
+	u32 last_drop_ordinal;
+	/* State flags for each Operating System mirrored from Fw coremem */
+	struct rogue_fwif_os_runtime_flags
+		os_runtime_flags_mirror[ROGUE_FW_MAX_NUM_OS];
+
+	struct rogue_fw_fault_info fault_info[ROGUE_FWIF_FWFAULTINFO_MAX];
+	u32 fw_faults;
+	u32 cr_poll_addr[MAX_THREAD_NUM];
+	u32 cr_poll_mask[MAX_THREAD_NUM];
+	u32 cr_poll_count[MAX_THREAD_NUM];
+	aligned_u64 start_idle_time;
+
+#if defined(SUPPORT_ROGUE_FW_STATS_FRAMEWORK)
+#	define ROGUE_FWIF_STATS_FRAMEWORK_LINESIZE (8)
+#	define ROGUE_FWIF_STATS_FRAMEWORK_MAX \
+		(2048 * ROGUE_FWIF_STATS_FRAMEWORK_LINESIZE)
+	u32 fw_stats_buf[ROGUE_FWIF_STATS_FRAMEWORK_MAX] __aligned(8);
+#endif
+	u32 hwr_state_flags;
+	u32 hwr_recovery_flags[PVR_FWIF_DM_MAX];
+	/* Compatibility and other flags */
+	u32 fw_sys_data_flags;
+	/* Identify whether MC config is P-P or P-S */
+	u32 mc_config;
+} __aligned(8);
+
+/* Per-OS firmware shared data */
+struct rogue_fwif_osdata {
+	/* Configuration flags from an OS */
+	u32 fw_os_config_flags;
+	/* Markers to signal that the host should perform a full sync check */
+	u32 fw_sync_check_mark;
+	u32 host_sync_check_mark;
+
+	u32 forced_updates_requested;
+	u8 slr_log_wp;
+	struct rogue_fwif_slr_entry slr_log_first;
+	struct rogue_fwif_slr_entry slr_log[PVR_SLR_LOG_ENTRIES];
+	aligned_u64 last_forced_update_time;
+
+	/* Interrupt count from Threads > */
+	u32 interrupt_count[MAX_THREAD_NUM];
+	u32 kccb_cmds_executed;
+	u32 power_sync_fw_addr;
+	/* Compatibility and other flags */
+	u32 fw_os_data_flags;
+	u32 padding;
+} __aligned(8);
+
+/* Firmware trace time-stamp field breakup */
+
+/* ROGUE_CR_TIMER register read (48 bits) value */
+#define ROGUE_FWT_TIMESTAMP_TIME_SHIFT (0U)
+#define ROGUE_FWT_TIMESTAMP_TIME_CLRMSK (0xFFFF000000000000ull)
+
+/* Extra debug-info (16 bits) */
+#define ROGUE_FWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U)
+#define ROGUE_FWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK
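Given the field breakup above, pulling the two halves out of a trace time-stamp is a pair of one-liners; the helper names are ours, not from the patch.

/* Illustrative accessors for the time-stamp layout above. */
static inline u64 rogue_fwt_timestamp_time(u64 ts)
{
	/* Low 48 bits: ROGUE_CR_TIMER value. */
	return ts & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK;
}

static inline u16 rogue_fwt_timestamp_debug_info(u64 ts)
{
	/* High 16 bits: extra debug info. */
	return (u16)(ts >> ROGUE_FWT_TIMESTAMP_DEBUG_INFO_SHIFT);
}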
ROGUE_CR_BIF_MMU_ENTRY_PENDING bit from ROGUE_CR_BIF_MMU_ENTRY register */ +#define ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U) +#define ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET \ + BIT(ROGUE_FWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT) + +/* Bit 2: ROGUE_CR_SLAVE_EVENT register is non-zero */ +#define ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U) +#define ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SET \ + BIT(ROGUE_FWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT) + +/* Bit 3-15: Unused bits */ + +#define ROGUE_FWT_DEBUG_INFO_STR_MAXLEN 64 +#define ROGUE_FWT_DEBUG_INFO_STR_PREPEND " (debug info: " +#define ROGUE_FWT_DEBUG_INFO_STR_APPEND ")" + +/* + ****************************************************************************** + * HWR Data + ****************************************************************************** + */ +enum rogue_hwrtype { + ROGUE_HWRTYPE_UNKNOWNFAILURE = 0, + ROGUE_HWRTYPE_OVERRUN = 1, + ROGUE_HWRTYPE_POLLFAILURE = 2, + ROGUE_HWRTYPE_BIF0FAULT = 3, + ROGUE_HWRTYPE_BIF1FAULT = 4, + ROGUE_HWRTYPE_TEXASBIF0FAULT = 5, + ROGUE_HWRTYPE_MMUFAULT = 6, + ROGUE_HWRTYPE_MMUMETAFAULT = 7, + ROGUE_HWRTYPE_MIPSTLBFAULT = 8, + ROGUE_HWRTYPE_ECCFAULT = 9, + ROGUE_HWRTYPE_MMURISCVFAULT = 10, +}; + +#define ROGUE_FWIF_HWRTYPE_BIF_BANK_GET(hwr_type) \ + (((hwr_type) == ROGUE_HWRTYPE_BIF0FAULT) ? 0 : 1) + +#define ROGUE_FWIF_HWRTYPE_PAGE_FAULT_GET(hwr_type) \ + ((((hwr_type) == ROGUE_HWRTYPE_BIF0FAULT) || \ + ((hwr_type) == ROGUE_HWRTYPE_BIF1FAULT) || \ + ((hwr_type) == ROGUE_HWRTYPE_TEXASBIF0FAULT) || \ + ((hwr_type) == ROGUE_HWRTYPE_MMUFAULT) || \ + ((hwr_type) == ROGUE_HWRTYPE_MMUMETAFAULT) || \ + ((hwr_type) == ROGUE_HWRTYPE_MIPSTLBFAULT) || \ + ((hwr_type) == ROGUE_HWRTYPE_MMURISCVFAULT)) \ + ? true \ + : false) + +struct rogue_bifinfo { + aligned_u64 bif_req_status; + aligned_u64 bif_mmu_status; + aligned_u64 pc_address; /* phys address of the page catalogue */ + aligned_u64 reserved; +}; + +struct rogue_eccinfo { + u32 fault_gpu; +}; + +struct rogue_mmuinfo { + aligned_u64 mmu_status[2]; + aligned_u64 pc_address; /* phys address of the page catalogue */ + aligned_u64 reserved; +}; + +struct rogue_pollinfo { + u32 thread_num; + u32 cr_poll_addr; + u32 cr_poll_mask; + u32 cr_poll_last_value; + aligned_u64 reserved; +} __aligned(8); + +struct rogue_tlbinfo { + u32 bad_addr; + u32 entry_lo; +}; + +struct rogue_hwrinfo { + union { + struct rogue_bifinfo bif_info; + struct rogue_mmuinfo mmu_info; + struct rogue_pollinfo poll_info; + struct rogue_tlbinfo tlb_info; + struct rogue_eccinfo ecc_info; + } hwr_data; + + aligned_u64 cr_timer; + aligned_u64 os_timer; + u32 frame_num; + u32 pid; + u32 active_hwrt_data; + u32 hwr_number; + u32 event_status; + u32 hwr_recovery_flags; + enum rogue_hwrtype hwr_type; + u32 dm; + u32 core_id; + aligned_u64 cr_time_of_kick; + aligned_u64 cr_time_hw_reset_start; + aligned_u64 cr_time_hw_reset_finish; + aligned_u64 cr_time_freelist_ready; + aligned_u64 reserved[2]; +} __aligned(8); + +/* Number of first HWR logs recorded (never overwritten by newer logs) */ +#define ROGUE_FWIF_HWINFO_MAX_FIRST 8U +/* Number of latest HWR logs (older logs are overwritten by newer logs) */ +#define ROGUE_FWIF_HWINFO_MAX_LAST 8U +/* Total number of HWR logs stored in a buffer */ +#define ROGUE_FWIF_HWINFO_MAX \ + (ROGUE_FWIF_HWINFO_MAX_FIRST + ROGUE_FWIF_HWINFO_MAX_LAST) +/* Index of the last log in the HWR log buffer */ +#define ROGUE_FWIF_HWINFO_LAST_INDEX (ROGUE_FWIF_HWINFO_MAX - 1U) + +struct rogue_fwif_hwrinfobuf { + struct rogue_hwrinfo hwr_info[ROGUE_FWIF_HWINFO_MAX]; + u32 hwr_counter; 
+ u32 write_index; + u32 dd_req_count; + u32 hwr_info_buf_flags; /* Compatibility and other flags */ + u32 hwr_dm_locked_up_count[PVR_FWIF_DM_MAX]; + u32 hwr_dm_overran_count[PVR_FWIF_DM_MAX]; + u32 hwr_dm_recovered_count[PVR_FWIF_DM_MAX]; + u32 hwr_dm_false_detect_count[PVR_FWIF_DM_MAX]; +} __aligned(8); + +#define ROGUE_FWIF_CTXSWITCH_PROFILE_FAST_EN (1) +#define ROGUE_FWIF_CTXSWITCH_PROFILE_MEDIUM_EN (2) +#define ROGUE_FWIF_CTXSWITCH_PROFILE_SLOW_EN (3) +#define ROGUE_FWIF_CTXSWITCH_PROFILE_NODELAY_EN (4) + +#define ROGUE_FWIF_CDM_ARBITRATION_TASK_DEMAND_EN (1) +#define ROGUE_FWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (2) + +#define ROGUE_FWIF_ISP_SCHEDMODE_VER1_IPP (1) +#define ROGUE_FWIF_ISP_SCHEDMODE_VER2_ISP (2) +/* + ****************************************************************************** + * ROGUE firmware Init Config Data + ****************************************************************************** + */ + +/* Flag definitions affecting the firmware globally */ +#define ROGUE_FWIF_INICFG_CTXSWITCH_MODE_RAND BIT(0) +#define ROGUE_FWIF_INICFG_CTXSWITCH_SRESET_EN BIT(1) +#define ROGUE_FWIF_INICFG_HWPERF_EN BIT(2) +#define ROGUE_FWIF_INICFG_DM_KILL_MODE_RAND_EN BIT(3) +#define ROGUE_FWIF_INICFG_POW_RASCALDUST BIT(4) +/* Bit 5 is reserved. */ +#define ROGUE_FWIF_INICFG_FBCDC_V3_1_EN BIT(6) +#define ROGUE_FWIF_INICFG_CHECK_MLIST_EN BIT(7) +#define ROGUE_FWIF_INICFG_DISABLE_CLKGATING_EN BIT(8) +/* Bit 9 is reserved. */ +/* Bit 10 is reserved. */ +/* Bit 11 is reserved. */ +#define ROGUE_FWIF_INICFG_REGCONFIG_EN BIT(12) +#define ROGUE_FWIF_INICFG_ASSERT_ON_OUTOFMEMORY BIT(13) +#define ROGUE_FWIF_INICFG_HWP_DISABLE_FILTER BIT(14) +/* Bit 15 is reserved. */ +#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16) +#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_FAST \ + (ROGUE_FWIF_CTXSWITCH_PROFILE_FAST_EN \ + << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM \ + (ROGUE_FWIF_CTXSWITCH_PROFILE_MEDIUM_EN \ + << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SLOW \ + (ROGUE_FWIF_CTXSWITCH_PROFILE_SLOW_EN \ + << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_NODELAY \ + (ROGUE_FWIF_CTXSWITCH_PROFILE_NODELAY_EN \ + << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_MASK \ + (7 << ROGUE_FWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) +#define ROGUE_FWIF_INICFG_DISABLE_DM_OVERLAP BIT(19) +#define ROGUE_FWIF_INICFG_ASSERT_ON_HWR_TRIGGER BIT(20) +#define ROGUE_FWIF_INICFG_FABRIC_COHERENCY_ENABLED BIT(21) +#define ROGUE_FWIF_INICFG_VALIDATE_IRQ BIT(22) +#define ROGUE_FWIF_INICFG_DISABLE_PDP_EN BIT(23) +#define ROGUE_FWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN BIT(24) +#define ROGUE_FWIF_INICFG_WORKEST BIT(25) +#define ROGUE_FWIF_INICFG_PDVFS BIT(26) +#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT (27) +#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND \ + (ROGUE_FWIF_CDM_ARBITRATION_TASK_DEMAND_EN \ + << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT) +#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN \ + (ROGUE_FWIF_CDM_ARBITRATION_ROUND_ROBIN_EN \ + << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT) +#define ROGUE_FWIF_INICFG_CDM_ARBITRATION_MASK \ + (3 << ROGUE_FWIF_INICFG_CDM_ARBITRATION_SHIFT) +#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT (29) +#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_NONE (0) +#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER1_IPP \ + (ROGUE_FWIF_ISP_SCHEDMODE_VER1_IPP \ + << ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT) +#define 
ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER2_ISP \ + (ROGUE_FWIF_ISP_SCHEDMODE_VER2_ISP \ + << ROGUE_FWIF_INICFG_ISPSCHEDMODE_SHIFT) +#define ROGUE_FWIF_INICFG_ISPSCHEDMODE_MASK \ + (ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER1_IPP | \ + ROGUE_FWIF_INICFG_ISPSCHEDMODE_VER2_ISP) +#define ROGUE_FWIF_INICFG_VALIDATE_SOCUSC_TIMER BIT(31) + +#define ROGUE_FWIF_INICFG_ALL (0xFFFFFFFFU) + +/* Extended Flag definitions affecting the firmware globally */ +#define ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_SHIFT (0) +/* [7] YUV10 override + * [6:4] Quality + * [3] Quality enable + * [2:1] Compression scheme + * [0] Lossy group + */ +#define ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_MASK (0xFF) +#define ROGUE_FWIF_INICFG_EXT_ALL (ROGUE_FWIF_INICFG_EXT_TFBC_CONTROL_MASK) + +/* Flag definitions affecting only workloads submitted by a particular OS */ +#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_TDM_EN BIT(0) +#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_GEOM_EN BIT(1) +#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_FRAG_EN BIT(2) +#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_CDM_EN BIT(3) + +#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_TDM BIT(4) +#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_GEOM BIT(5) +#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_FRAG BIT(6) +#define ROGUE_FWIF_INICFG_OS_LOW_PRIO_CS_CDM BIT(7) + +#define ROGUE_FWIF_INICFG_OS_ALL (0xFF) + +#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_DM_ALL \ + (ROGUE_FWIF_INICFG_OS_CTXSWITCH_TDM_EN | \ + ROGUE_FWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \ + ROGUE_FWIF_INICFG_OS_CTXSWITCH_FRAG_EN | \ + ROGUE_FWIF_INICFG_OS_CTXSWITCH_CDM_EN) + +#define ROGUE_FWIF_INICFG_OS_CTXSWITCH_CLRMSK \ + ~(ROGUE_FWIF_INICFG_OS_CTXSWITCH_DM_ALL) + +#define ROGUE_FWIF_FILTCFG_TRUNCATE_HALF BIT(3) +#define ROGUE_FWIF_FILTCFG_TRUNCATE_INT BIT(2) +#define ROGUE_FWIF_FILTCFG_NEW_FILTER_MODE BIT(1) + +enum rogue_activepm_conf { + ROGUE_ACTIVEPM_FORCE_OFF = 0, + ROGUE_ACTIVEPM_FORCE_ON = 1, + ROGUE_ACTIVEPM_DEFAULT = 2 +}; + +enum rogue_rd_power_island_conf { + ROGUE_RD_POWER_ISLAND_FORCE_OFF = 0, + ROGUE_RD_POWER_ISLAND_FORCE_ON = 1, + ROGUE_RD_POWER_ISLAND_DEFAULT = 2 +}; + +struct rogue_fw_register_list { + /* Register number */ + u16 reg_num; + /* Indirect register number (or 0 if not used) */ + u16 indirect_reg_num; + /* Start value for indirect register */ + u16 indirect_start_val; + /* End value for indirect register */ + u16 indirect_end_val; +}; + +struct rogue_fwif_dllist_node { + u32 p; + u32 n; +}; + +/* + * This number is used to represent an invalid page catalogue physical address + */ +#define ROGUE_FWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU + +/* This number is used to represent unallocated page catalog base register */ +#define ROGUE_FW_BIF_INVALID_PCSET 0xFFFFFFFFU + +/* Firmware memory context. 
*/
+struct rogue_fwif_fwmemcontext {
+ /* device physical address of context's page catalogue */
+ aligned_u64 pc_dev_paddr;
+ /*
+ * associated page catalog base register (ROGUE_FW_BIF_INVALID_PCSET ==
+ * unallocated)
+ */
+ u32 page_cat_base_reg_set;
+ /* breakpoint address */
+ u32 breakpoint_addr;
+ /* breakpoint handler address */
+ u32 bp_handler_addr;
+ /* DM and enable control for BP */
+ u32 breakpoint_ctl;
+ /* Compatibility and other flags */
+ u32 fw_mem_ctx_flags;
+ u32 padding;
+} __aligned(8);
+
+/*
+ * FW context state flags
+ */
+#define ROGUE_FWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U)
+#define ROGUE_FWIF_CONTEXT_FLAGS_MC_NEED_RESUME_MASKFULL (0x000000FFU)
+#define ROGUE_FWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000100U)
+#define ROGUE_FWIF_CONTEXT_FLAGS_LAST_KICK_SECURE (0x00000200U)
+
+#define ROGUE_NUM_GEOM_CORES_MAX 4
+
+/*
+ * FW-accessible TA state which must be written out to memory on context store
+ */
+struct rogue_fwif_geom_ctx_state_per_geom {
+ /* To store in mid-TA */
+ aligned_u64 geom_reg_vdm_call_stack_pointer;
+ /* Initial value (in case it is 'lost' due to a lock-up) */
+ aligned_u64 geom_reg_vdm_call_stack_pointer_init;
+ u32 geom_reg_vbs_so_prim[4];
+ u16 geom_current_idx;
+ u16 padding[3];
+} __aligned(8);
+
+struct rogue_fwif_geom_ctx_state {
+ /* FW-accessible TA state which must be written out to memory on context store */
+ struct rogue_fwif_geom_ctx_state_per_geom geom_core[ROGUE_NUM_GEOM_CORES_MAX];
+} __aligned(8);
+
+/*
+ * FW-accessible ISP state which must be written out to memory on context store
+ */
+struct rogue_fwif_frag_ctx_state {
+ u32 frag_reg_pm_deallocated_mask_status;
+ u32 frag_reg_dm_pds_mtilefree_status;
+ /* Compatibility and other flags */
+ u32 ctx_state_flags;
+ /*
+ * frag_reg_isp_store should be the last element of the structure as this
+ * is an array whose size is determined at runtime after detecting the
+ * ROGUE core
+ */
+ u32 frag_reg_isp_store[];
+} __aligned(8);
+
+#define ROGUE_FWIF_CTX_USING_BUFFER_A (0)
+#define ROGUE_FWIF_CTX_USING_BUFFER_B (1U)
+
+struct rogue_fwif_compute_ctx_state {
+ u32 ctx_state_flags; /* Target buffer and other flags */
+};
+
+struct rogue_fwif_fwcommoncontext {
+ /* CCB details for this firmware context */
+ u32 ccbctl_fw_addr; /* CCB control */
+ u32 ccb_fw_addr; /* CCB base */
+ struct rogue_fwif_dma_addr ccb_meta_dma_addr;
+
+ /* Context suspend state */
+ /* geom/frag context suspend state, read/written by FW */
+ u32 context_state_addr __aligned(8);
+
+ /* Flags e.g. for context switching */
+ u32 fw_com_ctx_flags;
+ u32 priority;
+ u32 priority_seq_num;
+
+ /* Framework state */
+ /* Register updates for Framework */
+ u32 rf_cmd_addr __aligned(8);
+
+ /* Statistic updates waiting to be passed back to the host...
*/ + /* True when some stats are pending */ + bool stats_pending __aligned(4); + /* Number of stores on this context since last update */ + s32 stats_num_stores; + /* Number of OOMs on this context since last update */ + s32 stats_num_out_of_memory; + /* Number of PRs on this context since last update */ + s32 stats_num_partial_renders; + /* Data Master type */ + u32 dm; + /* Device Virtual Address of the signal the context is waiting on */ + aligned_u64 wait_signal_address; + /* List entry for the wait-signal list */ + struct rogue_fwif_dllist_node wait_signal_node __aligned(8); + /* List entry for the buffer stalled list */ + struct rogue_fwif_dllist_node buf_stalled_node __aligned(8); + /* Address of the circular buffer queue pointers */ + aligned_u64 cbuf_queue_ctrl_addr; + + aligned_u64 robustness_address; + /* Max HWR deadline limit in ms */ + u32 max_deadline_ms; + /* Following HWR circular buffer read-offset needs resetting */ + bool read_offset_needs_reset; + + /* List entry for the waiting list */ + struct rogue_fwif_dllist_node waiting_node __aligned(8); + /* List entry for the run list */ + struct rogue_fwif_dllist_node run_node __aligned(8); + /* UFO that last failed (or NULL) */ + struct rogue_fwif_ufo last_failed_ufo; + + /* Memory context */ + u32 fw_mem_context_fw_addr; + + /* References to the host side originators */ + /* the Server Common Context */ + u32 server_common_context_id; + /* associated process ID */ + u32 pid; + + /* True when Geom DM OOM is not allowed */ + bool geom_oom_disabled __aligned(4); +} __aligned(8); + +/* Firmware render context. */ +struct rogue_fwif_fwrendercontext { + /* Geometry firmware context. */ + struct rogue_fwif_fwcommoncontext geom_context; + /* Fragment firmware context. */ + struct rogue_fwif_fwcommoncontext frag_context; + + struct rogue_fwif_static_rendercontext_state static_render_context_state; + + /* Number of commands submitted to the WorkEst FW CCB */ + u32 work_est_ccb_submitted; + + /* Compatibility and other flags */ + u32 fw_render_ctx_flags; +} __aligned(8); + +/* Firmware compute context. */ +struct rogue_fwif_fwcomputecontext { + /* Firmware context for the CDM */ + struct rogue_fwif_fwcommoncontext cdm_context; + + struct rogue_fwif_static_computecontext_state + static_compute_context_state; + + /* Number of commands submitted to the WorkEst FW CCB */ + u32 work_est_ccb_submitted; + + /* Compatibility and other flags */ + u32 compute_ctx_flags; + + u32 wgp_state; + u32 wgp_checksum; + u32 core_mask_a; + u32 core_mask_b; +} __aligned(8); + +/* Firmware TDM context. */ +struct rogue_fwif_fwtdmcontext { + /* Firmware context for the TDM */ + struct rogue_fwif_fwcommoncontext tdm_context; + + /* Number of commands submitted to the WorkEst FW CCB */ + u32 work_est_ccb_submitted; +} __aligned(8); + +/* Firmware TQ3D context. */ +struct rogue_fwif_fwtransfercontext { + /* Firmware context for TQ3D. 
*/ + struct rogue_fwif_fwcommoncontext tq_context; +} __aligned(8); + +/* + ****************************************************************************** + * Defines for CMD_TYPE corruption detection and forward compatibility check + ****************************************************************************** + */ + +/* + * CMD_TYPE 32bit contains: + * 31:16 Reserved for magic value to detect corruption (16 bits) + * 15 Reserved for ROGUE_CCB_TYPE_TASK (1 bit) + * 14:0 Bits available for CMD_TYPEs (15 bits) + */ + +/* Magic value to detect corruption */ +#define ROGUE_CMD_MAGIC_DWORD (0x2ABC) +#define ROGUE_CMD_MAGIC_DWORD_MASK (0xFFFF0000U) +#define ROGUE_CMD_MAGIC_DWORD_SHIFT (16U) +#define ROGUE_CMD_MAGIC_DWORD_SHIFTED \ + (ROGUE_CMD_MAGIC_DWORD << ROGUE_CMD_MAGIC_DWORD_SHIFT) + +/* Kernel CCB control for ROGUE */ +struct rogue_fwif_ccb_ctl { + /* write offset into array of commands (MUST be aligned to 16 bytes!) */ + u32 write_offset; + /* Padding to ensure read and write offsets are in separate cache lines. */ + u8 padding[128 - sizeof(u32)]; + /* read offset into array of commands */ + u32 read_offset; + /* Offset wrapping mask (Total capacity of the CCB - 1) */ + u32 wrap_mask; + /* size of each command in bytes */ + u32 cmd_size; + u32 padding2; +} __aligned(8); + +/* Kernel CCB command structure for ROGUE */ + +#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PT (0x1U) /* MMU_CTRL_INVAL_PT_EN */ +#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */ +#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */ + +/* + * can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT + * bit from BIF_CTRL reg + */ +#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10) +/* BIF_CTRL_INVAL_TLB1_EN */ +#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_TLB \ + (ROGUE_FWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) +/* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ +#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) + +/* indicates FW should interrupt the host */ +#define ROGUE_FWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U) + +struct rogue_fwif_mmucachedata { + u32 cache_flags; + u32 mmu_cache_sync_fw_addr; + u32 mmu_cache_sync_update_value; +}; + +#define ROGUE_FWIF_BPDATA_FLAGS_ENABLE BIT(0) +#define ROGUE_FWIF_BPDATA_FLAGS_WRITE BIT(1) +#define ROGUE_FWIF_BPDATA_FLAGS_CTL BIT(2) +#define ROGUE_FWIF_BPDATA_FLAGS_REGS BIT(3) + +struct rogue_fwif_bpdata { + /* Memory context */ + u32 fw_mem_context_fw_addr; + /* Breakpoint address */ + u32 bp_addr; + /* Breakpoint handler */ + u32 bp_handler_addr; + /* Breakpoint control */ + u32 bp_dm; + u32 bp_data_flags; + /* Number of temporary registers to overallocate */ + u32 temp_regs; + /* Number of shared registers to overallocate */ + u32 shared_regs; + /* DM associated with the breakpoint */ + u32 dm; +}; + +#define ROGUE_FWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS \ + (ROGUE_FWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */ + +struct rogue_fwif_kccb_cmd_kick_data { + /* address of the firmware context */ + u32 context_fw_addr; + /* Client CCB woff update */ + u32 client_woff_update; + /* Client CCB wrap mask update after CCCB growth */ + u32 client_wrap_mask_update; + /* number of CleanupCtl pointers attached */ + u32 num_cleanup_ctl; + /* CleanupCtl structures associated with command */ + u32 cleanup_ctl_fw_addr + [ROGUE_FWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; + /* + * offset to the CmdHeader which houses the workload estimation kick + * data. 
+ */ + u32 work_est_cmd_header_offset; +}; + +struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data { + struct rogue_fwif_kccb_cmd_kick_data geom_cmd_kick_data; + struct rogue_fwif_kccb_cmd_kick_data frag_cmd_kick_data; +}; + +struct rogue_fwif_kccb_cmd_force_update_data { + /* address of the firmware context */ + u32 context_fw_addr; + /* Client CCB fence offset */ + u32 ccb_fence_offset; +}; + +enum rogue_fwif_cleanup_type { + /* FW common context cleanup */ + ROGUE_FWIF_CLEANUP_FWCOMMONCONTEXT, + /* FW HW RT data cleanup */ + ROGUE_FWIF_CLEANUP_HWRTDATA, + /* FW freelist cleanup */ + ROGUE_FWIF_CLEANUP_FREELIST, + /* FW ZS Buffer cleanup */ + ROGUE_FWIF_CLEANUP_ZSBUFFER, +}; + +struct rogue_fwif_cleanup_request { + /* Cleanup type */ + enum rogue_fwif_cleanup_type cleanup_type; + union { + /* FW common context to cleanup */ + u32 context_fw_addr; + /* HW RT to cleanup */ + u32 hwrt_data_fw_addr; + /* Freelist to cleanup */ + u32 freelist_fw_addr; + /* ZS Buffer to cleanup */ + u32 zs_buffer_fw_addr; + } cleanup_data; +}; + +enum rogue_fwif_power_type { + ROGUE_FWIF_POW_OFF_REQ = 1, + ROGUE_FWIF_POW_FORCED_IDLE_REQ, + ROGUE_FWIF_POW_NUM_UNITS_CHANGE, + ROGUE_FWIF_POW_APM_LATENCY_CHANGE +}; + +enum rogue_fwif_power_force_idle_type { + ROGUE_FWIF_POWER_FORCE_IDLE = 1, + ROGUE_FWIF_POWER_CANCEL_FORCED_IDLE, + ROGUE_FWIF_POWER_HOST_TIMEOUT, +}; + +struct rogue_fwif_power_request { + /* Type of power request */ + enum rogue_fwif_power_type pow_type; + union { + /* Number of active Dusts */ + u32 num_of_dusts; + /* If the operation is mandatory */ + bool forced __aligned(4); + /* + * Type of Request. Consolidating Force Idle, Cancel Forced + * Idle, Host Timeout + */ + enum rogue_fwif_power_force_idle_type pow_request_type; + } power_req_data; +}; + +struct rogue_fwif_slcflushinvaldata { + /* Context to fence on (only useful when bDMContext == TRUE) */ + u32 context_fw_addr; + /* Invalidate the cache as well as flushing */ + bool inval __aligned(4); + /* The data to flush/invalidate belongs to a specific DM context */ + bool dm_context __aligned(4); + /* Optional address of range (only useful when bDMContext == FALSE) */ + aligned_u64 address; + /* Optional size of range (only useful when bDMContext == FALSE) */ + aligned_u64 size; +}; + +enum rogue_fwif_hwperf_update_config { + ROGUE_FWIF_HWPERF_CTRL_TOGGLE = 0, + ROGUE_FWIF_HWPERF_CTRL_SET = 1, + ROGUE_FWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2 +}; + +struct rogue_fwif_hwperf_ctrl { + enum rogue_fwif_hwperf_update_config opcode; /* Control operation code */ + aligned_u64 mask; /* Mask of events to toggle */ +}; + +struct rogue_fwif_hwperf_config_enable_blks { + /* Number of ROGUE_HWPERF_CONFIG_MUX_CNTBLK in the array */ + u32 num_blocks; + /* Address of the ROGUE_HWPERF_CONFIG_MUX_CNTBLK array */ + u32 block_configs_fw_addr; +}; + +struct rogue_fwif_hwperf_config_da_blks { + /* Number of ROGUE_HWPERF_CONFIG_CNTBLK in the array */ + u32 num_blocks; + /* Address of the ROGUE_HWPERF_CONFIG_CNTBLK array */ + u32 block_configs_fw_addr; +}; + +struct rogue_fwif_coreclkspeedchange_data { + u32 new_clock_speed; /* New clock speed */ +}; + +#define ROGUE_FWIF_HWPERF_CTRL_BLKS_MAX 16 + +struct rogue_fwif_hwperf_ctrl_blks { + bool enable; + /* Number of block IDs in the array */ + u32 num_blocks; + /* Array of ROGUE_HWPERF_CNTBLK_ID values */ + u16 block_ids[ROGUE_FWIF_HWPERF_CTRL_BLKS_MAX]; +}; + +struct rogue_fwif_hwperf_select_custom_cntrs { + u16 custom_block; + u16 num_counters; + u32 custom_counter_ids_fw_addr; +}; + +struct 
rogue_fwif_zsbuffer_backing_data {
+ u32 zs_buffer_fw_addr; /* ZS-Buffer FW address */
+
+ bool done __aligned(4); /* action backing/unbacking succeeded */
+};
+
+struct rogue_fwif_freelist_gs_data {
+ /* Freelist FW address */
+ u32 freelist_fw_addr;
+ /* Amount of the Freelist change */
+ u32 delta_pages;
+ /* New amount of pages on the freelist (including ready pages) */
+ u32 new_pages;
+ /* Number of ready pages to be held in reserve until OOM */
+ u32 ready_pages;
+};
+
+#define MAX_FREELISTS_SIZE 3
+#define MAX_HW_GEOM_FRAG_CONTEXTS_SIZE 3
+
+#define ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT \
+ (MAX_HW_GEOM_FRAG_CONTEXTS_SIZE * MAX_FREELISTS_SIZE * 2U)
+#define ROGUE_FWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U
+
+struct rogue_fwif_freelists_reconstruction_data {
+ u32 freelist_count;
+ u32 freelist_ids[ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT];
+};
+
+struct rogue_fwif_write_offset_update_data {
+ /*
+ * Context that may need to be resumed following write offset update
+ */
+ u32 context_fw_addr;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Proactive DVFS Structures
+ ******************************************************************************
+ */
+#define NUM_OPP_VALUES 16
+
+struct pdvfs_opp {
+ u32 volt; /* V */
+ u32 freq; /* Hz */
+} __aligned(8);
+
+struct rogue_fwif_pdvfs_opp {
+ struct pdvfs_opp opp_values[NUM_OPP_VALUES];
+ u32 min_opp_point;
+ u32 max_opp_point;
+} __aligned(8);
+
+struct rogue_fwif_pdvfs_max_freq_data {
+ u32 max_opp_point;
+} __aligned(8);
+
+struct rogue_fwif_pdvfs_min_freq_data {
+ u32 min_opp_point;
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Register configuration structures
+ ******************************************************************************
+ */
+
+#define ROGUE_FWIF_REG_CFG_MAX_SIZE 512
+
+enum rogue_fwif_regdata_cmd_type {
+ ROGUE_FWIF_REGCFG_CMD_ADD = 101,
+ ROGUE_FWIF_REGCFG_CMD_CLEAR = 102,
+ ROGUE_FWIF_REGCFG_CMD_ENABLE = 103,
+ ROGUE_FWIF_REGCFG_CMD_DISABLE = 104
+};
+
+enum rogue_fwif_reg_cfg_type {
+ /* Sidekick power event */
+ ROGUE_FWIF_REG_CFG_TYPE_PWR_ON = 0,
+ /* Rascal / dust power event */
+ ROGUE_FWIF_REG_CFG_TYPE_DUST_CHANGE,
+ /* Geometry kick */
+ ROGUE_FWIF_REG_CFG_TYPE_GEOM,
+ /* Fragment kick */
+ ROGUE_FWIF_REG_CFG_TYPE_FRAG,
+ /* Compute kick */
+ ROGUE_FWIF_REG_CFG_TYPE_CDM,
+ /* TLA kick */
+ ROGUE_FWIF_REG_CFG_TYPE_TLA,
+ /* TDM kick */
+ ROGUE_FWIF_REG_CFG_TYPE_TDM,
+ /* Applies to all types. Keep as last element */
+ ROGUE_FWIF_REG_CFG_TYPE_ALL
+};
+
+struct rogue_fwif_reg_cfg_rec {
+ u64 sddr;
+ u64 mask;
+ u64 value;
+};
+
+struct rogue_fwif_regconfig_data {
+ enum rogue_fwif_regdata_cmd_type cmd_type;
+ enum rogue_fwif_reg_cfg_type reg_config_type;
+ struct rogue_fwif_reg_cfg_rec reg_config __aligned(8);
+};
+
+struct rogue_fwif_reg_cfg {
+ /*
+ * PDump WRW command write granularity is 32 bits.
+ * Add padding to ensure array size is 32 bit granular.
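+ * For example, ROGUE_FWIF_REG_CFG_TYPE_ALL is 7, so
+ * ALIGN(7, sizeof(u32)) rounds the num_regs_type array up to 8 bytes,
+ * i.e. two whole 32-bit words.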
+ */ + u8 num_regs_type[ALIGN((u32)ROGUE_FWIF_REG_CFG_TYPE_ALL, + sizeof(u32))] __aligned(8); + struct rogue_fwif_reg_cfg_rec + reg_configs[ROGUE_FWIF_REG_CFG_MAX_SIZE] __aligned(8); +} __aligned(8); + +enum rogue_fwif_os_state_change { + ROGUE_FWIF_OS_ONLINE = 1, + ROGUE_FWIF_OS_OFFLINE +}; + +struct rogue_fwif_os_state_change_data { + u32 osid; + enum rogue_fwif_os_state_change new_os_state; +} __aligned(8); + +enum rogue_fwif_counter_dump_request { + ROGUE_FWIF_PWR_COUNTER_DUMP_START = 1, + ROGUE_FWIF_PWR_COUNTER_DUMP_STOP, + ROGUE_FWIF_PWR_COUNTER_DUMP_SAMPLE, +}; + +struct rogue_fwif_counter_dump_data { + enum rogue_fwif_counter_dump_request counter_dump_request; +} __aligned(8); + +enum rogue_fwif_kccb_cmd_type { + /* Common commands */ + ROGUE_FWIF_KCCB_CMD_KICK = 101U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + ROGUE_FWIF_KCCB_CMD_MMUCACHE = 102U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + ROGUE_FWIF_KCCB_CMD_BP = 103U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* SLC flush and invalidation request */ + ROGUE_FWIF_KCCB_CMD_SLCFLUSHINVAL = 105U | + ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* + * Requests cleanup of a FW resource (type specified in the command + * data) + */ + ROGUE_FWIF_KCCB_CMD_CLEANUP = 106U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Power request */ + ROGUE_FWIF_KCCB_CMD_POW = 107U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Backing for on-demand ZS-Buffer done */ + ROGUE_FWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = + 108U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Unbacking for on-demand ZS-Buffer done */ + ROGUE_FWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = + 109U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Freelist Grow done */ + ROGUE_FWIF_KCCB_CMD_FREELIST_GROW_UPDATE = + 110U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Freelists Reconstruction done */ + ROGUE_FWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = + 112U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* + * Informs the firmware that the host has added more data to a CDM2 + * Circular Buffer + */ + ROGUE_FWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = + 114U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Health check request */ + ROGUE_FWIF_KCCB_CMD_HEALTH_CHECK = 115U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Forcing signalling of all unmet UFOs for a given CCB offset */ + ROGUE_FWIF_KCCB_CMD_FORCE_UPDATE = 116U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + + /* There is a geometry and a fragment command in this single kick */ + ROGUE_FWIF_KCCB_CMD_COMBINED_GEOM_FRAG_KICK = 117U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Informs the FW that a Guest OS has come online / offline. 
*/ + ROGUE_FWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 118U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + + /* Commands only permitted to the native or host OS */ + ROGUE_FWIF_KCCB_CMD_REGCONFIG = 200U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + + /* Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ + ROGUE_FWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + + /* Enable or disable multiple HWPerf blocks (reusing existing configuration) */ + ROGUE_FWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Core clock speed change event */ + ROGUE_FWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + + /* + * Ask the firmware to update its cached ui32LogType value from the (shared) + * tracebuf control structure + */ + ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Set a maximum frequency/OPP point */ + ROGUE_FWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* + * Changes the relative scheduling priority for a particular OSid. It can + * only be serviced for the Host DDK + */ + ROGUE_FWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Set or clear firmware state flags */ + ROGUE_FWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + + /* Set a minimum frequency/OPP point */ + ROGUE_FWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Configure Periodic Hardware Reset behaviour */ + ROGUE_FWIF_KCCB_CMD_PHR_CFG = 213U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + + /* Configure Safety Firmware Watchdog */ + ROGUE_FWIF_KCCB_CMD_WDG_CFG = 215U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Controls counter dumping in the FW */ + ROGUE_FWIF_KCCB_CMD_COUNTER_DUMP = 216U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Configure, clear and enable multiple HWPerf blocks */ + ROGUE_FWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 217U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Configure the custom counters for HWPerf */ + ROGUE_FWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 218U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + + /* Configure directly addressable counters for HWPerf */ + ROGUE_FWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 220U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, +}; + +#define ROGUE_FWIF_LAST_ALLOWED_GUEST_KCCB_CMD \ + (ROGUE_FWIF_KCCB_CMD_REGCONFIG - 1) + +/* Kernel CCB command packet */ +struct rogue_fwif_kccb_cmd { + /* Command type */ + enum rogue_fwif_kccb_cmd_type cmd_type; + /* Compatibility and other flags */ + u32 kccb_flags; + + /* + * NOTE: Make sure that uCmdData is the last member of this struct + * This is to calculate actual command size for device mem copy. 
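+ * For example, a cleanup command can be copied as
+ * offsetof(struct rogue_fwif_kccb_cmd, cmd_data) +
+ * sizeof(struct rogue_fwif_cleanup_request) bytes rather than the full
+ * size of the union.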
+ * (Refer ROGUEGetCmdMemCopySize()) + */ + union { + /* Data for Kick command */ + struct rogue_fwif_kccb_cmd_kick_data cmd_kick_data; + /* Data for combined geom/frag Kick command */ + struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data + combined_geom_frag_cmd_kick_data; + /* Data for MMU cache command */ + struct rogue_fwif_mmucachedata mmu_cache_data; + /* Data for Breakpoint Commands */ + struct rogue_fwif_bpdata bp_data; + /* Data for SLC Flush/Inval commands */ + struct rogue_fwif_slcflushinvaldata slc_flush_inval_data; + /* Data for cleanup commands */ + struct rogue_fwif_cleanup_request cleanup_data; + /* Data for power request commands */ + struct rogue_fwif_power_request pow_data; + /* Data for HWPerf control command */ + struct rogue_fwif_hwperf_ctrl hw_perf_ctrl; + /* + * Data for HWPerf configure, clear and enable performance + * counter block command + */ + struct rogue_fwif_hwperf_config_enable_blks + hw_perf_cfg_enable_blks; + /* + * Data for HWPerf enable or disable performance counter block + * commands + */ + struct rogue_fwif_hwperf_ctrl_blks hw_perf_ctrl_blks; + /* Data for HWPerf configure the custom counters to read */ + struct rogue_fwif_hwperf_select_custom_cntrs + hw_perf_select_cstm_cntrs; + /* Data for HWPerf configure Directly Addressable blocks */ + struct rogue_fwif_hwperf_config_da_blks hw_perf_cfg_da_blks; + /* Data for core clock speed change */ + struct rogue_fwif_coreclkspeedchange_data + core_clk_speed_change_data; + /* Feedback for Z/S Buffer backing/unbacking */ + struct rogue_fwif_zsbuffer_backing_data zs_buffer_backing_data; + /* Feedback for Freelist grow/shrink */ + struct rogue_fwif_freelist_gs_data free_list_gs_data; + /* Feedback for Freelists reconstruction*/ + struct rogue_fwif_freelists_reconstruction_data + free_lists_reconstruction_data; + /* Data for custom register configuration */ + struct rogue_fwif_regconfig_data reg_config_data; + /* Data for informing the FW about the write offset update */ + struct rogue_fwif_write_offset_update_data + write_offset_update_data; + /* Data for setting the max frequency/OPP */ + struct rogue_fwif_pdvfs_max_freq_data pdvfs_max_freq_data; + /* Data for setting the min frequency/OPP */ + struct rogue_fwif_pdvfs_min_freq_data pdvfs_min_freq_data; + /* Data for updating the Guest Online states */ + struct rogue_fwif_os_state_change_data cmd_os_online_state_data; + /* Dev address for TBI buffer allocated on demand */ + u32 tbi_buffer_fw_addr; + /* Data for dumping of register ranges */ + struct rogue_fwif_counter_dump_data counter_dump_config_data; + /* Data for signalling all unmet fences for a given CCB */ + struct rogue_fwif_kccb_cmd_force_update_data force_update_data; + } cmd_data __aligned(8); +} __aligned(8); + +PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_fwif_kccb_cmd); + +/* + ****************************************************************************** + * Firmware CCB command structure for ROGUE + ****************************************************************************** + */ + +struct rogue_fwif_fwccb_cmd_zsbuffer_backing_data { + u32 zs_buffer_id; +}; + +struct rogue_fwif_fwccb_cmd_freelist_gs_data { + u32 freelist_id; +}; + +struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data { + u32 freelist_count; + u32 hwr_counter; + u32 freelist_ids[ROGUE_FWIF_MAX_FREELISTS_TO_RECONSTRUCT]; +}; + +/* 1 if a page fault happened */ +#define ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF BIT(0) +/* 1 if applicable to all contexts */ +#define ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS BIT(1) + 
+struct rogue_fwif_fwccb_cmd_context_reset_data { + /* Context affected by the reset */ + u32 server_common_context_id; + /* Reason for reset */ + enum rogue_context_reset_reason reset_reason; + /* Data Master affected by the reset */ + u32 dm; + /* Job ref running at the time of reset */ + u32 reset_job_ref; + /* ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */ + u32 flags; + /* At what page catalog address */ + aligned_u64 pc_address; + /* Page fault address (only when applicable) */ + aligned_u64 fault_address; +}; + +struct rogue_fwif_fwccb_cmd_fw_pagefault_data { + /* Page fault address */ + u64 fw_fault_addr; +}; + +enum rogue_fwif_fwccb_cmd_type { + /* Requests ZSBuffer to be backed with physical pages */ + ROGUE_FWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U | + ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Requests ZSBuffer to be unbacked */ + ROGUE_FWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U | + ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Requests an on-demand freelist grow/shrink */ + ROGUE_FWIF_FWCCB_CMD_FREELIST_GROW = 103U | + ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Requests freelists reconstruction */ + ROGUE_FWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION = + 104U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Notifies host of a HWR event on a context */ + ROGUE_FWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION = + 105U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Requests an on-demand debug dump */ + ROGUE_FWIF_FWCCB_CMD_DEBUG_DUMP = 106U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + /* Requests an on-demand update on process stats */ + ROGUE_FWIF_FWCCB_CMD_UPDATE_STATS = 107U | + ROGUE_CMD_MAGIC_DWORD_SHIFTED, + + ROGUE_FWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = + 108U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + ROGUE_FWIF_FWCCB_CMD_REQUEST_GPU_RESTART = + 109U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, + + /* Notifies host of a FW pagefault */ + ROGUE_FWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION = + 112U | ROGUE_CMD_MAGIC_DWORD_SHIFTED, +}; + +enum rogue_fwif_fwccb_cmd_update_stats_type { + /* + * PVRSRVStatsUpdateRenderContextStats should increase the value of the + * ui32TotalNumPartialRenders stat + */ + ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS = 1, + /* + * PVRSRVStatsUpdateRenderContextStats should increase the value of the + * ui32TotalNumOutOfMemory stat + */ + ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY, + /* + * PVRSRVStatsUpdateRenderContextStats should increase the value of the + * ui32NumGeomStores stat + */ + ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_GEOM_STORES, + /* + * PVRSRVStatsUpdateRenderContextStats should increase the value of the + * ui32NumFragStores stat + */ + ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_FRAG_STORES, + /* + * PVRSRVStatsUpdateRenderContextStats should increase the value of the + * ui32NumCDMStores stat + */ + ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES, + /* + * PVRSRVStatsUpdateRenderContextStats should increase the value of the + * ui32NumTDMStores stat + */ + ROGUE_FWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES +}; + +struct rogue_fwif_fwccb_cmd_update_stats_data { + /* Element to update */ + enum rogue_fwif_fwccb_cmd_update_stats_type element_to_update; + /* The pid of the process whose stats are being updated */ + u32 pid_owner; + /* Adjustment to be made to the statistic */ + s32 adjustment_value; +}; + +struct rogue_fwif_fwccb_cmd_core_clk_rate_change_data { + u32 core_clk_rate; +} __aligned(8); + +struct rogue_fwif_fwccb_cmd { + /* Command type */ + enum rogue_fwif_fwccb_cmd_type cmd_type; + /* Compatibility and other flags */ + u32 fwccb_flags; + + union { + /* Data for Z/S-Buffer on-demand (un)backing*/ + struct 
rogue_fwif_fwccb_cmd_zsbuffer_backing_data + cmd_zs_buffer_backing; + /* Data for on-demand freelist grow/shrink */ + struct rogue_fwif_fwccb_cmd_freelist_gs_data cmd_free_list_gs; + /* Data for freelists reconstruction */ + struct rogue_fwif_fwccb_cmd_freelists_reconstruction_data + cmd_freelists_reconstruction; + /* Data for context reset notification */ + struct rogue_fwif_fwccb_cmd_context_reset_data + cmd_context_reset_notification; + /* Data for updating process stats */ + struct rogue_fwif_fwccb_cmd_update_stats_data + cmd_update_stats_data; + struct rogue_fwif_fwccb_cmd_core_clk_rate_change_data + cmd_core_clk_rate_change; + struct rogue_fwif_fwccb_cmd_fw_pagefault_data cmd_fw_pagefault; + } cmd_data __aligned(8); +} __aligned(8); + +PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_fwif_fwccb_cmd); + +/* + ****************************************************************************** + * Workload estimation Firmware CCB command structure for ROGUE + ****************************************************************************** + */ +struct rogue_fwif_workest_fwccb_cmd { + /* Index for return data array */ + u16 return_data_index; + /* The cycles the workload took on the hardware */ + u32 cycles_taken; +}; + +/* + ****************************************************************************** + * Client CCB commands for ROGUE + ****************************************************************************** + */ + +/* + * Required memory alignment for 64-bit variables accessible by Meta + * (The gcc meta aligns 64-bit variables to 64-bit; therefore, memory shared + * between the host and meta that contains 64-bit variables has to maintain + * this alignment) + */ +#define ROGUE_FWIF_FWALLOC_ALIGN sizeof(u64) + +#define ROGUE_CCB_TYPE_TASK BIT(15) +#define ROGUE_CCB_FWALLOC_ALIGN(size) \ + (((size) + (ROGUE_FWIF_FWALLOC_ALIGN - 1)) & \ + ~(ROGUE_FWIF_FWALLOC_ALIGN - 1)) + +#define ROGUE_FWIF_CCB_CMD_TYPE_GEOM \ + (201U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK) +#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_3D \ + (202U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK) +#define ROGUE_FWIF_CCB_CMD_TYPE_FRAG \ + (203U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK) +#define ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR \ + (204U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK) +#define ROGUE_FWIF_CCB_CMD_TYPE_CDM \ + (205U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK) +#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_TDM \ + (206U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK) +#define ROGUE_FWIF_CCB_CMD_TYPE_FBSC_INVALIDATE \ + (207U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK) +#define ROGUE_FWIF_CCB_CMD_TYPE_TQ_2D \ + (208U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK) +#define ROGUE_FWIF_CCB_CMD_TYPE_PRE_TIMESTAMP \ + (209U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK) +#define ROGUE_FWIF_CCB_CMD_TYPE_NULL \ + (210U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK) +#define ROGUE_FWIF_CCB_CMD_TYPE_ABORT \ + (211U | ROGUE_CMD_MAGIC_DWORD_SHIFTED | ROGUE_CCB_TYPE_TASK) + +/* Leave a gap between CCB specific commands and generic commands */ +#define ROGUE_FWIF_CCB_CMD_TYPE_FENCE (212U | ROGUE_CMD_MAGIC_DWORD_SHIFTED) +#define ROGUE_FWIF_CCB_CMD_TYPE_UPDATE (213U | ROGUE_CMD_MAGIC_DWORD_SHIFTED) +#define ROGUE_FWIF_CCB_CMD_TYPE_RMW_UPDATE \ + (214U | ROGUE_CMD_MAGIC_DWORD_SHIFTED) +#define ROGUE_FWIF_CCB_CMD_TYPE_FENCE_PR (215U | ROGUE_CMD_MAGIC_DWORD_SHIFTED) +#define ROGUE_FWIF_CCB_CMD_TYPE_PRIORITY (216U | ROGUE_CMD_MAGIC_DWORD_SHIFTED) +/* + * Pre and 
Post timestamp commands are supposed to sandwich the DM cmd. The + * padding code with the CCB wrap upsets the FW if we don't have the task type + * bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types. + */ +#define ROGUE_FWIF_CCB_CMD_TYPE_POST_TIMESTAMP \ + (217U | ROGUE_CMD_MAGIC_DWORD_SHIFTED) +#define ROGUE_FWIF_CCB_CMD_TYPE_UNFENCED_UPDATE \ + (218U | ROGUE_CMD_MAGIC_DWORD_SHIFTED) +#define ROGUE_FWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE \ + (219U | ROGUE_CMD_MAGIC_DWORD_SHIFTED) + +#define ROGUE_FWIF_CCB_CMD_TYPE_PADDING (221U | ROGUE_CMD_MAGIC_DWORD_SHIFTED) + +struct rogue_fwif_workest_kick_data { + /* Index for the KM Workload estimation return data array */ + u16 return_data_index __aligned(8); + /* Predicted time taken to do the work in cycles */ + u32 cycles_prediction __aligned(8); + /* Deadline for the workload */ + aligned_u64 deadline; +}; + +struct rogue_fwif_ccb_cmd_header { + u32 cmd_type; + u32 cmd_size; + /* + * external job reference - provided by client and used in debug for + * tracking submitted work + */ + u32 ext_job_ref; + /* + * internal job reference - generated by services and used in debug for + * tracking submitted work + */ + u32 int_job_ref; + /* Workload Estimation - Workload Estimation Data */ + struct rogue_fwif_workest_kick_data work_est_kick_data __aligned(8); +}; + +/* + ****************************************************************************** + * Client CCB commands which are only required by the kernel + ****************************************************************************** + */ +struct rogue_fwif_cmd_priority { + s32 priority; +}; + +/* + ****************************************************************************** + * Signature and Checksums Buffer + ****************************************************************************** + */ +struct rogue_fwif_sigbuf_ctl { + /* Ptr to Signature Buffer memory */ + u32 buffer_fw_addr; + /* Amount of space left for storing regs in the buffer */ + u32 left_size_in_regs; +} __aligned(8); + +struct rogue_fwif_counter_dump_ctl { + /* Ptr to counter dump buffer */ + u32 buffer_fw_addr; + /* Amount of space for storing in the buffer */ + u32 size_in_dwords; +} __aligned(8); + +struct rogue_fwif_firmware_gcov_ctl { + /* Ptr to firmware gcov buffer */ + u32 buffer_fw_addr; + /* Amount of space for storing in the buffer */ + u32 size; +} __aligned(8); + +/* + ***************************************************************************** + * ROGUE Compatibility checks + ***************************************************************************** + */ + +/* + * WARNING: Whenever the layout of ROGUE_FWIF_COMPCHECKS_BVNC changes, the + * following define should be increased by 1 to indicate to the compatibility + * logic that layout has changed. 
+ */ +#define ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION 3 + +struct rogue_fwif_compchecks_bvnc { + /* WARNING: This field must be defined as first one in this structure */ + u32 layout_version; + aligned_u64 bvnc; +} __aligned(8); + +struct rogue_fwif_init_options { + u8 os_count_support; + u8 padding[7]; +} __aligned(8); + +#define ROGUE_FWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \ + struct rogue_fwif_compchecks_bvnc(name) = { \ + ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION, \ + 0, \ + } + +static inline void rogue_fwif_compchecks_bvnc_init(struct rogue_fwif_compchecks_bvnc *compchecks) +{ + compchecks->layout_version = ROGUE_FWIF_COMPCHECKS_LAYOUT_VERSION; + compchecks->bvnc = 0; +} + +struct rogue_fwif_compchecks { + /* hardware BVNC (from the ROGUE registers) */ + struct rogue_fwif_compchecks_bvnc hw_bvnc; + /* firmware BVNC */ + struct rogue_fwif_compchecks_bvnc fw_bvnc; + /* identifier of the FW processor version */ + u32 fw_processor_version; + /* software DDK version */ + u32 ddk_version; + /* software DDK build no. */ + u32 ddk_build; + /* build options bit-field */ + u32 build_options; + /* initialisation options bit-field */ + struct rogue_fwif_init_options init_options; + /* Information is valid */ + bool updated __aligned(4); + u32 padding; +} __aligned(8); + +/* + ****************************************************************************** + * Updated configuration post FW data init. + ****************************************************************************** + */ +struct rogue_fwif_runtime_cfg { + /* APM latency in ms before signalling IDLE to the host */ + u32 active_pm_latency_ms; + /* Compatibility and other flags */ + u32 runtime_cfg_flags; + /* + * If set, APM latency does not reset to system default each GPU power + * transition + */ + bool active_pm_latency_persistant __aligned(4); + /* Core clock speed, currently only used to calculate timer ticks */ + u32 core_clock_speed; + /* Last number of dusts change requested by the host */ + u32 default_dusts_num_init; + /* Periodic Hardware Reset configuration values */ + u32 phr_mode; + /* New number of milliseconds C/S is allowed to last */ + u32 hcs_deadline_ms; + /* The watchdog period in microseconds */ + u32 wdg_period_us; + /* Array of priorities per OS */ + u32 osid_priority[ROGUE_FW_MAX_NUM_OS]; + /* On-demand allocated HWPerf buffer address, to be passed to the FW */ + u32 hwperf_buf_fw_addr; + + bool padding __aligned(4); +}; + +/* + ***************************************************************************** + * Control data for ROGUE + ***************************************************************************** + */ + +#define ROGUE_FWIF_HWR_DEBUG_DUMP_ALL (99999U) + +enum rogue_fwif_tpu_dm { + ROGUE_FWIF_TPU_DM_PDM = 0, + ROGUE_FWIF_TPU_DM_VDM = 1, + ROGUE_FWIF_TPU_DM_CDM = 2, + ROGUE_FWIF_TPU_DM_TDM = 3, + ROGUE_FWIF_TPU_DM_LAST +}; + +enum rogue_fwif_gpio_val_mode { + /* No GPIO validation */ + ROGUE_FWIF_GPIO_VAL_OFF = 0, + /* + * Simple test case that initiates by sending data via the GPIO and then + * sends back any data received over the GPIO + */ + ROGUE_FWIF_GPIO_VAL_GENERAL = 1, + /* + * More complex test case that writes and reads data across the entire + * GPIO AP address range. + */ + ROGUE_FWIF_GPIO_VAL_AP = 2, + /* Validates the GPIO Testbench. */ + ROGUE_FWIF_GPIO_VAL_TESTBENCH = 5, + /* Send and then receive each byte in the range 0-255. */ + ROGUE_FWIF_GPIO_VAL_LOOPBACK = 6, + /* Send and then receive each power-of-2 byte in the range 0-255. 
*/
+ ROGUE_FWIF_GPIO_VAL_LOOPBACK_LITE = 7,
+ ROGUE_FWIF_GPIO_VAL_LAST
+};
+
+enum fw_perf_conf {
+ FW_PERF_CONF_NONE = 0,
+ FW_PERF_CONF_ICACHE = 1,
+ FW_PERF_CONF_DCACHE = 2,
+ FW_PERF_CONF_JTLB_INSTR = 5,
+ FW_PERF_CONF_INSTRUCTIONS = 6
+};
+
+enum fw_boot_stage {
+ FW_BOOT_STAGE_TLB_INIT_FAILURE = -2,
+ FW_BOOT_STAGE_NOT_AVAILABLE = -1,
+ FW_BOOT_NOT_STARTED = 0,
+ FW_BOOT_BLDR_STARTED = 1,
+ FW_BOOT_CACHE_DONE,
+ FW_BOOT_TLB_DONE,
+ FW_BOOT_MAIN_STARTED,
+ FW_BOOT_ALIGNCHECKS_DONE,
+ FW_BOOT_INIT_DONE,
+};
+
+/*
+ * Kernel CCB return slot responses. Usage of bit-fields instead of bare
+ * integers allows FW to possibly pack in several responses for each single
+ * kCCB command.
+ */
+/* Command executed (return status from FW) */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_CMD_EXECUTED BIT(0)
+/* A cleanup was requested but resource busy */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_CLEANUP_BUSY BIT(1)
+/* Poll failed in FW for a HW operation to complete */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_POLL_FAILURE BIT(2)
+/* Reset value of a kCCB return slot (set by host) */
+#define ROGUE_FWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U
+
+struct rogue_fwif_connection_ctl {
+ /* FW-OS connection states */
+ enum rogue_fwif_connection_fw_state connection_fw_state;
+ enum rogue_fwif_connection_os_state connection_os_state;
+ u32 alive_fw_token;
+ u32 alive_os_token;
+} __aligned(8);
+
+struct rogue_fwif_osinit {
+ /* Kernel CCB */
+ u32 kernel_ccbctl_fw_addr;
+ u32 kernel_ccb_fw_addr;
+ u32 kernel_ccb_rtn_slots_fw_addr;
+
+ /* Firmware CCB */
+ u32 firmware_ccbctl_fw_addr;
+ u32 firmware_ccb_fw_addr;
+
+ /* Workload Estimation Firmware CCB */
+ u32 work_est_firmware_ccbctl_fw_addr;
+ u32 work_est_firmware_ccb_fw_addr;
+
+ u32 rogue_fwif_hwr_info_buf_ctl_fw_addr;
+
+ u32 hwr_debug_dump_limit;
+
+ u32 fw_os_data_fw_addr;
+
+ /* Compatibility checks to be populated by the Firmware */
+ struct rogue_fwif_compchecks rogue_comp_checks;
+} __aligned(8);
+
+/* BVNC Features */
+struct rogue_hwperf_bvnc_block {
+ /* Counter block ID, see ROGUE_HWPERF_CNTBLK_ID */
+ u16 block_id;
+
+ /* Number of counters in this block type */
+ u16 num_counters;
+
+ /* Number of blocks of this type */
+ u16 num_blocks;
+
+ u16 reserved;
+};
+
+#define ROGUE_HWPERF_MAX_BVNC_LEN (24)
+
+#define ROGUE_HWPERF_MAX_BVNC_BLOCK_LEN (16U)
+
+/* BVNC Features */
+struct rogue_hwperf_bvnc {
+ /* BVNC string */
+ char bvnc_string[ROGUE_HWPERF_MAX_BVNC_LEN];
+ /* See ROGUE_HWPERF_FEATURE_FLAGS */
+ u32 bvnc_km_feature_flags;
+ /* Number of blocks described in aBvncBlocks */
+ u16 num_bvnc_blocks;
+ /* Number of GPU cores present */
+ u16 bvnc_gpu_cores;
+ /* Supported Performance Blocks for BVNC */
+ struct rogue_hwperf_bvnc_block
+ bvnc_blocks[ROGUE_HWPERF_MAX_BVNC_BLOCK_LEN];
+};
+
+PVR_FW_STRUCT_SIZE_ASSERT(struct rogue_hwperf_bvnc);
+
+struct rogue_fwif_sysinit {
+ /* Fault read address */
+ aligned_u64 fault_phys_addr;
+
+ /* PDS execution base */
+ aligned_u64 pds_exec_base;
+ /* USC execution base */
+ aligned_u64 usc_exec_base;
+ /* FBCDC bindless texture state table base */
+ aligned_u64 fbcdc_state_table_base;
+ aligned_u64 fbcdc_large_state_table_base;
+ /* Texture state base */
+ aligned_u64 texture_heap_base;
+
+ /* Event filter for Firmware events */
+ u64 hw_perf_filter;
+
+ aligned_u64 slc3_fence_dev_addr;
+
+ u32 tpu_trilinear_frac_mask[ROGUE_FWIF_TPU_DM_LAST] __aligned(8);
+
+ /* Signature and Checksum Buffers for DMs */
+ struct rogue_fwif_sigbuf_ctl sigbuf_ctl[PVR_FWIF_DM_MAX];
+
+ struct rogue_fwif_pdvfs_opp pdvfs_opp_info;
+
+ struct
rogue_fwif_dma_addr coremem_data_store; + + struct rogue_fwif_counter_dump_ctl counter_dump_ctl; + + u32 filter_flags; + + u32 runtime_cfg_fw_addr; + + u32 trace_buf_ctl_fw_addr; + u32 fw_sys_data_fw_addr; + + u32 gpu_util_fw_cb_ctl_fw_addr; + u32 reg_cfg_fw_addr; + u32 hwperf_ctl_fw_addr; + + u32 align_checks; + + /* Core clock speed at FW boot time */ + u32 initial_core_clock_speed; + + /* APM latency in ms before signalling IDLE to the host */ + u32 active_pm_latency_ms; + + /* Flag to be set by the Firmware after successful start */ + bool firmware_started __aligned(4); + + /* Host/FW Trace synchronisation Partition Marker */ + u32 marker_val; + + /* Firmware initialization complete time */ + u32 firmware_started_timestamp; + + u32 jones_disable_mask; + + /* Firmware performance counter config */ + enum fw_perf_conf firmware_perf; + + /* + * FW Pointer to memory containing core clock rate in Hz. + * Firmware (PDVFS) updates the memory when running on non primary FW + * thread to communicate to host driver. + */ + u32 core_clock_rate_fw_addr; + + enum rogue_fwif_gpio_val_mode gpio_validation_mode; + + /* Used in HWPerf for decoding BVNC Features */ + struct rogue_hwperf_bvnc bvnc_km_feature_flags; + + /* Value to write into ROGUE_CR_TFBC_COMPRESSION_CONTROL */ + u32 tfbc_compression_control; +} __aligned(8); + +/* + ***************************************************************************** + * Timer correlation shared data and defines + ***************************************************************************** + */ + +struct rogue_fwif_time_corr { + aligned_u64 os_timestamp; + aligned_u64 os_mono_timestamp; + aligned_u64 cr_timestamp; + + /* + * Utility variable used to convert CR timer deltas to OS timer deltas + * (nS), where the deltas are relative to the timestamps above: + * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below + */ + aligned_u64 cr_delta_to_os_delta_kns; + + u32 core_clock_speed; + u32 reserved; +} __aligned(8); + +/* + * The following macros are used to help converting FW timestamps to the Host + * time domain. On the FW the ROGUE_CR_TIMER counter is used to keep track of + * time; it increments by 1 every 256 GPU clock ticks, so the general + * formula to perform the conversion is: + * + * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS, + * otherwise if (scale == 10^6) then deltaOS is in uS ] + * + * deltaCR * 256 256 * scale + * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ] + * GPUclockspeed GPUclockspeed + * + * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20) + * to get some better accuracy and to avoid returning 0 in the integer + * division 256000000/GPUfreq if GPUfreq is greater than 256MHz. + * This is the same as keeping K as a decimal number. + * + * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies + * (deltaCR * K is more or less a constant), and it's relative to the base + * OS timestamp sampled as a part of the timer correlation data. + * This base is refreshed on GPU power-on, DVFS transition and periodic + * frequency calibration (executed every few seconds if the FW is doing + * some work), so as long as the GPU is doing something and one of these + * events is triggered then deltaCR * K will not overflow and deltaOS will be + * correct. 
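+ *
+ * Worked example with illustrative figures: for a 600 MHz GPU clock and
+ * scale = 10^9, K = ((256 * 10^9) / 600000000) << 20 ~= 447392427, so a
+ * deltaCR of 1000000 gives
+ * deltaOS = (1000000 * K) >> 20 ~= 426666667 nS (~0.43 s).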
+ */
+
+#define ROGUE_FWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20)
+
+#define ROGUE_FWIF_GET_DELTA_OSTIME_NS(delta_cr, k) \
+ (((delta_cr) * (k)) >> ROGUE_FWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
+
+/*
+ ******************************************************************************
+ * GPU Utilisation
+ ******************************************************************************
+ */
+
+/* See rogue_common.h for a list of GPU states */
+#define ROGUE_FWIF_GPU_UTIL_TIME_MASK \
+ (0xFFFFFFFFFFFFFFFFull & ~ROGUE_FWIF_GPU_UTIL_STATE_MASK)
+
+#define ROGUE_FWIF_GPU_UTIL_GET_TIME(word) \
+ ((word) & ROGUE_FWIF_GPU_UTIL_TIME_MASK)
+#define ROGUE_FWIF_GPU_UTIL_GET_STATE(word) \
+ ((word) & ROGUE_FWIF_GPU_UTIL_STATE_MASK)
+
+/*
+ * The OS timestamps computed by the FW are approximations of the real time,
+ * which means they could be slightly behind or ahead of the real timer on the
+ * Host. In some cases we can perform subtractions between FW approximated
+ * timestamps and real OS timestamps, so we need a form of protection against
+ * negative results if for instance the FW one is a bit ahead of time.
+ */
+#define ROGUE_FWIF_GPU_UTIL_GET_PERIOD(newtime, oldtime) \
+ (((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U)
+
+#define ROGUE_FWIF_GPU_UTIL_MAKE_WORD(time, state) \
+ (ROGUE_FWIF_GPU_UTIL_GET_TIME(time) | \
+ ROGUE_FWIF_GPU_UTIL_GET_STATE(state))
+
+/*
+ * The timer correlation array must be big enough to ensure old entries won't be
+ * overwritten before all the HWPerf events linked to those entries are
+ * processed by the MISR. The update frequency of this array depends on how fast
+ * the system can change state (basically how small the APM latency is) and
+ * perform DVFS transitions.
+ *
+ * The minimum size is 2 (not 1) to avoid race conditions between the FW reading
+ * an entry while the Host is updating it. With 2 entries in the worst case the
+ * FW will read old data, which is still quite ok if the Host is updating the
+ * timer correlation at that time.
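+ *
+ * For example, with the 256-entry array below, seqcount 260 selects
+ * entry 260 % 256 = 4; keeping the size a power of two lets the modulo
+ * reduce to a cheap bitwise AND.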
+ */ +#define ROGUE_FWIF_TIME_CORR_ARRAY_SIZE 256U +#define ROGUE_FWIF_TIME_CORR_CURR_INDEX(seqcount) \ + ((seqcount) % ROGUE_FWIF_TIME_CORR_ARRAY_SIZE) + +/* Make sure the timer correlation array size is a power of 2 */ +static_assert((ROGUE_FWIF_TIME_CORR_ARRAY_SIZE & + (ROGUE_FWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U, + "ROGUE_FWIF_TIME_CORR_ARRAY_SIZE must be a power of two"); + +struct rogue_fwif_gpu_util_fwcb { + struct rogue_fwif_time_corr time_corr[ROGUE_FWIF_TIME_CORR_ARRAY_SIZE]; + u32 time_corr_seq_count; + + /* Compatibility and other flags */ + u32 gpu_util_flags; + + /* Last GPU state + OS time of the last state update */ + aligned_u64 last_word; + + /* Counters for the amount of time the GPU was active/idle/blocked */ + aligned_u64 stats_counters[PVR_FWIF_GPU_UTIL_STATE_NUM]; +} __aligned(8); + +struct rogue_fwif_rta_ctl { + /* Render number */ + u32 render_target_index; + /* index in RTA */ + u32 current_render_target; + /* total active RTs */ + u32 active_render_targets; + /* total active RTs from the first TA kick, for OOM */ + u32 cumul_active_render_targets; + /* Array of valid RT indices */ + u32 valid_render_targets_fw_addr; + /* Array of number of occurred partial renders per render target */ + u32 rta_num_partial_renders_fw_addr; + /* Number of render targets in the array */ + u32 max_rts; + /* Compatibility and other flags */ + u32 rta_ctl_flags; +} __aligned(8); + +struct rogue_fwif_freelist { + aligned_u64 freelist_dev_addr; + aligned_u64 current_dev_addr; + u32 current_stack_top; + u32 max_pages; + u32 grow_pages; + /* HW pages */ + u32 current_pages; + u32 allocated_page_count; + u32 allocated_mmu_page_count; + u32 freelist_id; + + bool grow_pending __aligned(4); + /* Pages that should be used only when OOM is reached */ + u32 ready_pages; + /* Compatibility and other flags */ + u32 freelist_flags; + /* PM Global PB on which Freelist is loaded */ + u32 pm_global_pb; + u32 padding; +} __aligned(8); + +/* + ****************************************************************************** + * HWRTData + ****************************************************************************** + */ + +/* HWRTData flags */ +/* Deprecated flags 1:0 */ +#define HWRTDATA_HAS_LAST_GEOM BIT(2) +#define HWRTDATA_PARTIAL_RENDERED BIT(3) +#define HWRTDATA_DISABLE_TILE_REORDERING BIT(4) +#define HWRTDATA_NEED_BRN65101_BLIT BIT(5) +#define HWRTDATA_FIRST_BRN65101_STRIP BIT(6) +#define HWRTDATA_NEED_BRN67182_2ND_RENDER BIT(7) + +enum rogue_fwif_rtdata_state { + ROGUE_FWIF_RTDATA_STATE_NONE = 0, + ROGUE_FWIF_RTDATA_STATE_KICK_GEOM, + ROGUE_FWIF_RTDATA_STATE_KICK_GEOM_FIRST, + ROGUE_FWIF_RTDATA_STATE_GEOM_FINISHED, + ROGUE_FWIF_RTDATA_STATE_KICK_FRAG, + ROGUE_FWIF_RTDATA_STATE_FRAG_FINISHED, + ROGUE_FWIF_RTDATA_STATE_FRAG_CONTEXT_STORED, + ROGUE_FWIF_RTDATA_STATE_GEOM_OUTOFMEM, + ROGUE_FWIF_RTDATA_STATE_PARTIALRENDERFINISHED, + /* + * In case of HWR, we can't set the RTDATA state to NONE, as this will + * cause any TA to become a first TA. 
+
+struct rogue_fwif_hwrtdata_common {
+	bool geom_caches_need_zeroing __aligned(4);
+
+	u32 screen_pixel_max;
+	aligned_u64 multi_sample_ctl;
+	u64 flipped_multi_sample_ctl;
+	u32 tpc_stride;
+	u32 tpc_size;
+	u32 te_screen;
+	u32 mtile_stride;
+	u32 teaa;
+	u32 te_mtile1;
+	u32 te_mtile2;
+	u32 isp_merge_lower_x;
+	u32 isp_merge_lower_y;
+	u32 isp_merge_upper_x;
+	u32 isp_merge_upper_y;
+	u32 isp_merge_scale_x;
+	u32 isp_merge_scale_y;
+	u32 rgn_header_size;
+	u32 isp_mtile_size;
+	u32 padding;
+} __aligned(8);
+
+struct rogue_fwif_hwrtdata {
+	/* MList Data Store */
+	aligned_u64 pm_mlist_dev_addr;
+
+	aligned_u64 vce_cat_base[4];
+	aligned_u64 vce_last_cat_base[4];
+	aligned_u64 te_cat_base[4];
+	aligned_u64 te_last_cat_base[4];
+	aligned_u64 alist_cat_base;
+	aligned_u64 alist_last_cat_base;
+
+	aligned_u64 pm_alist_stack_pointer;
+	u32 pm_mlist_stack_pointer;
+
+	u32 hwrt_data_common_fw_addr;
+
+	u32 hwrt_data_flags;
+	enum rogue_fwif_rtdata_state state;
+
+	u32 freelists_fw_addr[MAX_FREELISTS_SIZE] __aligned(8);
+	u32 freelist_hwr_snapshot[MAX_FREELISTS_SIZE];
+
+	aligned_u64 vheap_table_dev_addr;
+
+	struct rogue_fwif_rta_ctl rta_ctl;
+
+	aligned_u64 tail_ptrs_dev_addr;
+	aligned_u64 macrotile_array_dev_addr;
+	aligned_u64 rgn_header_dev_addr;
+	aligned_u64 rtc_dev_addr;
+
+	u32 owner_geom_not_used_by_host __aligned(8);
+
+	bool geom_caches_need_zeroing __aligned(4);
+
+	struct rogue_fwif_cleanup_ctl cleanup_state __aligned(64);
+} __aligned(8);
+
+/*
+ ******************************************************************************
+ * Sync checkpoints
+ ******************************************************************************
+ */
+
+#define PVR_SYNC_CHECKPOINT_UNDEF 0x000
+#define PVR_SYNC_CHECKPOINT_ACTIVE 0xac1 /* Checkpoint has not signaled. */
+#define PVR_SYNC_CHECKPOINT_SIGNALED 0x519 /* Checkpoint has signaled. */
+#define PVR_SYNC_CHECKPOINT_ERRORED 0xeff /* Checkpoint has been errored. */
+
+#include "pvr_rogue_fwif_check.h"
+
+#endif /* PVR_ROGUE_FWIF_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h
new file mode 100644
index 0000000000..51dc37e78f
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_check.h
@@ -0,0 +1,493 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_CHECK_H
+#define PVR_ROGUE_FWIF_CHECK_H
+
+#include <linux/build_bug.h>
+
+#define OFFSET_CHECK(type, member, offset) \
+	static_assert(offsetof(type, member) == (offset), \
+		      "offsetof(" #type ", " #member ") incorrect")
+
+#define SIZE_CHECK(type, size) \
+	static_assert(sizeof(type) == (size), #type " is incorrect size")
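+
+/*
+ * Illustrative only (toy struct and offsets, not part of the interface): if
+ * a source or compiler change moves or resizes any member of a structure
+ * shared with the FW, these checks turn silent ABI breakage into an
+ * immediate build failure.
+ */
+struct pvr_example_abi_sketch {
+	u64 timestamp;	/* bytes 0..7 */
+	u32 flags;	/* bytes 8..11 */
+	u32 padding;	/* bytes 12..15 */
+};
+
+OFFSET_CHECK(struct pvr_example_abi_sketch, timestamp, 0);
+OFFSET_CHECK(struct pvr_example_abi_sketch, flags, 8);
+SIZE_CHECK(struct pvr_example_abi_sketch, 16);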
+
+OFFSET_CHECK(struct rogue_fwif_file_info_buf, path, 0);
+OFFSET_CHECK(struct rogue_fwif_file_info_buf, info, 200);
+OFFSET_CHECK(struct rogue_fwif_file_info_buf, line_num, 400);
+SIZE_CHECK(struct rogue_fwif_file_info_buf, 408);
+
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_pointer, 0);
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_buffer_fw_addr, 4);
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, trace_buffer, 8);
+OFFSET_CHECK(struct rogue_fwif_tracebuf_space, assert_buf, 16);
+SIZE_CHECK(struct rogue_fwif_tracebuf_space, 424);
+
+OFFSET_CHECK(struct rogue_fwif_tracebuf, log_type, 0);
+OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf, 8);
+OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf_size_in_dwords, 856);
+OFFSET_CHECK(struct rogue_fwif_tracebuf, tracebuf_flags, 860);
+SIZE_CHECK(struct rogue_fwif_tracebuf, 864);
+
+OFFSET_CHECK(struct rogue_fw_fault_info, cr_timer, 0);
+OFFSET_CHECK(struct rogue_fw_fault_info, os_timer, 8);
+OFFSET_CHECK(struct rogue_fw_fault_info, data, 16);
+OFFSET_CHECK(struct rogue_fw_fault_info, reserved, 20);
+OFFSET_CHECK(struct rogue_fw_fault_info, fault_buf, 24);
+SIZE_CHECK(struct rogue_fw_fault_info, 432);
+
+OFFSET_CHECK(struct rogue_fwif_sysdata, config_flags, 0);
+OFFSET_CHECK(struct rogue_fwif_sysdata, config_flags_ext, 4);
+OFFSET_CHECK(struct rogue_fwif_sysdata, pow_state, 8);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_ridx, 12);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_widx, 16);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_wrap_count, 20);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_size, 24);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_drop_count, 28);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hw_perf_ut, 32);
+OFFSET_CHECK(struct rogue_fwif_sysdata, first_drop_ordinal, 36);
+OFFSET_CHECK(struct rogue_fwif_sysdata, last_drop_ordinal, 40);
+OFFSET_CHECK(struct rogue_fwif_sysdata, os_runtime_flags_mirror, 44);
+OFFSET_CHECK(struct rogue_fwif_sysdata, fault_info, 80);
+OFFSET_CHECK(struct rogue_fwif_sysdata, fw_faults, 3536);
+OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_addr, 3540);
+OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_mask, 3548);
+OFFSET_CHECK(struct rogue_fwif_sysdata, cr_poll_count, 3556);
+OFFSET_CHECK(struct rogue_fwif_sysdata, start_idle_time, 3568);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hwr_state_flags, 3576);
+OFFSET_CHECK(struct rogue_fwif_sysdata, hwr_recovery_flags, 3580);
+OFFSET_CHECK(struct rogue_fwif_sysdata, fw_sys_data_flags, 3616);
+OFFSET_CHECK(struct rogue_fwif_sysdata, mc_config, 3620);
+SIZE_CHECK(struct rogue_fwif_sysdata, 3624);
+
+OFFSET_CHECK(struct rogue_fwif_slr_entry, timestamp, 0);
+OFFSET_CHECK(struct rogue_fwif_slr_entry, fw_ctx_addr, 8);
+OFFSET_CHECK(struct rogue_fwif_slr_entry, num_ufos, 12);
+OFFSET_CHECK(struct rogue_fwif_slr_entry, ccb_name, 16);
+SIZE_CHECK(struct rogue_fwif_slr_entry, 48);
+
+OFFSET_CHECK(struct rogue_fwif_osdata, fw_os_config_flags, 0);
+OFFSET_CHECK(struct rogue_fwif_osdata, fw_sync_check_mark, 4);
+OFFSET_CHECK(struct rogue_fwif_osdata, host_sync_check_mark, 8);
+OFFSET_CHECK(struct rogue_fwif_osdata, forced_updates_requested, 12);
+OFFSET_CHECK(struct rogue_fwif_osdata, slr_log_wp, 16);
+OFFSET_CHECK(struct rogue_fwif_osdata, slr_log_first, 24); +OFFSET_CHECK(struct rogue_fwif_osdata, slr_log, 72); +OFFSET_CHECK(struct rogue_fwif_osdata, last_forced_update_time, 552); +OFFSET_CHECK(struct rogue_fwif_osdata, interrupt_count, 560); +OFFSET_CHECK(struct rogue_fwif_osdata, kccb_cmds_executed, 568); +OFFSET_CHECK(struct rogue_fwif_osdata, power_sync_fw_addr, 572); +OFFSET_CHECK(struct rogue_fwif_osdata, fw_os_data_flags, 576); +SIZE_CHECK(struct rogue_fwif_osdata, 584); + +OFFSET_CHECK(struct rogue_bifinfo, bif_req_status, 0); +OFFSET_CHECK(struct rogue_bifinfo, bif_mmu_status, 8); +OFFSET_CHECK(struct rogue_bifinfo, pc_address, 16); +OFFSET_CHECK(struct rogue_bifinfo, reserved, 24); +SIZE_CHECK(struct rogue_bifinfo, 32); + +OFFSET_CHECK(struct rogue_eccinfo, fault_gpu, 0); +SIZE_CHECK(struct rogue_eccinfo, 4); + +OFFSET_CHECK(struct rogue_mmuinfo, mmu_status, 0); +OFFSET_CHECK(struct rogue_mmuinfo, pc_address, 16); +OFFSET_CHECK(struct rogue_mmuinfo, reserved, 24); +SIZE_CHECK(struct rogue_mmuinfo, 32); + +OFFSET_CHECK(struct rogue_pollinfo, thread_num, 0); +OFFSET_CHECK(struct rogue_pollinfo, cr_poll_addr, 4); +OFFSET_CHECK(struct rogue_pollinfo, cr_poll_mask, 8); +OFFSET_CHECK(struct rogue_pollinfo, cr_poll_last_value, 12); +OFFSET_CHECK(struct rogue_pollinfo, reserved, 16); +SIZE_CHECK(struct rogue_pollinfo, 24); + +OFFSET_CHECK(struct rogue_tlbinfo, bad_addr, 0); +OFFSET_CHECK(struct rogue_tlbinfo, entry_lo, 4); +SIZE_CHECK(struct rogue_tlbinfo, 8); + +OFFSET_CHECK(struct rogue_hwrinfo, hwr_data, 0); +OFFSET_CHECK(struct rogue_hwrinfo, cr_timer, 32); +OFFSET_CHECK(struct rogue_hwrinfo, os_timer, 40); +OFFSET_CHECK(struct rogue_hwrinfo, frame_num, 48); +OFFSET_CHECK(struct rogue_hwrinfo, pid, 52); +OFFSET_CHECK(struct rogue_hwrinfo, active_hwrt_data, 56); +OFFSET_CHECK(struct rogue_hwrinfo, hwr_number, 60); +OFFSET_CHECK(struct rogue_hwrinfo, event_status, 64); +OFFSET_CHECK(struct rogue_hwrinfo, hwr_recovery_flags, 68); +OFFSET_CHECK(struct rogue_hwrinfo, hwr_type, 72); +OFFSET_CHECK(struct rogue_hwrinfo, dm, 76); +OFFSET_CHECK(struct rogue_hwrinfo, core_id, 80); +OFFSET_CHECK(struct rogue_hwrinfo, cr_time_of_kick, 88); +OFFSET_CHECK(struct rogue_hwrinfo, cr_time_hw_reset_start, 96); +OFFSET_CHECK(struct rogue_hwrinfo, cr_time_hw_reset_finish, 104); +OFFSET_CHECK(struct rogue_hwrinfo, cr_time_freelist_ready, 112); +OFFSET_CHECK(struct rogue_hwrinfo, reserved, 120); +SIZE_CHECK(struct rogue_hwrinfo, 136); + +OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_info, 0); +OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_counter, 2176); +OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, write_index, 2180); +OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, dd_req_count, 2184); +OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_info_buf_flags, 2188); +OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_locked_up_count, 2192); +OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_overran_count, 2228); +OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_recovered_count, 2264); +OFFSET_CHECK(struct rogue_fwif_hwrinfobuf, hwr_dm_false_detect_count, 2300); +SIZE_CHECK(struct rogue_fwif_hwrinfobuf, 2336); + +OFFSET_CHECK(struct rogue_fwif_fwmemcontext, pc_dev_paddr, 0); +OFFSET_CHECK(struct rogue_fwif_fwmemcontext, page_cat_base_reg_set, 8); +OFFSET_CHECK(struct rogue_fwif_fwmemcontext, breakpoint_addr, 12); +OFFSET_CHECK(struct rogue_fwif_fwmemcontext, bp_handler_addr, 16); +OFFSET_CHECK(struct rogue_fwif_fwmemcontext, breakpoint_ctl, 20); +OFFSET_CHECK(struct rogue_fwif_fwmemcontext, fw_mem_ctx_flags, 24); 
+SIZE_CHECK(struct rogue_fwif_fwmemcontext, 32); + +OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vdm_call_stack_pointer, 0); +OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vdm_call_stack_pointer_init, 8); +OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_reg_vbs_so_prim, 16); +OFFSET_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, geom_current_idx, 32); +SIZE_CHECK(struct rogue_fwif_geom_ctx_state_per_geom, 40); + +OFFSET_CHECK(struct rogue_fwif_geom_ctx_state, geom_core, 0); +SIZE_CHECK(struct rogue_fwif_geom_ctx_state, 160); + +OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_pm_deallocated_mask_status, 0); +OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_dm_pds_mtilefree_status, 4); +OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, ctx_state_flags, 8); +OFFSET_CHECK(struct rogue_fwif_frag_ctx_state, frag_reg_isp_store, 12); +SIZE_CHECK(struct rogue_fwif_frag_ctx_state, 16); + +OFFSET_CHECK(struct rogue_fwif_compute_ctx_state, ctx_state_flags, 0); +SIZE_CHECK(struct rogue_fwif_compute_ctx_state, 4); + +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccbctl_fw_addr, 0); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccb_fw_addr, 4); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, ccb_meta_dma_addr, 8); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, context_state_addr, 24); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, fw_com_ctx_flags, 28); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, priority, 32); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, priority_seq_num, 36); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, rf_cmd_addr, 40); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_pending, 44); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_stores, 48); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_out_of_memory, 52); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, stats_num_partial_renders, 56); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, dm, 60); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, wait_signal_address, 64); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, wait_signal_node, 72); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, buf_stalled_node, 80); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, cbuf_queue_ctrl_addr, 88); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, robustness_address, 96); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, max_deadline_ms, 104); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, read_offset_needs_reset, 108); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, waiting_node, 112); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, run_node, 120); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, last_failed_ufo, 128); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, fw_mem_context_fw_addr, 136); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, server_common_context_id, 140); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, pid, 144); +OFFSET_CHECK(struct rogue_fwif_fwcommoncontext, geom_oom_disabled, 148); +SIZE_CHECK(struct rogue_fwif_fwcommoncontext, 152); + +OFFSET_CHECK(struct rogue_fwif_ccb_ctl, write_offset, 0); +OFFSET_CHECK(struct rogue_fwif_ccb_ctl, padding, 4); +OFFSET_CHECK(struct rogue_fwif_ccb_ctl, read_offset, 128); +OFFSET_CHECK(struct rogue_fwif_ccb_ctl, wrap_mask, 132); +OFFSET_CHECK(struct rogue_fwif_ccb_ctl, cmd_size, 136); +OFFSET_CHECK(struct rogue_fwif_ccb_ctl, padding2, 140); +SIZE_CHECK(struct rogue_fwif_ccb_ctl, 144); + +OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, context_fw_addr, 0); 
+OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, client_woff_update, 4); +OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, client_wrap_mask_update, 8); +OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, num_cleanup_ctl, 12); +OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, cleanup_ctl_fw_addr, 16); +OFFSET_CHECK(struct rogue_fwif_kccb_cmd_kick_data, work_est_cmd_header_offset, 28); +SIZE_CHECK(struct rogue_fwif_kccb_cmd_kick_data, 32); + +OFFSET_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, geom_cmd_kick_data, 0); +OFFSET_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, frag_cmd_kick_data, 32); +SIZE_CHECK(struct rogue_fwif_kccb_cmd_combined_geom_frag_kick_data, 64); + +OFFSET_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, context_fw_addr, 0); +OFFSET_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, ccb_fence_offset, 4); +SIZE_CHECK(struct rogue_fwif_kccb_cmd_force_update_data, 8); + +OFFSET_CHECK(struct rogue_fwif_cleanup_request, cleanup_type, 0); +OFFSET_CHECK(struct rogue_fwif_cleanup_request, cleanup_data, 4); +SIZE_CHECK(struct rogue_fwif_cleanup_request, 8); + +OFFSET_CHECK(struct rogue_fwif_power_request, pow_type, 0); +OFFSET_CHECK(struct rogue_fwif_power_request, power_req_data, 4); +SIZE_CHECK(struct rogue_fwif_power_request, 8); + +OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, context_fw_addr, 0); +OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, inval, 4); +OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, dm_context, 8); +OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, address, 16); +OFFSET_CHECK(struct rogue_fwif_slcflushinvaldata, size, 24); +SIZE_CHECK(struct rogue_fwif_slcflushinvaldata, 32); + +OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl, opcode, 0); +OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl, mask, 8); +SIZE_CHECK(struct rogue_fwif_hwperf_ctrl, 16); + +OFFSET_CHECK(struct rogue_fwif_hwperf_config_enable_blks, num_blocks, 0); +OFFSET_CHECK(struct rogue_fwif_hwperf_config_enable_blks, block_configs_fw_addr, 4); +SIZE_CHECK(struct rogue_fwif_hwperf_config_enable_blks, 8); + +OFFSET_CHECK(struct rogue_fwif_hwperf_config_da_blks, num_blocks, 0); +OFFSET_CHECK(struct rogue_fwif_hwperf_config_da_blks, block_configs_fw_addr, 4); +SIZE_CHECK(struct rogue_fwif_hwperf_config_da_blks, 8); + +OFFSET_CHECK(struct rogue_fwif_coreclkspeedchange_data, new_clock_speed, 0); +SIZE_CHECK(struct rogue_fwif_coreclkspeedchange_data, 4); + +OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, enable, 0); +OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, num_blocks, 4); +OFFSET_CHECK(struct rogue_fwif_hwperf_ctrl_blks, block_ids, 8); +SIZE_CHECK(struct rogue_fwif_hwperf_ctrl_blks, 40); + +OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, custom_block, 0); +OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, num_counters, 2); +OFFSET_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, custom_counter_ids_fw_addr, 4); +SIZE_CHECK(struct rogue_fwif_hwperf_select_custom_cntrs, 8); + +OFFSET_CHECK(struct rogue_fwif_zsbuffer_backing_data, zs_buffer_fw_addr, 0); +OFFSET_CHECK(struct rogue_fwif_zsbuffer_backing_data, done, 4); +SIZE_CHECK(struct rogue_fwif_zsbuffer_backing_data, 8); + +OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, freelist_fw_addr, 0); +OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, delta_pages, 4); +OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, new_pages, 8); +OFFSET_CHECK(struct rogue_fwif_freelist_gs_data, ready_pages, 12); +SIZE_CHECK(struct rogue_fwif_freelist_gs_data, 16); + +OFFSET_CHECK(struct 
rogue_fwif_freelists_reconstruction_data, freelist_count, 0); +OFFSET_CHECK(struct rogue_fwif_freelists_reconstruction_data, freelist_ids, 4); +SIZE_CHECK(struct rogue_fwif_freelists_reconstruction_data, 76); + +OFFSET_CHECK(struct rogue_fwif_write_offset_update_data, context_fw_addr, 0); +SIZE_CHECK(struct rogue_fwif_write_offset_update_data, 8); + +OFFSET_CHECK(struct rogue_fwif_kccb_cmd, cmd_type, 0); +OFFSET_CHECK(struct rogue_fwif_kccb_cmd, kccb_flags, 4); +OFFSET_CHECK(struct rogue_fwif_kccb_cmd, cmd_data, 8); +SIZE_CHECK(struct rogue_fwif_kccb_cmd, 88); + +OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, server_common_context_id, 0); +OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, reset_reason, 4); +OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, dm, 8); +OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, reset_job_ref, 12); +OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, flags, 16); +OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, pc_address, 24); +OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, fault_address, 32); +SIZE_CHECK(struct rogue_fwif_fwccb_cmd_context_reset_data, 40); + +OFFSET_CHECK(struct rogue_fwif_fwccb_cmd_fw_pagefault_data, fw_fault_addr, 0); +SIZE_CHECK(struct rogue_fwif_fwccb_cmd_fw_pagefault_data, 8); + +OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, cmd_type, 0); +OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, fwccb_flags, 4); +OFFSET_CHECK(struct rogue_fwif_fwccb_cmd, cmd_data, 8); +SIZE_CHECK(struct rogue_fwif_fwccb_cmd, 88); + +OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, cmd_type, 0); +OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, cmd_size, 4); +OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, ext_job_ref, 8); +OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, int_job_ref, 12); +OFFSET_CHECK(struct rogue_fwif_ccb_cmd_header, work_est_kick_data, 16); +SIZE_CHECK(struct rogue_fwif_ccb_cmd_header, 40); + +OFFSET_CHECK(struct rogue_fwif_runtime_cfg, active_pm_latency_ms, 0); +OFFSET_CHECK(struct rogue_fwif_runtime_cfg, runtime_cfg_flags, 4); +OFFSET_CHECK(struct rogue_fwif_runtime_cfg, active_pm_latency_persistant, 8); +OFFSET_CHECK(struct rogue_fwif_runtime_cfg, core_clock_speed, 12); +OFFSET_CHECK(struct rogue_fwif_runtime_cfg, default_dusts_num_init, 16); +OFFSET_CHECK(struct rogue_fwif_runtime_cfg, phr_mode, 20); +OFFSET_CHECK(struct rogue_fwif_runtime_cfg, hcs_deadline_ms, 24); +OFFSET_CHECK(struct rogue_fwif_runtime_cfg, wdg_period_us, 28); +OFFSET_CHECK(struct rogue_fwif_runtime_cfg, osid_priority, 32); +OFFSET_CHECK(struct rogue_fwif_runtime_cfg, hwperf_buf_fw_addr, 64); +OFFSET_CHECK(struct rogue_fwif_runtime_cfg, padding, 68); +SIZE_CHECK(struct rogue_fwif_runtime_cfg, 72); + +OFFSET_CHECK(struct rogue_fwif_connection_ctl, connection_fw_state, 0); +OFFSET_CHECK(struct rogue_fwif_connection_ctl, connection_os_state, 4); +OFFSET_CHECK(struct rogue_fwif_connection_ctl, alive_fw_token, 8); +OFFSET_CHECK(struct rogue_fwif_connection_ctl, alive_os_token, 12); +SIZE_CHECK(struct rogue_fwif_connection_ctl, 16); + +OFFSET_CHECK(struct rogue_fwif_compchecks_bvnc, layout_version, 0); +OFFSET_CHECK(struct rogue_fwif_compchecks_bvnc, bvnc, 8); +SIZE_CHECK(struct rogue_fwif_compchecks_bvnc, 16); + +OFFSET_CHECK(struct rogue_fwif_init_options, os_count_support, 0); +SIZE_CHECK(struct rogue_fwif_init_options, 8); + +OFFSET_CHECK(struct rogue_fwif_compchecks, hw_bvnc, 0); +OFFSET_CHECK(struct rogue_fwif_compchecks, fw_bvnc, 16); +OFFSET_CHECK(struct rogue_fwif_compchecks, 
fw_processor_version, 32); +OFFSET_CHECK(struct rogue_fwif_compchecks, ddk_version, 36); +OFFSET_CHECK(struct rogue_fwif_compchecks, ddk_build, 40); +OFFSET_CHECK(struct rogue_fwif_compchecks, build_options, 44); +OFFSET_CHECK(struct rogue_fwif_compchecks, init_options, 48); +OFFSET_CHECK(struct rogue_fwif_compchecks, updated, 56); +SIZE_CHECK(struct rogue_fwif_compchecks, 64); + +OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccbctl_fw_addr, 0); +OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccb_fw_addr, 4); +OFFSET_CHECK(struct rogue_fwif_osinit, kernel_ccb_rtn_slots_fw_addr, 8); +OFFSET_CHECK(struct rogue_fwif_osinit, firmware_ccbctl_fw_addr, 12); +OFFSET_CHECK(struct rogue_fwif_osinit, firmware_ccb_fw_addr, 16); +OFFSET_CHECK(struct rogue_fwif_osinit, work_est_firmware_ccbctl_fw_addr, 20); +OFFSET_CHECK(struct rogue_fwif_osinit, work_est_firmware_ccb_fw_addr, 24); +OFFSET_CHECK(struct rogue_fwif_osinit, rogue_fwif_hwr_info_buf_ctl_fw_addr, 28); +OFFSET_CHECK(struct rogue_fwif_osinit, hwr_debug_dump_limit, 32); +OFFSET_CHECK(struct rogue_fwif_osinit, fw_os_data_fw_addr, 36); +OFFSET_CHECK(struct rogue_fwif_osinit, rogue_comp_checks, 40); +SIZE_CHECK(struct rogue_fwif_osinit, 104); + +OFFSET_CHECK(struct rogue_fwif_sigbuf_ctl, buffer_fw_addr, 0); +OFFSET_CHECK(struct rogue_fwif_sigbuf_ctl, left_size_in_regs, 4); +SIZE_CHECK(struct rogue_fwif_sigbuf_ctl, 8); + +OFFSET_CHECK(struct pdvfs_opp, volt, 0); +OFFSET_CHECK(struct pdvfs_opp, freq, 4); +SIZE_CHECK(struct pdvfs_opp, 8); + +OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, opp_values, 0); +OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, min_opp_point, 128); +OFFSET_CHECK(struct rogue_fwif_pdvfs_opp, max_opp_point, 132); +SIZE_CHECK(struct rogue_fwif_pdvfs_opp, 136); + +OFFSET_CHECK(struct rogue_fwif_counter_dump_ctl, buffer_fw_addr, 0); +OFFSET_CHECK(struct rogue_fwif_counter_dump_ctl, size_in_dwords, 4); +SIZE_CHECK(struct rogue_fwif_counter_dump_ctl, 8); + +OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_string, 0); +OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_km_feature_flags, 24); +OFFSET_CHECK(struct rogue_hwperf_bvnc, num_bvnc_blocks, 28); +OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_gpu_cores, 30); +OFFSET_CHECK(struct rogue_hwperf_bvnc, bvnc_blocks, 32); +SIZE_CHECK(struct rogue_hwperf_bvnc, 160); + +OFFSET_CHECK(struct rogue_fwif_sysinit, fault_phys_addr, 0); +OFFSET_CHECK(struct rogue_fwif_sysinit, pds_exec_base, 8); +OFFSET_CHECK(struct rogue_fwif_sysinit, usc_exec_base, 16); +OFFSET_CHECK(struct rogue_fwif_sysinit, fbcdc_state_table_base, 24); +OFFSET_CHECK(struct rogue_fwif_sysinit, fbcdc_large_state_table_base, 32); +OFFSET_CHECK(struct rogue_fwif_sysinit, texture_heap_base, 40); +OFFSET_CHECK(struct rogue_fwif_sysinit, hw_perf_filter, 48); +OFFSET_CHECK(struct rogue_fwif_sysinit, slc3_fence_dev_addr, 56); +OFFSET_CHECK(struct rogue_fwif_sysinit, tpu_trilinear_frac_mask, 64); +OFFSET_CHECK(struct rogue_fwif_sysinit, sigbuf_ctl, 80); +OFFSET_CHECK(struct rogue_fwif_sysinit, pdvfs_opp_info, 152); +OFFSET_CHECK(struct rogue_fwif_sysinit, coremem_data_store, 288); +OFFSET_CHECK(struct rogue_fwif_sysinit, counter_dump_ctl, 304); +OFFSET_CHECK(struct rogue_fwif_sysinit, filter_flags, 312); +OFFSET_CHECK(struct rogue_fwif_sysinit, runtime_cfg_fw_addr, 316); +OFFSET_CHECK(struct rogue_fwif_sysinit, trace_buf_ctl_fw_addr, 320); +OFFSET_CHECK(struct rogue_fwif_sysinit, fw_sys_data_fw_addr, 324); +OFFSET_CHECK(struct rogue_fwif_sysinit, gpu_util_fw_cb_ctl_fw_addr, 328); +OFFSET_CHECK(struct rogue_fwif_sysinit, reg_cfg_fw_addr, 332); 
+OFFSET_CHECK(struct rogue_fwif_sysinit, hwperf_ctl_fw_addr, 336); +OFFSET_CHECK(struct rogue_fwif_sysinit, align_checks, 340); +OFFSET_CHECK(struct rogue_fwif_sysinit, initial_core_clock_speed, 344); +OFFSET_CHECK(struct rogue_fwif_sysinit, active_pm_latency_ms, 348); +OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_started, 352); +OFFSET_CHECK(struct rogue_fwif_sysinit, marker_val, 356); +OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_started_timestamp, 360); +OFFSET_CHECK(struct rogue_fwif_sysinit, jones_disable_mask, 364); +OFFSET_CHECK(struct rogue_fwif_sysinit, firmware_perf, 368); +OFFSET_CHECK(struct rogue_fwif_sysinit, core_clock_rate_fw_addr, 372); +OFFSET_CHECK(struct rogue_fwif_sysinit, gpio_validation_mode, 376); +OFFSET_CHECK(struct rogue_fwif_sysinit, bvnc_km_feature_flags, 380); +OFFSET_CHECK(struct rogue_fwif_sysinit, tfbc_compression_control, 540); +SIZE_CHECK(struct rogue_fwif_sysinit, 544); + +OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, time_corr, 0); +OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, time_corr_seq_count, 10240); +OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, gpu_util_flags, 10244); +OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, last_word, 10248); +OFFSET_CHECK(struct rogue_fwif_gpu_util_fwcb, stats_counters, 10256); +SIZE_CHECK(struct rogue_fwif_gpu_util_fwcb, 10280); + +OFFSET_CHECK(struct rogue_fwif_rta_ctl, render_target_index, 0); +OFFSET_CHECK(struct rogue_fwif_rta_ctl, current_render_target, 4); +OFFSET_CHECK(struct rogue_fwif_rta_ctl, active_render_targets, 8); +OFFSET_CHECK(struct rogue_fwif_rta_ctl, cumul_active_render_targets, 12); +OFFSET_CHECK(struct rogue_fwif_rta_ctl, valid_render_targets_fw_addr, 16); +OFFSET_CHECK(struct rogue_fwif_rta_ctl, rta_num_partial_renders_fw_addr, 20); +OFFSET_CHECK(struct rogue_fwif_rta_ctl, max_rts, 24); +OFFSET_CHECK(struct rogue_fwif_rta_ctl, rta_ctl_flags, 28); +SIZE_CHECK(struct rogue_fwif_rta_ctl, 32); + +OFFSET_CHECK(struct rogue_fwif_freelist, freelist_dev_addr, 0); +OFFSET_CHECK(struct rogue_fwif_freelist, current_dev_addr, 8); +OFFSET_CHECK(struct rogue_fwif_freelist, current_stack_top, 16); +OFFSET_CHECK(struct rogue_fwif_freelist, max_pages, 20); +OFFSET_CHECK(struct rogue_fwif_freelist, grow_pages, 24); +OFFSET_CHECK(struct rogue_fwif_freelist, current_pages, 28); +OFFSET_CHECK(struct rogue_fwif_freelist, allocated_page_count, 32); +OFFSET_CHECK(struct rogue_fwif_freelist, allocated_mmu_page_count, 36); +OFFSET_CHECK(struct rogue_fwif_freelist, freelist_id, 40); +OFFSET_CHECK(struct rogue_fwif_freelist, grow_pending, 44); +OFFSET_CHECK(struct rogue_fwif_freelist, ready_pages, 48); +OFFSET_CHECK(struct rogue_fwif_freelist, freelist_flags, 52); +OFFSET_CHECK(struct rogue_fwif_freelist, pm_global_pb, 56); +SIZE_CHECK(struct rogue_fwif_freelist, 64); + +OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, geom_caches_need_zeroing, 0); +OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, screen_pixel_max, 4); +OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, multi_sample_ctl, 8); +OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, flipped_multi_sample_ctl, 16); +OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, tpc_stride, 24); +OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, tpc_size, 28); +OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_screen, 32); +OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, mtile_stride, 36); +OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, teaa, 40); +OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_mtile1, 44); +OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, te_mtile2, 48); 
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_lower_x, 52);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_lower_y, 56);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_upper_x, 60);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_upper_y, 64);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_scale_x, 68);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_merge_scale_y, 72);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, rgn_header_size, 76);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata_common, isp_mtile_size, 80);
+SIZE_CHECK(struct rogue_fwif_hwrtdata_common, 88);
+
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_mlist_dev_addr, 0);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, vce_cat_base, 8);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, vce_last_cat_base, 40);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, te_cat_base, 72);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, te_last_cat_base, 104);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, alist_cat_base, 136);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, alist_last_cat_base, 144);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_alist_stack_pointer, 152);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, pm_mlist_stack_pointer, 160);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, hwrt_data_common_fw_addr, 164);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, hwrt_data_flags, 168);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, state, 172);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, freelists_fw_addr, 176);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, freelist_hwr_snapshot, 188);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, vheap_table_dev_addr, 200);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, rta_ctl, 208);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, tail_ptrs_dev_addr, 240);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, macrotile_array_dev_addr, 248);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, rgn_header_dev_addr, 256);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, rtc_dev_addr, 264);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, owner_geom_not_used_by_host, 272);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, geom_caches_need_zeroing, 276);
+OFFSET_CHECK(struct rogue_fwif_hwrtdata, cleanup_state, 320);
+SIZE_CHECK(struct rogue_fwif_hwrtdata, 384);
+
+OFFSET_CHECK(struct rogue_fwif_sync_checkpoint, state, 0);
+OFFSET_CHECK(struct rogue_fwif_sync_checkpoint, fw_ref_count, 4);
+SIZE_CHECK(struct rogue_fwif_sync_checkpoint, 8);
+
+#endif /* PVR_ROGUE_FWIF_CHECK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h
new file mode 100644
index 0000000000..6e22440008
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_CLIENT_H
+#define PVR_ROGUE_FWIF_CLIENT_H
+
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_fwif_shared.h"
+
+/*
+ * Page size used for Parameter Management.
+ */
+#define ROGUE_PM_PAGE_SIZE SZ_4K
+
+/*
+ * Minimum/Maximum PB size.
+ *
+ * Base page size is dependent on core:
+ *   S6/S6XT/S7               = 50 pages
+ *   S8XE                     = 40 pages
+ *   S8XE with BRN66011 fixed = 25 pages
+ *
+ * Minimum PB = Base Pages + (NUM_TE_PIPES-1)*16K + (NUM_VCE_PIPES-1)*64K +
+ *              IF_PM_PREALLOC(NUM_TE_PIPES*16K + NUM_VCE_PIPES*16K)
+ *
+ * Maximum PB size must ensure that no PM address space can be fully used,
+ * because if the full address space was used it would wrap and corrupt itself.
+ * Since there are two freelists (local is always minimum sized) this can be + * described as following three conditions being met: + * + * (Minimum PB + Maximum PB) < ALIST PM address space size (16GB) + * (Minimum PB + Maximum PB) < TE PM address space size (16GB) / NUM_TE_PIPES + * (Minimum PB + Maximum PB) < VCE PM address space size (16GB) / NUM_VCE_PIPES + * + * Since the max of NUM_TE_PIPES and NUM_VCE_PIPES is 4, we have a hard limit + * of 4GB minus the Minimum PB. For convenience we take the smaller power-of-2 + * value of 2GB. This is far more than any current applications use. + */ +#define ROGUE_PM_MAX_FREELIST_SIZE SZ_2G + +/* + * Flags supported by the geometry DM command i.e. &struct rogue_fwif_cmd_geom. + */ + +#define ROGUE_GEOM_FLAGS_FIRSTKICK BIT_MASK(0) +#define ROGUE_GEOM_FLAGS_LASTKICK BIT_MASK(1) +/* Use single core in a multi core setup. */ +#define ROGUE_GEOM_FLAGS_SINGLE_CORE BIT_MASK(3) + +/* + * Flags supported by the fragment DM command i.e. &struct rogue_fwif_cmd_frag. + */ + +/* Use single core in a multi core setup. */ +#define ROGUE_FRAG_FLAGS_SINGLE_CORE BIT_MASK(3) +/* Indicates whether this render produces visibility results. */ +#define ROGUE_FRAG_FLAGS_GET_VIS_RESULTS BIT_MASK(5) +/* Indicates whether a depth buffer is present. */ +#define ROGUE_FRAG_FLAGS_DEPTHBUFFER BIT_MASK(7) +/* Indicates whether a stencil buffer is present. */ +#define ROGUE_FRAG_FLAGS_STENCILBUFFER BIT_MASK(8) +/* Disable pixel merging for this render. */ +#define ROGUE_FRAG_FLAGS_DISABLE_PIXELMERGE BIT_MASK(15) +/* Indicates whether a scratch buffer is present. */ +#define ROGUE_FRAG_FLAGS_SCRATCHBUFFER BIT_MASK(19) +/* Disallow compute overlapped with this render. */ +#define ROGUE_FRAG_FLAGS_PREVENT_CDM_OVERLAP BIT_MASK(26) + +/* + * Flags supported by the compute DM command i.e. &struct rogue_fwif_cmd_compute. + */ + +#define ROGUE_COMPUTE_FLAG_PREVENT_ALL_OVERLAP BIT_MASK(2) +/*!< Use single core in a multi core setup. */ +#define ROGUE_COMPUTE_FLAG_SINGLE_CORE BIT_MASK(5) + +/* + * Flags supported by the transfer DM command i.e. &struct rogue_fwif_cmd_transfer. + */ + +/*!< Use single core in a multi core setup. */ +#define ROGUE_TRANSFER_FLAGS_SINGLE_CORE BIT_MASK(1) + +/* + ************************************************ + * Parameter/HWRTData control structures. + ************************************************ + */ + +/* + * Configuration registers which need to be loaded by the firmware before a geometry + * job can be started. + */ +struct rogue_fwif_geom_regs { + u64 vdm_ctrl_stream_base; + u64 tpu_border_colour_table; + + /* Only used when feature VDM_DRAWINDIRECT present. */ + u64 vdm_draw_indirect0; + /* Only used when feature VDM_DRAWINDIRECT present. */ + u32 vdm_draw_indirect1; + + u32 ppp_ctrl; + u32 te_psg; + /* Only used when BRN 49927 present. */ + u32 tpu; + + u32 vdm_context_resume_task0_size; + /* Only used when feature VDM_OBJECT_LEVEL_LLS present. */ + u32 vdm_context_resume_task3_size; + + /* Only used when BRN 56279 or BRN 67381 present. */ + u32 pds_ctrl; + + u32 view_idx; + + /* Only used when feature TESSELLATION present */ + u32 pds_coeff_free_prog; + + u32 padding; +}; + +/* Only used when BRN 44455 or BRN 63027 present. */ +struct rogue_fwif_dummy_rgnhdr_init_geom_regs { + u64 te_psgregion_addr; +}; + +/* + * Represents a geometry command that can be used to tile a whole scene's objects as + * per TA behavior. 
+ */ +struct rogue_fwif_cmd_geom { + /* + * rogue_fwif_cmd_geom_frag_shared field must always be at the beginning of the + * struct. + * + * The command struct (rogue_fwif_cmd_geom) is shared between Client and + * Firmware. Kernel is unable to perform read/write operations on the + * command struct, the SHARED region is the only exception from this rule. + * This region must be the first member so that Kernel can easily access it. + * For more info, see rogue_fwif_cmd_geom_frag_shared definition. + */ + struct rogue_fwif_cmd_geom_frag_shared cmd_shared; + + struct rogue_fwif_geom_regs regs __aligned(8); + u32 flags __aligned(8); + + /* + * Holds the geometry/fragment fence value to allow the fragment partial render command + * to go through. + */ + struct rogue_fwif_ufo partial_render_geom_frag_fence; + + /* Only used when BRN 44455 or BRN 63027 present. */ + struct rogue_fwif_dummy_rgnhdr_init_geom_regs dummy_rgnhdr_init_geom_regs __aligned(8); + + /* Only used when BRN 61484 or BRN 66333 present. */ + u32 brn61484_66333_live_rt; + + u32 padding; +}; + +/* + * Configuration registers which need to be loaded by the firmware before ISP + * can be started. + */ +struct rogue_fwif_frag_regs { + u32 usc_pixel_output_ctrl; + +#define ROGUE_MAXIMUM_OUTPUT_REGISTERS_PER_PIXEL 8U + u32 usc_clear_register[ROGUE_MAXIMUM_OUTPUT_REGISTERS_PER_PIXEL]; + + u32 isp_bgobjdepth; + u32 isp_bgobjvals; + u32 isp_aa; + /* Only used when feature S7_TOP_INFRASTRUCTURE present. */ + u32 isp_xtp_pipe_enable; + + u32 isp_ctl; + + /* Only used when BRN 49927 present. */ + u32 tpu; + + u32 event_pixel_pds_info; + + /* Only used when feature CLUSTER_GROUPING present. */ + u32 pixel_phantom; + + u32 view_idx; + + u32 event_pixel_pds_data; + + /* Only used when BRN 65101 present. */ + u32 brn65101_event_pixel_pds_data; + + /* Only used when feature GPU_MULTICORE_SUPPORT or BRN 47217 present. */ + u32 isp_oclqry_stride; + + /* Only used when feature ZLS_SUBTILE present. */ + u32 isp_zls_pixels; + + /* Only used when feature ISP_ZLS_D24_S8_PACKING_OGL_MODE present. */ + u32 rgx_cr_blackpearl_fix; + + /* All values below the ALIGN(8) must be 64 bit. */ + aligned_u64 isp_scissor_base; + u64 isp_dbias_base; + u64 isp_oclqry_base; + u64 isp_zlsctl; + u64 isp_zload_store_base; + u64 isp_stencil_load_store_base; + + /* + * Only used when feature FBCDC_ALGORITHM present and value < 3 or feature + * FB_CDC_V4 present. Additionally, BRNs 48754, 60227, 72310 and 72311 must + * not be present. + */ + u64 fb_cdc_zls; + +#define ROGUE_PBE_WORDS_REQUIRED_FOR_RENDERS 3U + u64 pbe_word[8U][ROGUE_PBE_WORDS_REQUIRED_FOR_RENDERS]; + u64 tpu_border_colour_table; + u64 pds_bgnd[3U]; + + /* Only used when BRN 65101 present. */ + u64 pds_bgnd_brn65101[3U]; + + u64 pds_pr_bgnd[3U]; + + /* Only used when BRN 62850 or 62865 present. */ + u64 isp_dummy_stencil_store_base; + + /* Only used when BRN 66193 present. */ + u64 isp_dummy_depth_store_base; + + /* Only used when BRN 67182 present. */ + u32 rgnhdr_single_rt_size; + /* Only used when BRN 67182 present. */ + u32 rgnhdr_scratch_offset; +}; + +struct rogue_fwif_cmd_frag { + struct rogue_fwif_cmd_geom_frag_shared cmd_shared __aligned(8); + + struct rogue_fwif_frag_regs regs __aligned(8); + /* command control flags. */ + u32 flags; + /* Stride IN BYTES for Z-Buffer in case of RTAs. */ + u32 zls_stride; + /* Stride IN BYTES for S-Buffer in case of RTAs. */ + u32 sls_stride; + + /* Only used if feature GPU_MULTICORE_SUPPORT present. 
*/ + u32 execute_count; +}; + +/* + * Configuration registers which need to be loaded by the firmware before CDM + * can be started. + */ +struct rogue_fwif_compute_regs { + u64 tpu_border_colour_table; + + /* Only used when feature CDM_USER_MODE_QUEUE present. */ + u64 cdm_cb_queue; + + /* Only used when feature CDM_USER_MODE_QUEUE present. */ + u64 cdm_cb_base; + /* Only used when feature CDM_USER_MODE_QUEUE present. */ + u64 cdm_cb; + + /* Only used when feature CDM_USER_MODE_QUEUE is not present. */ + u64 cdm_ctrl_stream_base; + + u64 cdm_context_state_base_addr; + + /* Only used when BRN 49927 is present. */ + u32 tpu; + u32 cdm_resume_pds1; + + /* Only used when feature COMPUTE_MORTON_CAPABLE present. */ + u32 cdm_item; + + /* Only used when feature CLUSTER_GROUPING present. */ + u32 compute_cluster; + + /* Only used when feature TPU_DM_GLOBAL_REGISTERS present. */ + u32 tpu_tag_cdm_ctrl; + + u32 padding; +}; + +struct rogue_fwif_cmd_compute { + /* Common command attributes */ + struct rogue_fwif_cmd_common common __aligned(8); + + /* CDM registers */ + struct rogue_fwif_compute_regs regs; + + /* Control flags */ + u32 flags __aligned(8); + + /* Only used when feature UNIFIED_STORE_VIRTUAL_PARTITIONING present. */ + u32 num_temp_regions; + + /* Only used when feature CDM_USER_MODE_QUEUE present. */ + u32 stream_start_offset; + + /* Only used when feature GPU_MULTICORE_SUPPORT present. */ + u32 execute_count; +}; + +struct rogue_fwif_transfer_regs { + /* + * All 32 bit values should be added in the top section. This then requires only a + * single RGXFW_ALIGN to align all the 64 bit values in the second section. + */ + u32 isp_bgobjvals; + + u32 usc_pixel_output_ctrl; + u32 usc_clear_register0; + u32 usc_clear_register1; + u32 usc_clear_register2; + u32 usc_clear_register3; + + u32 isp_mtile_size; + u32 isp_render_origin; + u32 isp_ctl; + + /* Only used when feature S7_TOP_INFRASTRUCTURE present. */ + u32 isp_xtp_pipe_enable; + u32 isp_aa; + + u32 event_pixel_pds_info; + + u32 event_pixel_pds_code; + u32 event_pixel_pds_data; + + u32 isp_render; + u32 isp_rgn; + + /* Only used when feature GPU_MULTICORE_SUPPORT present. */ + u32 frag_screen; + + /* All values below the aligned_u64 must be 64 bit. */ + aligned_u64 pds_bgnd0_base; + u64 pds_bgnd1_base; + u64 pds_bgnd3_sizeinfo; + + u64 isp_mtile_base; +#define ROGUE_PBE_WORDS_REQUIRED_FOR_TQS 3 + /* TQ_MAX_RENDER_TARGETS * PBE_STATE_SIZE */ + u64 pbe_wordx_mrty[3U * ROGUE_PBE_WORDS_REQUIRED_FOR_TQS]; +}; + +struct rogue_fwif_cmd_transfer { + /* Common command attributes */ + struct rogue_fwif_cmd_common common __aligned(8); + + struct rogue_fwif_transfer_regs regs __aligned(8); + + u32 flags; + + u32 padding; +}; + +#include "pvr_rogue_fwif_client_check.h" + +#endif /* PVR_ROGUE_FWIF_CLIENT_H */ diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h new file mode 100644 index 0000000000..54aa447416 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_client_check.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. 
+
+#ifndef PVR_ROGUE_FWIF_CLIENT_CHECK_H
+#define PVR_ROGUE_FWIF_CLIENT_CHECK_H
+
+#include <linux/build_bug.h>
+
+#define OFFSET_CHECK(type, member, offset) \
+	static_assert(offsetof(type, member) == (offset), \
+		      "offsetof(" #type ", " #member ") incorrect")
+
+#define SIZE_CHECK(type, size) \
+	static_assert(sizeof(type) == (size), #type " is incorrect size")
+
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_ctrl_stream_base, 0);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, tpu_border_colour_table, 8);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_draw_indirect0, 16);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_draw_indirect1, 24);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, ppp_ctrl, 28);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, te_psg, 32);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, tpu, 36);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_context_resume_task0_size, 40);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, vdm_context_resume_task3_size, 44);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, pds_ctrl, 48);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, view_idx, 52);
+OFFSET_CHECK(struct rogue_fwif_geom_regs, pds_coeff_free_prog, 56);
+SIZE_CHECK(struct rogue_fwif_geom_regs, 64);
+
+OFFSET_CHECK(struct rogue_fwif_dummy_rgnhdr_init_geom_regs, te_psgregion_addr, 0);
+SIZE_CHECK(struct rogue_fwif_dummy_rgnhdr_init_geom_regs, 8);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, cmd_shared, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, regs, 16);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, flags, 80);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, partial_render_geom_frag_fence, 84);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, dummy_rgnhdr_init_geom_regs, 96);
+OFFSET_CHECK(struct rogue_fwif_cmd_geom, brn61484_66333_live_rt, 104);
+SIZE_CHECK(struct rogue_fwif_cmd_geom, 112);
+
+OFFSET_CHECK(struct rogue_fwif_frag_regs, usc_pixel_output_ctrl, 0);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, usc_clear_register, 4);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_bgobjdepth, 36);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_bgobjvals, 40);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_aa, 44);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_xtp_pipe_enable, 48);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_ctl, 52);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, tpu, 56);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, event_pixel_pds_info, 60);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pixel_phantom, 64);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, view_idx, 68);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, event_pixel_pds_data, 72);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, brn65101_event_pixel_pds_data, 76);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_oclqry_stride, 80);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zls_pixels, 84);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, rgx_cr_blackpearl_fix, 88);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_scissor_base, 96);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dbias_base, 104);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_oclqry_base, 112);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zlsctl, 120);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_zload_store_base, 128);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_stencil_load_store_base, 136);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, fb_cdc_zls, 144);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pbe_word, 152);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, tpu_border_colour_table, 344);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_bgnd, 352);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_bgnd_brn65101, 376);
+OFFSET_CHECK(struct rogue_fwif_frag_regs, pds_pr_bgnd, 400); +OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dummy_stencil_store_base, 424); +OFFSET_CHECK(struct rogue_fwif_frag_regs, isp_dummy_depth_store_base, 432); +OFFSET_CHECK(struct rogue_fwif_frag_regs, rgnhdr_single_rt_size, 440); +OFFSET_CHECK(struct rogue_fwif_frag_regs, rgnhdr_scratch_offset, 444); +SIZE_CHECK(struct rogue_fwif_frag_regs, 448); + +OFFSET_CHECK(struct rogue_fwif_cmd_frag, cmd_shared, 0); +OFFSET_CHECK(struct rogue_fwif_cmd_frag, regs, 16); +OFFSET_CHECK(struct rogue_fwif_cmd_frag, flags, 464); +OFFSET_CHECK(struct rogue_fwif_cmd_frag, zls_stride, 468); +OFFSET_CHECK(struct rogue_fwif_cmd_frag, sls_stride, 472); +OFFSET_CHECK(struct rogue_fwif_cmd_frag, execute_count, 476); +SIZE_CHECK(struct rogue_fwif_cmd_frag, 480); + +OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu_border_colour_table, 0); +OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb_queue, 8); +OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb_base, 16); +OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_cb, 24); +OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_ctrl_stream_base, 32); +OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_context_state_base_addr, 40); +OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu, 48); +OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_resume_pds1, 52); +OFFSET_CHECK(struct rogue_fwif_compute_regs, cdm_item, 56); +OFFSET_CHECK(struct rogue_fwif_compute_regs, compute_cluster, 60); +OFFSET_CHECK(struct rogue_fwif_compute_regs, tpu_tag_cdm_ctrl, 64); +SIZE_CHECK(struct rogue_fwif_compute_regs, 72); + +OFFSET_CHECK(struct rogue_fwif_cmd_compute, common, 0); +OFFSET_CHECK(struct rogue_fwif_cmd_compute, regs, 8); +OFFSET_CHECK(struct rogue_fwif_cmd_compute, flags, 80); +OFFSET_CHECK(struct rogue_fwif_cmd_compute, num_temp_regions, 84); +OFFSET_CHECK(struct rogue_fwif_cmd_compute, stream_start_offset, 88); +OFFSET_CHECK(struct rogue_fwif_cmd_compute, execute_count, 92); +SIZE_CHECK(struct rogue_fwif_cmd_compute, 96); + +OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_bgobjvals, 0); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_pixel_output_ctrl, 4); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register0, 8); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register1, 12); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register2, 16); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, usc_clear_register3, 20); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_mtile_size, 24); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_render_origin, 28); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_ctl, 32); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_xtp_pipe_enable, 36); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_aa, 40); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_info, 44); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_code, 48); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, event_pixel_pds_data, 52); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_render, 56); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_rgn, 60); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, frag_screen, 64); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd0_base, 72); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd1_base, 80); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, pds_bgnd3_sizeinfo, 88); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, isp_mtile_base, 96); +OFFSET_CHECK(struct rogue_fwif_transfer_regs, pbe_wordx_mrty, 104); 
+SIZE_CHECK(struct rogue_fwif_transfer_regs, 176);
+
+OFFSET_CHECK(struct rogue_fwif_cmd_transfer, common, 0);
+OFFSET_CHECK(struct rogue_fwif_cmd_transfer, regs, 8);
+OFFSET_CHECK(struct rogue_fwif_cmd_transfer, flags, 184);
+SIZE_CHECK(struct rogue_fwif_cmd_transfer, 192);
+
+#endif /* PVR_ROGUE_FWIF_CLIENT_CHECK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h
new file mode 100644
index 0000000000..6ebb95ba98
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_common.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_COMMON_H
+#define PVR_ROGUE_FWIF_COMMON_H
+
+#include <linux/build_bug.h>
+
+/*
+ * This macro represents a mask of LSBs that must be zero on data structure
+ * sizes and offsets to ensure they are 8-byte granular on types shared between
+ * the FW and host driver.
+ */
+#define PVR_FW_ALIGNMENT_LSB 7U
+
+/* Macro to test structure size alignment. */
+#define PVR_FW_STRUCT_SIZE_ASSERT(_a) \
+	static_assert((sizeof(_a) & PVR_FW_ALIGNMENT_LSB) == 0U, \
+		      "Size of " #_a " is not properly aligned")
+
+/* The master definition for data masters known to the firmware. */
+
+#define PVR_FWIF_DM_GP (0)
+/* Either TDM or 2D DM is present. */
+/* When the 'tla' feature is present in the hw (as per @pvr_device_features). */
+#define PVR_FWIF_DM_2D (1)
+/*
+ * When the 'fastrender_dm' feature is present in the hw (as per
+ * @pvr_device_features).
+ */
+#define PVR_FWIF_DM_TDM (1)
+
+#define PVR_FWIF_DM_GEOM (2)
+#define PVR_FWIF_DM_FRAG (3)
+#define PVR_FWIF_DM_CDM (4)
+#define PVR_FWIF_DM_RAY (5)
+#define PVR_FWIF_DM_GEOM2 (6)
+#define PVR_FWIF_DM_GEOM3 (7)
+#define PVR_FWIF_DM_GEOM4 (8)
+
+#define PVR_FWIF_DM_LAST PVR_FWIF_DM_GEOM4
+
+/* Maximum number of DM in use: GP, 2D/TDM, GEOM, 3D, CDM, RAY, GEOM2, GEOM3, GEOM4 */
+#define PVR_FWIF_DM_MAX (PVR_FWIF_DM_LAST + 1U)
+
+/* GPU Utilisation states */
+#define PVR_FWIF_GPU_UTIL_STATE_IDLE 0U
+#define PVR_FWIF_GPU_UTIL_STATE_ACTIVE 1U
+#define PVR_FWIF_GPU_UTIL_STATE_BLOCKED 2U
+#define PVR_FWIF_GPU_UTIL_STATE_NUM 3U
+#define PVR_FWIF_GPU_UTIL_STATE_MASK 0x3ULL
+
+/*
+ * Maximum amount of register writes that can be done by the register
+ * programmer (FW or META DMA). This is not a HW limitation, it is only
+ * a protection against malformed inputs to the register programmer.
+ */
+#define PVR_MAX_NUM_REGISTER_PROGRAMMER_WRITES 128U
+
+#endif /* PVR_ROGUE_FWIF_COMMON_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h
new file mode 100644
index 0000000000..168277bce9
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_dev_info.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd.
*/ + +#ifndef __PVR_ROGUE_FWIF_DEV_INFO_H__ +#define __PVR_ROGUE_FWIF_DEV_INFO_H__ + +enum { + PVR_FW_HAS_BRN_44079 = 0, + PVR_FW_HAS_BRN_47217, + PVR_FW_HAS_BRN_48492, + PVR_FW_HAS_BRN_48545, + PVR_FW_HAS_BRN_49927, + PVR_FW_HAS_BRN_50767, + PVR_FW_HAS_BRN_51764, + PVR_FW_HAS_BRN_62269, + PVR_FW_HAS_BRN_63142, + PVR_FW_HAS_BRN_63553, + PVR_FW_HAS_BRN_66011, + PVR_FW_HAS_BRN_71242, + + PVR_FW_HAS_BRN_MAX +}; + +enum { + PVR_FW_HAS_ERN_35421 = 0, + PVR_FW_HAS_ERN_38020, + PVR_FW_HAS_ERN_38748, + PVR_FW_HAS_ERN_42064, + PVR_FW_HAS_ERN_42290, + PVR_FW_HAS_ERN_42606, + PVR_FW_HAS_ERN_47025, + PVR_FW_HAS_ERN_57596, + + PVR_FW_HAS_ERN_MAX +}; + +enum { + PVR_FW_HAS_FEATURE_AXI_ACELITE = 0, + PVR_FW_HAS_FEATURE_CDM_CONTROL_STREAM_FORMAT, + PVR_FW_HAS_FEATURE_CLUSTER_GROUPING, + PVR_FW_HAS_FEATURE_COMMON_STORE_SIZE_IN_DWORDS, + PVR_FW_HAS_FEATURE_COMPUTE, + PVR_FW_HAS_FEATURE_COMPUTE_MORTON_CAPABLE, + PVR_FW_HAS_FEATURE_COMPUTE_OVERLAP, + PVR_FW_HAS_FEATURE_COREID_PER_OS, + PVR_FW_HAS_FEATURE_DYNAMIC_DUST_POWER, + PVR_FW_HAS_FEATURE_ECC_RAMS, + PVR_FW_HAS_FEATURE_FBCDC, + PVR_FW_HAS_FEATURE_FBCDC_ALGORITHM, + PVR_FW_HAS_FEATURE_FBCDC_ARCHITECTURE, + PVR_FW_HAS_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS, + PVR_FW_HAS_FEATURE_FBC_MAX_LARGE_DESCRIPTORS, + PVR_FW_HAS_FEATURE_FB_CDC_V4, + PVR_FW_HAS_FEATURE_GPU_MULTICORE_SUPPORT, + PVR_FW_HAS_FEATURE_GPU_VIRTUALISATION, + PVR_FW_HAS_FEATURE_GS_RTA_SUPPORT, + PVR_FW_HAS_FEATURE_IRQ_PER_OS, + PVR_FW_HAS_FEATURE_ISP_MAX_TILES_IN_FLIGHT, + PVR_FW_HAS_FEATURE_ISP_SAMPLES_PER_PIXEL, + PVR_FW_HAS_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE, + PVR_FW_HAS_FEATURE_LAYOUT_MARS, + PVR_FW_HAS_FEATURE_MAX_PARTITIONS, + PVR_FW_HAS_FEATURE_META, + PVR_FW_HAS_FEATURE_META_COREMEM_SIZE, + PVR_FW_HAS_FEATURE_MIPS, + PVR_FW_HAS_FEATURE_NUM_CLUSTERS, + PVR_FW_HAS_FEATURE_NUM_ISP_IPP_PIPES, + PVR_FW_HAS_FEATURE_NUM_OSIDS, + PVR_FW_HAS_FEATURE_NUM_RASTER_PIPES, + PVR_FW_HAS_FEATURE_PBE2_IN_XE, + PVR_FW_HAS_FEATURE_PBVNC_COREID_REG, + PVR_FW_HAS_FEATURE_PERFBUS, + PVR_FW_HAS_FEATURE_PERF_COUNTER_BATCH, + PVR_FW_HAS_FEATURE_PHYS_BUS_WIDTH, + PVR_FW_HAS_FEATURE_RISCV_FW_PROCESSOR, + PVR_FW_HAS_FEATURE_ROGUEXE, + PVR_FW_HAS_FEATURE_S7_TOP_INFRASTRUCTURE, + PVR_FW_HAS_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT, + PVR_FW_HAS_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2, + PVR_FW_HAS_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION, + PVR_FW_HAS_FEATURE_SLC_BANKS, + PVR_FW_HAS_FEATURE_SLC_CACHE_LINE_SIZE_BITS, + PVR_FW_HAS_FEATURE_SLC_SIZE_CONFIGURABLE, + PVR_FW_HAS_FEATURE_SLC_SIZE_IN_KILOBYTES, + PVR_FW_HAS_FEATURE_SOC_TIMER, + PVR_FW_HAS_FEATURE_SYS_BUS_SECURE_RESET, + PVR_FW_HAS_FEATURE_TESSELLATION, + PVR_FW_HAS_FEATURE_TILE_REGION_PROTECTION, + PVR_FW_HAS_FEATURE_TILE_SIZE_X, + PVR_FW_HAS_FEATURE_TILE_SIZE_Y, + PVR_FW_HAS_FEATURE_TLA, + PVR_FW_HAS_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS, + PVR_FW_HAS_FEATURE_TPU_DM_GLOBAL_REGISTERS, + PVR_FW_HAS_FEATURE_TPU_FILTERING_MODE_CONTROL, + PVR_FW_HAS_FEATURE_USC_MIN_OUTPUT_REGISTERS_PER_PIX, + PVR_FW_HAS_FEATURE_VDM_DRAWINDIRECT, + PVR_FW_HAS_FEATURE_VDM_OBJECT_LEVEL_LLS, + PVR_FW_HAS_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS, + PVR_FW_HAS_FEATURE_WATCHDOG_TIMER, + PVR_FW_HAS_FEATURE_WORKGROUP_PROTECTION, + PVR_FW_HAS_FEATURE_XE_ARCHITECTURE, + PVR_FW_HAS_FEATURE_XE_MEMORY_HIERARCHY, + PVR_FW_HAS_FEATURE_XE_TPU2, + PVR_FW_HAS_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH, + PVR_FW_HAS_FEATURE_XPU_MAX_SLAVES, + PVR_FW_HAS_FEATURE_XPU_REGISTER_BROADCAST, + PVR_FW_HAS_FEATURE_XT_TOP_INFRASTRUCTURE, + PVR_FW_HAS_FEATURE_ZLS_SUBTILE, + + PVR_FW_HAS_FEATURE_MAX +}; + 
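+
+/*
+ * Illustrative only: the enumerators above are bit positions in the
+ * firmware's BRN/ERN/feature presence masks. A hypothetical test of a BRN
+ * workaround bit in a 64-bit mask could look like the sketch below (the
+ * helper name and mask parameter are not part of this header, and
+ * linux/bits.h and linux/types.h are assumed for BIT_ULL(), u64 and bool):
+ */
+static inline bool pvr_fw_has_brn_sketch(u64 brn_mask, unsigned int brn)
+{
+	/* PVR_FW_HAS_BRN_MAX is well below 64, so BIT_ULL() is safe here. */
+	return brn < PVR_FW_HAS_BRN_MAX && (brn_mask & BIT_ULL(brn));
+}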
+#endif /* __PVR_ROGUE_FWIF_DEV_INFO_H__ */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h
new file mode 100644
index 0000000000..1db1f4c532
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_resetframework.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_RESETFRAMEWORK_H
+#define PVR_ROGUE_FWIF_RESETFRAMEWORK_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#include "pvr_rogue_fwif_shared.h"
+
+struct rogue_fwif_rf_registers {
+	union {
+		u64 cdmreg_cdm_cb_base;
+		u64 cdmreg_cdm_ctrl_stream_base;
+	};
+	u64 cdmreg_cdm_cb_queue;
+	u64 cdmreg_cdm_cb;
+};
+
+struct rogue_fwif_rf_cmd {
+	/* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */
+	struct rogue_fwif_rf_registers fw_registers __aligned(8);
+};
+
+#define ROGUE_FWIF_RF_CMD_SIZE sizeof(struct rogue_fwif_rf_cmd)
+
+#endif /* PVR_ROGUE_FWIF_RESETFRAMEWORK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
new file mode 100644
index 0000000000..56e11009e1
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
@@ -0,0 +1,1648 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_FWIF_SF_H
+#define PVR_ROGUE_FWIF_SF_H
+
+/*
+ ******************************************************************************
+ * *DO*NOT* rearrange or delete lines in rogue_fw_log_sfgroups or stid_fmts;
+ * doing so WILL BREAK fw tracing message compatibility with previous
+ * fw versions. Only add new ones, if so required.
+ ******************************************************************************
+ */
+
+/* Available log groups. */
+enum rogue_fw_log_sfgroups {
+	ROGUE_FW_GROUP_NULL,
+	ROGUE_FW_GROUP_MAIN,
+	ROGUE_FW_GROUP_CLEANUP,
+	ROGUE_FW_GROUP_CSW,
+	ROGUE_FW_GROUP_PM,
+	ROGUE_FW_GROUP_RTD,
+	ROGUE_FW_GROUP_SPM,
+	ROGUE_FW_GROUP_MTS,
+	ROGUE_FW_GROUP_BIF,
+	ROGUE_FW_GROUP_MISC,
+	ROGUE_FW_GROUP_POW,
+	ROGUE_FW_GROUP_HWR,
+	ROGUE_FW_GROUP_HWP,
+	ROGUE_FW_GROUP_RPM,
+	ROGUE_FW_GROUP_DMA,
+	ROGUE_FW_GROUP_DBG,
+};
+
+#define PVR_SF_STRING_MAX_SIZE 256U
+
+/* pair of string format id and string formats */
+struct rogue_fw_stid_fmt {
+	u32 id;
+	char name[PVR_SF_STRING_MAX_SIZE];
+};
+
+/*
+ * The symbolic names found in the table above are assigned an u32 value of
+ * the following format:
+ * 31  30..28  27..20  19..16  15..12  11..0   bits
+ *
+ *    0-11: id number
+ *   12-15: group id number
+ *   16-19: number of parameters
+ *   20-27: unused
+ *   28-30: active: identifies an SF packet, otherwise a regular int32
+ *      31: reserved for signed/unsigned compatibility
+ *
+ * The following macro assigns those values to the enum generated SF ids list.
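+ *
+ * Worked example (illustrative only, not a real table entry): with
+ * ROGUE_FW_GROUP_MAIN = 1, ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_MAIN, 3)
+ * evaluates to 0x70031005: id 5 in bits 0-11, group 1 in bits 12-15, 3
+ * parameters in bits 16-19 and the 0x7 "active" marker in bits 28-30.
+ * ROGUE_FW_SF_GID() and ROGUE_FW_SF_PARAMNUM() below simply shift and mask
+ * those fields back out: ROGUE_FW_SF_GID(0x70031005) == 1 and
+ * ROGUE_FW_SF_PARAMNUM(0x70031005) == 3.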
+ */ +#define ROGUE_FW_LOG_IDMARKER (0x70000000U) +#define ROGUE_FW_LOG_CREATESFID(a, b, e) ((u32)(a) | ((u32)(b) << 12) | ((u32)(e) << 16) | \ + ROGUE_FW_LOG_IDMARKER) + +#define ROGUE_FW_LOG_IDMASK (0xFFF00000) +#define ROGUE_FW_LOG_VALIDID(I) (((I) & ROGUE_FW_LOG_IDMASK) == ROGUE_FW_LOG_IDMARKER) + +/* Return the group id that the given (enum generated) id belongs to */ +#define ROGUE_FW_SF_GID(x) (((u32)(x) >> 12) & 0xfU) +/* Returns how many arguments the SF(string format) for the given (enum generated) id requires */ +#define ROGUE_FW_SF_PARAMNUM(x) (((u32)(x) >> 16) & 0xfU) + +/* pair of string format id and string formats */ +struct rogue_km_stid_fmt { + u32 id; + const char *name; +}; + +static const struct rogue_km_stid_fmt stid_fmts[] = { + { ROGUE_FW_LOG_CREATESFID(0, ROGUE_FW_GROUP_NULL, 0), + "You should not use this string" }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_MAIN, 6), + "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_MAIN, 2), + "3D finished, HWRTData0State=%x, HWRTData1State=%x" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_MAIN, 4), + "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d, prio: %d" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_MAIN, 0), + "3D Transfer finished" }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_MAIN, 3), + "Kick Compute: FWCtx 0x%08.8x @ %d, prio: %d" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_MAIN, 0), + "Compute finished" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_MAIN, 7), + "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_MAIN, 0), + "TA finished" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_MAIN, 0), + "Restart TA after partial render" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_MAIN, 0), + "Resume TA without partial render" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_MAIN, 2), + "Out of memory! Context 0x%08x, HWRTData 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_MAIN, 3), + "Kick TLA: FWCtx 0x%08.8x @ %d, prio:%d" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_MAIN, 0), + "TLA finished" }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_MAIN, 3), + "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_MAIN, 2), + "UFO Checks for FWCtx 0x%08.8x @ %d" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_MAIN, 3), + "UFO Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_MAIN, 0), + "UFO Checks succeeded" }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_MAIN, 3), + "UFO PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_MAIN, 1), + "UFO SPM PR-Checks for FWCtx 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_MAIN, 4), + "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= ????????, [0x%08.8x] is ???????? 
requires 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_MAIN, 2), + "UFO Updates for FWCtx 0x%08.8x @ %d" }, + { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_MAIN, 2), + "UFO Update: [0x%08.8x] = 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_MAIN, 1), + "ASSERT Failed: line %d of:" }, + { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_MAIN, 2), + "HWR: Lockup detected on DM%d, FWCtx: 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_MAIN, 3), + "HWR: Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_MAIN, 0), + "HWR: Reset HW" }, + { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_MAIN, 0), + "HWR: Lockup recovered." }, + { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_MAIN, 1), + "HWR: False lockup detected for DM%u" }, + { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_MAIN, 3), + "Alignment check %d failed: host = 0x%x, fw = 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_MAIN, 0), + "GP USC triggered" }, + { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_MAIN, 2), + "Overallocating %u temporary registers and %u shared registers for breakpoint handler" }, + { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_MAIN, 1), + "Setting breakpoint: Addr 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_MAIN, 0), + "Store breakpoint state" }, + { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_MAIN, 0), + "Unsetting BP Registers" }, + { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_MAIN, 1), + "Active RTs expected to be zero, actually %u" }, + { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_MAIN, 1), + "RTC present, %u active render targets" }, + { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_MAIN, 1), + "Estimated Power 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_MAIN, 1), + "RTA render target %u" }, + { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_MAIN, 2), + "Kick RTA render %u of %u" }, + { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_MAIN, 3), + "HWR sizes check %d failed: addresses = %d, sizes = %d" }, + { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_MAIN, 1), + "Pow: DUSTS_ENABLE = 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_MAIN, 2), + "Pow: On(1)/Off(0): %d, Units: 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_MAIN, 2), + "Pow: Changing number of dusts from %d to %d" }, + { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_MAIN, 0), + "Pow: Sidekick ready to be powered down" }, + { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_MAIN, 2), + "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)" }, + { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_MAIN, 0), + "No ZS Buffer used for partial render (store)" }, + { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_MAIN, 0), + "No Depth/Stencil Buffer used for partial render (load)" }, + { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_MAIN, 2), + "HWR: Lock-up DM%d FWCtx: 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_MAIN, 7), + "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d" }, + { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_MAIN, 3), + "MLIST%d checker: MList[%d] = 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_MAIN, 1), + "MLIST%d OK" }, + { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_MAIN, 1), + "MLIST%d is empty" }, + { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_MAIN, 8), + "MLIST%d checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%d" }, + { ROGUE_FW_LOG_CREATESFID(55, ROGUE_FW_GROUP_MAIN, 0), + "3D OQ flush kick" }, + { ROGUE_FW_LOG_CREATESFID(56, 
ROGUE_FW_GROUP_MAIN, 1), + "HWPerf block ID (0x%x) unsupported by device" }, + { ROGUE_FW_LOG_CREATESFID(57, ROGUE_FW_GROUP_MAIN, 2), + "Setting breakpoint: Addr 0x%08.8x DM%u" }, + { ROGUE_FW_LOG_CREATESFID(58, ROGUE_FW_GROUP_MAIN, 3), + "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d" }, + { ROGUE_FW_LOG_CREATESFID(59, ROGUE_FW_GROUP_MAIN, 1), + "RDM finished on context %u" }, + { ROGUE_FW_LOG_CREATESFID(60, ROGUE_FW_GROUP_MAIN, 3), + "Kick SHG: FWCtx 0x%08.8x @ %d, prio: %d" }, + { ROGUE_FW_LOG_CREATESFID(61, ROGUE_FW_GROUP_MAIN, 0), + "SHG finished" }, + { ROGUE_FW_LOG_CREATESFID(62, ROGUE_FW_GROUP_MAIN, 1), + "FBA finished on context %u" }, + { ROGUE_FW_LOG_CREATESFID(63, ROGUE_FW_GROUP_MAIN, 0), + "UFO Checks failed" }, + { ROGUE_FW_LOG_CREATESFID(64, ROGUE_FW_GROUP_MAIN, 1), + "Kill DM%d start" }, + { ROGUE_FW_LOG_CREATESFID(65, ROGUE_FW_GROUP_MAIN, 1), + "Kill DM%d complete" }, + { ROGUE_FW_LOG_CREATESFID(66, ROGUE_FW_GROUP_MAIN, 2), + "FC%u cCCB Woff update = %u" }, + { ROGUE_FW_LOG_CREATESFID(67, ROGUE_FW_GROUP_MAIN, 4), + "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d" }, + { ROGUE_FW_LOG_CREATESFID(68, ROGUE_FW_GROUP_MAIN, 0), + "GPU init" }, + { ROGUE_FW_LOG_CREATESFID(69, ROGUE_FW_GROUP_MAIN, 1), + "GPU Units init (# mask: 0x%x)" }, + { ROGUE_FW_LOG_CREATESFID(70, ROGUE_FW_GROUP_MAIN, 3), + "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d" }, + { ROGUE_FW_LOG_CREATESFID(71, ROGUE_FW_GROUP_MAIN, 3), + "Register configuration added. Address: 0x%x Value: 0x%x%x" }, + { ROGUE_FW_LOG_CREATESFID(72, ROGUE_FW_GROUP_MAIN, 1), + "Register configuration applied to type %d. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)" }, + { ROGUE_FW_LOG_CREATESFID(73, ROGUE_FW_GROUP_MAIN, 0), + "Perform TPC flush." }, + { ROGUE_FW_LOG_CREATESFID(74, ROGUE_FW_GROUP_MAIN, 0), + "GPU has locked up (see HWR logs for more info)" }, + { ROGUE_FW_LOG_CREATESFID(75, ROGUE_FW_GROUP_MAIN, 0), + "HWR has been triggered - GPU has overrun its deadline (see HWR logs)" }, + { ROGUE_FW_LOG_CREATESFID(76, ROGUE_FW_GROUP_MAIN, 0), + "HWR has been triggered - GPU has failed a poll (see HWR logs)" }, + { ROGUE_FW_LOG_CREATESFID(77, ROGUE_FW_GROUP_MAIN, 1), + "Doppler out of memory event for FC %u" }, + { ROGUE_FW_LOG_CREATESFID(78, ROGUE_FW_GROUP_MAIN, 3), + "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(79, ROGUE_FW_GROUP_MAIN, 3), + "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(80, ROGUE_FW_GROUP_MAIN, 1), + "TIMESTAMP -> [0x%08.8x]" }, + { ROGUE_FW_LOG_CREATESFID(81, ROGUE_FW_GROUP_MAIN, 2), + "UFO RMW Updates for FWCtx 0x%08.8x @ %d" }, + { ROGUE_FW_LOG_CREATESFID(82, ROGUE_FW_GROUP_MAIN, 2), + "UFO Update: [0x%08.8x] = 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(83, ROGUE_FW_GROUP_MAIN, 2), + "Kick Null cmd: FWCtx 0x%08.8x @ %d" }, + { ROGUE_FW_LOG_CREATESFID(84, ROGUE_FW_GROUP_MAIN, 2), + "RPM Out of memory! 
Context 0x%08x, SH requestor %d" }, + { ROGUE_FW_LOG_CREATESFID(85, ROGUE_FW_GROUP_MAIN, 4), + "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d" }, + { ROGUE_FW_LOG_CREATESFID(86, ROGUE_FW_GROUP_MAIN, 4), + "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(87, ROGUE_FW_GROUP_MAIN, 4), + "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(88, ROGUE_FW_GROUP_MAIN, 4), + "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(89, ROGUE_FW_GROUP_MAIN, 3), + "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)" }, + { ROGUE_FW_LOG_CREATESFID(90, ROGUE_FW_GROUP_MAIN, 3), + "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)" }, + { ROGUE_FW_LOG_CREATESFID(91, ROGUE_FW_GROUP_MAIN, 1), + "Host Sync Partition marker: %d" }, + { ROGUE_FW_LOG_CREATESFID(92, ROGUE_FW_GROUP_MAIN, 1), + "Host Sync Partition repeat: %d" }, + { ROGUE_FW_LOG_CREATESFID(93, ROGUE_FW_GROUP_MAIN, 1), + "Core clock set to %d Hz" }, + { ROGUE_FW_LOG_CREATESFID(94, ROGUE_FW_GROUP_MAIN, 7), + "Compute Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)" }, + { ROGUE_FW_LOG_CREATESFID(95, ROGUE_FW_GROUP_MAIN, 3), + "Signal check failed, Required Data: 0x%x, Address: 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(96, ROGUE_FW_GROUP_MAIN, 5), + "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(97, ROGUE_FW_GROUP_MAIN, 4), + "Signalled the previously waiting FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(98, ROGUE_FW_GROUP_MAIN, 0), + "Compute stalled" }, + { ROGUE_FW_LOG_CREATESFID(99, ROGUE_FW_GROUP_MAIN, 3), + "Compute stalled (Roff = %u, Woff = %u, Size = %u)" }, + { ROGUE_FW_LOG_CREATESFID(100, ROGUE_FW_GROUP_MAIN, 3), + "Compute resumed (Roff = %u, Woff = %u, Size = %u)" }, + { ROGUE_FW_LOG_CREATESFID(101, ROGUE_FW_GROUP_MAIN, 4), + "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(102, ROGUE_FW_GROUP_MAIN, 4), + "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(103, ROGUE_FW_GROUP_MAIN, 1), + "DM: %u signal check failed" }, + { ROGUE_FW_LOG_CREATESFID(104, ROGUE_FW_GROUP_MAIN, 3), + "Kick TDM: FWCtx 0x%08.8x @ %d, prio:%d" }, + { ROGUE_FW_LOG_CREATESFID(105, ROGUE_FW_GROUP_MAIN, 0), + "TDM finished" }, + { ROGUE_FW_LOG_CREATESFID(106, ROGUE_FW_GROUP_MAIN, 4), + "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]: 0x%08x 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(107, ROGUE_FW_GROUP_MAIN, 0), + "BRN 54141 HIT" }, + { ROGUE_FW_LOG_CREATESFID(108, ROGUE_FW_GROUP_MAIN, 0), + "BRN 54141 Dummy TA kicked" }, + { ROGUE_FW_LOG_CREATESFID(109, ROGUE_FW_GROUP_MAIN, 0), + "BRN 54141 resume TA" }, + { ROGUE_FW_LOG_CREATESFID(110, ROGUE_FW_GROUP_MAIN, 0), + "BRN 54141 double hit after applying WA" }, + { ROGUE_FW_LOG_CREATESFID(111, ROGUE_FW_GROUP_MAIN, 2), + "BRN 54141 Dummy TA VDM base address: 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(112, ROGUE_FW_GROUP_MAIN, 4), + "Signal check failed, Required Data: 0x%x, Current Data: 0x%x, Address: 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(113, ROGUE_FW_GROUP_MAIN, 2), + "TDM stalled (Roff = %u, Woff = %u)" }, + { ROGUE_FW_LOG_CREATESFID(114, 
ROGUE_FW_GROUP_MAIN, 1), + "Write Offset update notification for stalled FWCtx 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(115, ROGUE_FW_GROUP_MAIN, 3), + "Changing OSid %d's priority from %u to %u" }, + { ROGUE_FW_LOG_CREATESFID(116, ROGUE_FW_GROUP_MAIN, 0), + "Compute resumed" }, + { ROGUE_FW_LOG_CREATESFID(117, ROGUE_FW_GROUP_MAIN, 7), + "Kick TLA: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(118, ROGUE_FW_GROUP_MAIN, 7), + "Kick TDM: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(119, ROGUE_FW_GROUP_MAIN, 11), + "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(120, ROGUE_FW_GROUP_MAIN, 10), + "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(121, ROGUE_FW_GROUP_MAIN, 8), + "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(122, ROGUE_FW_GROUP_MAIN, 6), + "Kick Compute: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(123, ROGUE_FW_GROUP_MAIN, 8), + "Kick RTU: FWCtx 0x%08.8x @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(124, ROGUE_FW_GROUP_MAIN, 7), + "Kick SHG: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(125, ROGUE_FW_GROUP_MAIN, 1), + "Reconfigure CSRM: special coeff support enable %d." }, + { ROGUE_FW_LOG_CREATESFID(127, ROGUE_FW_GROUP_MAIN, 1), + "TA requires max coeff mode, deferring: %d." }, + { ROGUE_FW_LOG_CREATESFID(128, ROGUE_FW_GROUP_MAIN, 1), + "3D requires max coeff mode, deferring: %d." }, + { ROGUE_FW_LOG_CREATESFID(129, ROGUE_FW_GROUP_MAIN, 1), + "Kill DM%d failed" }, + { ROGUE_FW_LOG_CREATESFID(130, ROGUE_FW_GROUP_MAIN, 2), + "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)" }, + { ROGUE_FW_LOG_CREATESFID(131, ROGUE_FW_GROUP_MAIN, 3), + "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)" }, + { ROGUE_FW_LOG_CREATESFID(132, ROGUE_FW_GROUP_MAIN, 1), + "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs)." 
}, + { ROGUE_FW_LOG_CREATESFID(133, ROGUE_FW_GROUP_MAIN, 1), + "HCS changed to %d ms" }, + { ROGUE_FW_LOG_CREATESFID(134, ROGUE_FW_GROUP_MAIN, 4), + "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(135, ROGUE_FW_GROUP_MAIN, 2), + " Phantom %d: USCTiles=%d" }, + { ROGUE_FW_LOG_CREATESFID(136, ROGUE_FW_GROUP_MAIN, 0), + "Isolation grouping is disabled" }, + { ROGUE_FW_LOG_CREATESFID(137, ROGUE_FW_GROUP_MAIN, 1), + "Isolation group configured with a priority threshold of %d" }, + { ROGUE_FW_LOG_CREATESFID(138, ROGUE_FW_GROUP_MAIN, 1), + "OS %d has come online" }, + { ROGUE_FW_LOG_CREATESFID(139, ROGUE_FW_GROUP_MAIN, 1), + "OS %d has gone offline" }, + { ROGUE_FW_LOG_CREATESFID(140, ROGUE_FW_GROUP_MAIN, 4), + "Signalled the previously stalled FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(141, ROGUE_FW_GROUP_MAIN, 7), + "TDM Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)" }, + { ROGUE_FW_LOG_CREATESFID(142, ROGUE_FW_GROUP_MAIN, 6), + "Reset TDM Queue Read Offset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes 0, Woff = %u, Size = %u)" }, + { ROGUE_FW_LOG_CREATESFID(143, ROGUE_FW_GROUP_MAIN, 5), + "User Mode Queue mismatched stream start: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)" }, + { ROGUE_FW_LOG_CREATESFID(144, ROGUE_FW_GROUP_MAIN, 0), + "GPU deinit" }, + { ROGUE_FW_LOG_CREATESFID(145, ROGUE_FW_GROUP_MAIN, 0), + "GPU units deinit" }, + { ROGUE_FW_LOG_CREATESFID(146, ROGUE_FW_GROUP_MAIN, 2), + "Initialised OS %d with config flags 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(147, ROGUE_FW_GROUP_MAIN, 2), + "UFO limit exceeded %d/%d" }, + { ROGUE_FW_LOG_CREATESFID(148, ROGUE_FW_GROUP_MAIN, 0), + "3D Dummy stencil store" }, + { ROGUE_FW_LOG_CREATESFID(149, ROGUE_FW_GROUP_MAIN, 3), + "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(150, ROGUE_FW_GROUP_MAIN, 1), + "Unknown Command (eCmdType=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(151, ROGUE_FW_GROUP_MAIN, 4), + "UFO forced update: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(152, ROGUE_FW_GROUP_MAIN, 5), + "UFO forced update NOP: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x, reason %d" }, + { ROGUE_FW_LOG_CREATESFID(153, ROGUE_FW_GROUP_MAIN, 3), + "TDM context switch check: Roff %u points to 0x%08x, Match=%u" }, + { ROGUE_FW_LOG_CREATESFID(154, ROGUE_FW_GROUP_MAIN, 6), + "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x" }, + { ROGUE_FW_LOG_CREATESFID(155, ROGUE_FW_GROUP_MAIN, 2), + "FW IRQ # %u @ %u" }, + { ROGUE_FW_LOG_CREATESFID(156, ROGUE_FW_GROUP_MAIN, 3), + "Setting breakpoint: Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u" }, + { ROGUE_FW_LOG_CREATESFID(157, ROGUE_FW_GROUP_MAIN, 3), + "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(158, ROGUE_FW_GROUP_MAIN, 3), + "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(159, ROGUE_FW_GROUP_MAIN, 4), + "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(160, ROGUE_FW_GROUP_MAIN, 4), + "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u" }, + { ROGUE_FW_LOG_CREATESFID(161, ROGUE_FW_GROUP_MAIN, 3), + "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(162, ROGUE_FW_GROUP_MAIN, 4), + "Invalid Write Offset 
update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(163, ROGUE_FW_GROUP_MAIN, 4), + "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u" }, + { ROGUE_FW_LOG_CREATESFID(164, ROGUE_FW_GROUP_MAIN, 3), + "Checkpoint CCB for OSid %u is full, signalling host for full check state (Roff = %u, Woff = %u)" }, + { ROGUE_FW_LOG_CREATESFID(165, ROGUE_FW_GROUP_MAIN, 8), + "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x" }, + { ROGUE_FW_LOG_CREATESFID(166, ROGUE_FW_GROUP_MAIN, 4), + "OSid %d fw state transition request: from %d to %d (0-offline 1-ready 2-active 3-offloading). Status %d (1-ok 0-fail)" }, + { ROGUE_FW_LOG_CREATESFID(167, ROGUE_FW_GROUP_MAIN, 2), + "OSid %u has %u stale commands in its KCCB" }, + { ROGUE_FW_LOG_CREATESFID(168, ROGUE_FW_GROUP_MAIN, 0), + "Applying VCE pause" }, + { ROGUE_FW_LOG_CREATESFID(169, ROGUE_FW_GROUP_MAIN, 3), + "OSid %u KCCB slot %u value updated to %u" }, + { ROGUE_FW_LOG_CREATESFID(170, ROGUE_FW_GROUP_MAIN, 7), + "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(171, ROGUE_FW_GROUP_MAIN, 10), + "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u" }, + { ROGUE_FW_LOG_CREATESFID(172, ROGUE_FW_GROUP_MAIN, 10), + "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u" }, + { ROGUE_FW_LOG_CREATESFID(173, ROGUE_FW_GROUP_MAIN, 2), + "Null FWCtx in KCCB kick cmd for OSid %u with WOff %u" }, + { ROGUE_FW_LOG_CREATESFID(174, ROGUE_FW_GROUP_MAIN, 2), + "Discarding invalid SLC flushinval command for OSid %u, FWCtx 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(175, ROGUE_FW_GROUP_MAIN, 3), + "Invalid Write Offset update notification from OSid %u: FWCtx 0x%08x, MemCtx 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(176, ROGUE_FW_GROUP_MAIN, 2), + "Initialised Firmware with config flags 0x%08x and extended config flags 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(177, ROGUE_FW_GROUP_MAIN, 1), + "Set Periodic Hardware Reset Mode: %d" }, + { ROGUE_FW_LOG_CREATESFID(179, ROGUE_FW_GROUP_MAIN, 3), + "PHR mode %d, FW state: 0x%08x, HWR flags: 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(180, ROGUE_FW_GROUP_MAIN, 1), + "PHR mode %d triggered a reset" }, + { ROGUE_FW_LOG_CREATESFID(181, ROGUE_FW_GROUP_MAIN, 2), + "Signal update, Snoop Filter: %u, Signal Id: %u" }, + { ROGUE_FW_LOG_CREATESFID(182, ROGUE_FW_GROUP_MAIN, 1), + "WARNING: Skipping FW KCCB Cmd type %d which is not yet supported on Series8." }, + { ROGUE_FW_LOG_CREATESFID(183, ROGUE_FW_GROUP_MAIN, 4), + "MMU context cache data NULL, but cache flags=0x%x (sync counter=%u, update value=%u) OSId=%u" }, + { ROGUE_FW_LOG_CREATESFID(184, ROGUE_FW_GROUP_MAIN, 5), + "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%d" }, + { ROGUE_FW_LOG_CREATESFID(185, ROGUE_FW_GROUP_MAIN, 3), + "FBSC invalidate for Context Set [0x%08x]: Entry mask 0x%08x%08x." 
}, + { ROGUE_FW_LOG_CREATESFID(186, ROGUE_FW_GROUP_MAIN, 3), + "TDM context switch check: Roff %u was not valid for kick starting at %u, moving back to %u" }, + { ROGUE_FW_LOG_CREATESFID(187, ROGUE_FW_GROUP_MAIN, 2), + "Signal updates: FIFO: %u, Signals: 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(188, ROGUE_FW_GROUP_MAIN, 2), + "Invalid FBSC cmd: FWCtx 0x%08x, MemCtx 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(189, ROGUE_FW_GROUP_MAIN, 0), + "Insert BRN68497 WA blit after TDM Context store." }, + { ROGUE_FW_LOG_CREATESFID(190, ROGUE_FW_GROUP_MAIN, 1), + "UFO Updates for previously finished FWCtx 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(191, ROGUE_FW_GROUP_MAIN, 1), + "RTC with RTA present, %u active render targets" }, + { ROGUE_FW_LOG_CREATESFID(192, ROGUE_FW_GROUP_MAIN, 0), + "Invalid RTA Set-up. The ValidRenderTargets array in RTACtl is Null!" }, + { ROGUE_FW_LOG_CREATESFID(193, ROGUE_FW_GROUP_MAIN, 2), + "Block 0x%x / Counter 0x%x INVALID and ignored" }, + { ROGUE_FW_LOG_CREATESFID(194, ROGUE_FW_GROUP_MAIN, 2), + "ECC fault GPU=0x%08x FW=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(195, ROGUE_FW_GROUP_MAIN, 1), + "Processing XPU event on DM = %d" }, + { ROGUE_FW_LOG_CREATESFID(196, ROGUE_FW_GROUP_MAIN, 2), + "OSid %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u" }, + { ROGUE_FW_LOG_CREATESFID(197, ROGUE_FW_GROUP_MAIN, 1), + "GPU-%u has locked up (see HWR logs for more info)" }, + { ROGUE_FW_LOG_CREATESFID(198, ROGUE_FW_GROUP_MAIN, 3), + "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(199, ROGUE_FW_GROUP_MAIN, 0), + "GPU has locked up (see HWR logs for more info)" }, + { ROGUE_FW_LOG_CREATESFID(200, ROGUE_FW_GROUP_MAIN, 1), + "Reprocessing outstanding XPU events from cores 0x%02x" }, + { ROGUE_FW_LOG_CREATESFID(201, ROGUE_FW_GROUP_MAIN, 3), + "Secondary XPU event on DM=%d, CoreMask=0x%02x, Raised=0x%02x" }, + { ROGUE_FW_LOG_CREATESFID(202, ROGUE_FW_GROUP_MAIN, 8), + "TDM Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)" }, + { ROGUE_FW_LOG_CREATESFID(203, ROGUE_FW_GROUP_MAIN, 3), + "TDM stalled Core %u (Roff = %u, Woff = %u)" }, + { ROGUE_FW_LOG_CREATESFID(204, ROGUE_FW_GROUP_MAIN, 8), + "Compute Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)" }, + { ROGUE_FW_LOG_CREATESFID(205, ROGUE_FW_GROUP_MAIN, 4), + "Compute stalled core %u (Roff = %u, Woff = %u, Size = %u)" }, + { ROGUE_FW_LOG_CREATESFID(206, ROGUE_FW_GROUP_MAIN, 6), + "User Mode Queue mismatched stream start: Core %u, FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)" }, + { ROGUE_FW_LOG_CREATESFID(207, ROGUE_FW_GROUP_MAIN, 3), + "TDM resumed core %u (Roff = %u, Woff = %u)" }, + { ROGUE_FW_LOG_CREATESFID(208, ROGUE_FW_GROUP_MAIN, 4), + "Compute resumed core %u (Roff = %u, Woff = %u, Size = %u)" }, + { ROGUE_FW_LOG_CREATESFID(209, ROGUE_FW_GROUP_MAIN, 2), + " Updated permission for OSid %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)" }, + { ROGUE_FW_LOG_CREATESFID(210, ROGUE_FW_GROUP_MAIN, 2), + "Mask = 0x%X, mask2 = 0x%X" }, + { ROGUE_FW_LOG_CREATESFID(211, ROGUE_FW_GROUP_MAIN, 3), + " core %u, reg = %u, mask = 0x%X)" }, + { ROGUE_FW_LOG_CREATESFID(212, ROGUE_FW_GROUP_MAIN, 1), + "ECC fault received from safety bus: 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(213, ROGUE_FW_GROUP_MAIN, 1), + "Safety Watchdog threshold period set to 0x%x clock cycles" }, + { ROGUE_FW_LOG_CREATESFID(214, ROGUE_FW_GROUP_MAIN, 0), + "MTS Safety 
Event triggered by the safety watchdog." }, + { ROGUE_FW_LOG_CREATESFID(215, ROGUE_FW_GROUP_MAIN, 3), + "DM%d USC tasks range limit 0 - %d, stride %d" }, + { ROGUE_FW_LOG_CREATESFID(216, ROGUE_FW_GROUP_MAIN, 1), + "ECC fault GPU=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(217, ROGUE_FW_GROUP_MAIN, 0), + "GPU Hardware units reset to prevent transient faults." }, + { ROGUE_FW_LOG_CREATESFID(218, ROGUE_FW_GROUP_MAIN, 2), + "Kick Abort cmd: FWCtx 0x%08.8x @ %d" }, + { ROGUE_FW_LOG_CREATESFID(219, ROGUE_FW_GROUP_MAIN, 7), + "Kick Ray: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(220, ROGUE_FW_GROUP_MAIN, 0), + "Ray finished" }, + { ROGUE_FW_LOG_CREATESFID(221, ROGUE_FW_GROUP_MAIN, 2), + "State of firmware's private data at boot time: %d (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X" }, + { ROGUE_FW_LOG_CREATESFID(222, ROGUE_FW_GROUP_MAIN, 2), + "CFI Timeout detected (%d increasing to %d)" }, + { ROGUE_FW_LOG_CREATESFID(223, ROGUE_FW_GROUP_MAIN, 2), + "CFI Timeout detected for FBM (%d increasing to %d)" }, + { ROGUE_FW_LOG_CREATESFID(224, ROGUE_FW_GROUP_MAIN, 0), + "Geom OOM event not allowed" }, + { ROGUE_FW_LOG_CREATESFID(225, ROGUE_FW_GROUP_MAIN, 4), + "Changing OSid %d's priority from %u to %u; Isolation = %u (0 = off; 1 = on)" }, + { ROGUE_FW_LOG_CREATESFID(226, ROGUE_FW_GROUP_MAIN, 2), + "Skipping already executed TA FWCtx 0x%08.8x @ %d" }, + { ROGUE_FW_LOG_CREATESFID(227, ROGUE_FW_GROUP_MAIN, 2), + "Attempt to execute TA FWCtx 0x%08.8x @ %d ahead of time on other GEOM" }, + { ROGUE_FW_LOG_CREATESFID(228, ROGUE_FW_GROUP_MAIN, 8), + "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(229, ROGUE_FW_GROUP_MAIN, 12), + "Kick TA: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(230, ROGUE_FW_GROUP_MAIN, 11), + "Kick 3D: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(231, ROGUE_FW_GROUP_MAIN, 7), + "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(232, ROGUE_FW_GROUP_MAIN, 1), + "TDM finished: Kick ID %u " }, + { ROGUE_FW_LOG_CREATESFID(233, ROGUE_FW_GROUP_MAIN, 1), + "TA finished: Kick ID %u " }, + { ROGUE_FW_LOG_CREATESFID(234, ROGUE_FW_GROUP_MAIN, 3), + "3D finished: Kick ID %u , HWRTData0State=%x, HWRTData1State=%x" }, + { ROGUE_FW_LOG_CREATESFID(235, ROGUE_FW_GROUP_MAIN, 1), + "Compute finished: Kick ID %u " }, + { ROGUE_FW_LOG_CREATESFID(236, ROGUE_FW_GROUP_MAIN, 10), + "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d, Base 0x%08x%08x. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(237, ROGUE_FW_GROUP_MAIN, 8), + "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %d. 
(PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(238, ROGUE_FW_GROUP_MAIN, 1), + "Ray finished: Kick ID %u " }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_MTS, 2), + "Bg Task DM = %u, counted = %d" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_MTS, 1), + "Bg Task complete DM = %u" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_MTS, 3), + "Irq Task DM = %u, Breq = %d, SBIrq = 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_MTS, 1), + "Irq Task complete DM = %u" }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_MTS, 0), + "Kick MTS Bg task DM=All" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_MTS, 1), + "Kick MTS Irq task DM=%d" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_MTS, 2), + "Ready queue debug DM = %u, celltype = %d" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_MTS, 2), + "Ready-to-run debug DM = %u, item = 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_MTS, 3), + "Client command header DM = %u, client CCB = 0x%x, cmd = 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_MTS, 3), + "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_MTS, 3), + "Ready queue debug DM = %u, celltype = %d, OSid = %u" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_MTS, 3), + "Bg Task DM = %u, counted = %d, OSid = %u" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_MTS, 1), + "Bg Task complete DM Bitfield: %u" }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_MTS, 0), + "Irq Task complete." }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_MTS, 7), + "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %d PID = %d." }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_MTS, 4), + "KCCB Slot %u: DM=%u, Cmd=0x%08x, OSid=%u" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_MTS, 2), + "KCCB Slot %u: Return value %u" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_MTS, 1), + "Bg Task OSid = %u" }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_MTS, 3), + "KCCB Slot %u: Cmd=0x%08x, OSid=%u" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_MTS, 1), + "Irq Task (EVENT_STATUS=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_MTS, 2), + "VZ sideband test, kicked with OSid=%u from MTS, OSid for test=%u" }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_CLEANUP, 1), + "FwCommonContext [0x%08x] cleaned" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_CLEANUP, 3), + "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_CLEANUP, 2), + "HWRTData [0x%08x] for DM=%d, received cleanup request" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_CLEANUP, 3), + "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d" }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_CLEANUP, 2), + "HWRTData [0x%08x] HW Context for DM%u is busy" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_CLEANUP, 2), + "HWRTData [0x%08x] HW Context %u cleaned" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_CLEANUP, 1), + "Freelist [0x%08x] cleaned" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_CLEANUP, 1), + "ZSBuffer [0x%08x] cleaned" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_CLEANUP, 3), + "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_CLEANUP, 4), + "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_CLEANUP, 2), + "HW Ray 
Frame data [0x%08x] for DM=%d, received cleanup request" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_CLEANUP, 3), + "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_CLEANUP, 4), + "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d" }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_CLEANUP, 2), + "HW Ray Frame Data [0x%08x] HW Context %u cleaned" }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_CLEANUP, 1), + "Discarding invalid cleanup request of type 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_CLEANUP, 1), + "Received cleanup request for HWRTData [0x%08x]" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_CLEANUP, 3), + "HWRTData [0x%08x] HW Context is busy: submitted = %d, executed = %d" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_CLEANUP, 3), + "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %d" }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_CSW, 1), + "CDM FWCtx 0x%08.8x needs resume" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_CSW, 3), + "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_CSW, 1), + "CDM FWCtx shared alloc size load 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_CSW, 0), + "*** CDM FWCtx store complete" }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_CSW, 0), + "*** CDM FWCtx store start" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_CSW, 0), + "CDM Soft Reset" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_CSW, 1), + "3D FWCtx 0x%08.8x needs resume" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_CSW, 1), + "*** 3D FWCtx 0x%08.8x resume" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_CSW, 0), + "*** 3D context store complete" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_CSW, 3), + "3D context store pipe state: 0x%08.8x 0x%08.8x 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_CSW, 0), + "*** 3D context store start" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_CSW, 1), + "*** 3D TQ FWCtx 0x%08.8x resume" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_CSW, 1), + "TA FWCtx 0x%08.8x needs resume" }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_CSW, 3), + "*** TA FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_CSW, 2), + "TA context shared alloc size store 0x%x, load 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_CSW, 0), + "*** TA context store complete" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_CSW, 0), + "*** TA context store start" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_CSW, 3), + "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d" }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_CSW, 2), + "Set FWCtx 0x%x priority to %u" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_CSW, 2), + "3D context store pipe%d state: 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_CSW, 2), + "3D context resume pipe%d state: 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_CSW, 1), + "SHG FWCtx 0x%08.8x needs resume" }, + { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_CSW, 3), + "*** SHG FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_CSW, 2), + "SHG context shared alloc size store 0x%x, load 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_CSW, 0), + "*** SHG context store complete" }, + { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_CSW, 0), + "*** SHG 
context store start" }, + { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_CSW, 1), + "Performing TA indirection, last used pipe %d" }, + { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_CSW, 0), + "CDM context store hit ctrl stream terminate. Skip resume." }, + { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_CSW, 4), + "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x, shader state %u" }, + { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_CSW, 2), + "TA PDS/USC state buffer flip (%d->%d)" }, + { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_CSW, 0), + "TA context store hit BRN 52563: vertex store tasks outstanding" }, + { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_CSW, 1), + "TA USC poll failed (USC vertex task count: %d)" }, + { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_CSW, 0), + "TA context store deferred due to BRN 54141." }, + { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_CSW, 7), + "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u" }, + { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_CSW, 0), + "*** TDM context store start" }, + { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_CSW, 0), + "*** TDM context store complete" }, + { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_CSW, 2), + "TDM context needs resume, header [0x%08.8x, 0x%08.8x]" }, + { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_CSW, 8), + "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u" }, + { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_CSW, 3), + "3D context store pipe %2d (%2d) state: 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_CSW, 3), + "3D context resume pipe %2d (%2d) state: 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_CSW, 1), + "*** 3D context store start version %d (1=IPP_TILE, 2=ISP_TILE)" }, + { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_CSW, 3), + "3D context store pipe%d state: 0x%08.8x%08x" }, + { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_CSW, 3), + "3D context resume pipe%d state: 0x%08.8x%08x" }, + { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_CSW, 2), + "3D context resume IPP state: 0x%08.8x%08x" }, + { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_CSW, 1), + "All 3D pipes empty after ISP tile mode store! 
IPP_status: 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_CSW, 3), + "TDM context resume pipe%d state: 0x%08.8x%08x" }, + { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_CSW, 0), + "*** 3D context store start version 4" }, + { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_CSW, 2), + "Multicore context resume on DM%d active core mask 0x%04.4x" }, + { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_CSW, 2), + "Multicore context store on DM%d active core mask 0x%04.4x" }, + { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_CSW, 5), + "TDM context resume Core %d, pipe%d state: 0x%08.8x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_CSW, 0), + "*** RDM FWCtx store complete" }, + { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_CSW, 0), + "*** RDM FWCtx store start" }, + { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_CSW, 1), + "RDM FWCtx 0x%08.8x needs resume" }, + { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_CSW, 1), + "RDM FWCtx 0x%08.8x resume" }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_BIF, 3), + "Activate MemCtx=0x%08x BIFreq=%d secure=%d" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_BIF, 1), + "Deactivate MemCtx=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_BIF, 1), + "Alloc PC reg %d" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_BIF, 2), + "Grab reg set %d refcount now %d" }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_BIF, 2), + "Ungrab reg set %d refcount now %d" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_BIF, 6), + "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_BIF, 2), + "Trust enabled:%d, for BIFreq=%d" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_BIF, 9), + "BIF Tiling Cfg %d base 0x%08x%08x len 0x%08x%08x enable %d stride %d --> 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_BIF, 4), + "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now 0x%08x 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_BIF, 3), + "Wrote the Value %d to OSID1, Context %d, Register's contents are now 0x%04x" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_BIF, 7), + "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x" }, \ + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_BIF, 5), + "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_BIF, 1), + "Unmap GPU memory (event status 0x%x)" }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_BIF, 3), + "Activate MemCtx=0x%08x DM=%d secure=%d" }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_BIF, 6), + "Setup reg=%d DM=%d, expect=0x%08x%08x, actual=0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_BIF, 4), + "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_BIF, 2), + "Trust enabled:%d, for DM=%d" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_BIF, 5), + "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, DM %u" }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_BIF, 6), + "Setup register set=%d DM=%d, PC address=0x%08x%08x, OSid=%u, NewPCRegRequired=%d" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_BIF, 3), + "Alloc PC set %d as register range [%u - %u]" }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_MISC, 1), + "GPIO write 0x%02x" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_MISC, 1), + "GPIO read 0x%02x" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_MISC, 0), + "GPIO enabled" }, + { ROGUE_FW_LOG_CREATESFID(4, 
ROGUE_FW_GROUP_MISC, 0), + "GPIO disabled" }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_MISC, 1), + "GPIO status=%d (0=OK, 1=Disabled)" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_MISC, 2), + "GPIO_AP: Read address=0x%02x (%d byte(s))" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_MISC, 2), + "GPIO_AP: Write address=0x%02x (%d byte(s))" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_MISC, 0), + "GPIO_AP timeout!" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_MISC, 1), + "GPIO_AP error. GPIO status=%d (0=OK, 1=Disabled)" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_MISC, 1), + "GPIO already read 0x%02x" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_MISC, 2), + "SR: Check buffer %d available returned %d" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_MISC, 1), + "SR: Waiting for buffer %d" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_MISC, 2), + "SR: Timeout waiting for buffer %d (after %d ticks)" }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_MISC, 2), + "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)" }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_MISC, 1), + "SR: Skip remaining strip %d in frame" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_MISC, 1), + "SR: Inform HW that strip %d is a new frame" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_MISC, 1), + "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_MISC, 1), + "SR: Strip mode is %d" }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_MISC, 1), + "SR: Strip Render start (strip %d)" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_MISC, 1), + "SR: Strip Render complete (buffer %d)" }, + { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_MISC, 1), + "SR: Strip Render fault (buffer %d)" }, + { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_MISC, 1), + "TRP state: %d" }, + { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_MISC, 1), + "TRP failure: %d" }, + { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_MISC, 1), + "SW TRP State: %d" }, + { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_MISC, 1), + "SW TRP failure: %d" }, + { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_MISC, 1), + "HW kick event (%u)" }, + { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_MISC, 4), + "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_MISC, 6), + "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_MISC, 6), + "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_MISC, 4), + "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u" }, + { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_MISC, 6), + "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x" }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_PM, 10), + "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_PM, 8), + "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). 
FL different between TA/3D: global:%d, local:%d, mmu:%d" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_PM, 14), + "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_PM, 14), + "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)" }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_PM, 5), + "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_PM, 1), + "Grow for freelist ID=0x%08x denied by host" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_PM, 5), + "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_PM, 1), + "Reconstruction of freelist ID=0x%08x failed" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_PM, 2), + "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_PM, 2), + "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %d" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_PM, 1), + "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_PM, 1), + "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_PM, 1), + "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_PM, 1), + "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_PM, 1), + "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_PM, 7), + "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). 
FL different between TA/3D: global:%d, local:%d" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_PM, 10), + "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_PM, 10), + "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)" }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_PM, 7), + "Freelist update completed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_PM, 7), + "Freelist update failed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_PM, 10), + "UFL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)" }, + { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_PM, 10), + "UFL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)" }, + { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_PM, 5), + "Freelist 0x%08x base address from HW: 0x%02x%08x (expected value: 0x%02x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_PM, 5), + "Analysis of FL grow: Pause=(%u,%u) Paused+Valid(%u,%u) PMStateBuffer=0x%x" }, + { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_PM, 5), + "Attempt FL grow for FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u" }, + { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_PM, 5), + "Deferring FL grow for non-loaded FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u" }, + { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_PM, 4), + "Is GEOM: %d, finished: %d (HWRTData = 0x%08x, MemCtx = 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_PM, 1), + "3D Timeout Now for FWCtx 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_PM, 1), + "GEOM PM Recycle for FWCtx 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_PM, 1), + "PM running primary config (Core %d)" }, + { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_PM, 1), + "PM running secondary config (Core %d)" }, + { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_PM, 1), + "PM running tertiary config (Core %d)" }, + { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_PM, 1), + "PM running quaternary config (Core %d)" }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_RPM, 3), + "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_RPM, 3), + "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_RPM, 0), + "RPM request failed. Waiting for freelist grow." }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_RPM, 0), + "RPM request failed. Aborting the current frame." 
}, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_RPM, 1), + "RPM waiting for pending grow on freelist 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_RPM, 3), + "Request freelist grow [0x%08x] current pages %d, grow size %d" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_RPM, 2), + "Freelist load: SHF = 0x%08x, SHG = 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_RPM, 2), + "SHF FPL register: 0x%08x.0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_RPM, 2), + "SHG FPL register: 0x%08x.0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_RPM, 5), + "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_RPM, 0), + "Restarting SHG" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_RPM, 0), + "Grow failed, aborting the current frame." }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_RPM, 1), + "RPM abort complete on HWFrameData [0x%08x]." }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_RPM, 1), + "RPM freelist cleanup [0x%08x] requires abort to proceed." }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_RPM, 2), + "RPM page table base register: 0x%08x.0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_RPM, 0), + "Issuing RPM abort." }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_RPM, 0), + "RPM OOM received but toggle bits indicate free pages available" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_RPM, 0), + "RPM hardware timeout. Unable to process OOM event." }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_RPM, 5), + "SHF FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_RPM, 5), + "SHG FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_RPM, 3), + "SHF FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_RPM, 3), + "SHG FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x" }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_RTD, 2), + "3D RTData 0x%08x finished on HW context %u" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_RTD, 2), + "3D RTData 0x%08x ready on HW context %u" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_RTD, 4), + "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d, mmu: %d" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_RTD, 2), + "Loading VFP table 0x%08x%08x for 3D" }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_RTD, 2), + "Loading VFP table 0x%08x%08x for TA" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_RTD, 10), + "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_RTD, 0), + "Perform VHEAP table store" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_RTD, 2), + "RTData 0x%08x: found match in Context=%d: Load=No, Store=No" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_RTD, 2), + "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_RTD, 3), + "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_RTD, 3), + "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_RTD, 5), + "Loading stack-pointers for %d 
(0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_RTD, 10), + "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_RTD, 2), + "TA RTData 0x%08x finished on HW context %u" }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_RTD, 2), + "TA RTData 0x%08x loaded on HW context %u" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_RTD, 12), + "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_RTD, 12), + "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_RTD, 1), + "Freelist 0x%x RESET!!!!!!!!" }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_RTD, 5), + "Freelist 0x%x stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_RTD, 3), + "Request reconstruction of Freelist 0x%x type: %d (0:local,1:global,2:mmu) on HW context %u" }, + { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_RTD, 1), + "Freelist reconstruction ACK from host (HWR state :%u)" }, + { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_RTD, 0), + "Freelist reconstruction completed" }, + { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_RTD, 3), + "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d" }, + { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_RTD, 3), + "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)" }, + { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_RTD, 8), + "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_RTD, 2), + "3D RTData 0x%08x loaded on HW context %u" }, + { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_RTD, 4), + "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_RTD, 2), + "Restarting TA after partial render, HWRTData0State=0x%x, HWRTData1State=0x%x" }, + { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_RTD, 3), + "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d" }, + { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_RTD, 12), + "Store Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" }, + { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_RTD, 12), + "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u" }, + { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_RTD, 5), + "3D Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_RTD, 7), + "TA Buffers: FWCtx 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_RTD, 4), + "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)" }, + { 
ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_RTD, 6), + "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u)" }, + { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_RTD, 1), + "TA RTData 0x%08x marked as killed." }, + { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_RTD, 1), + "3D RTData 0x%08x marked as killed." }, + { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_RTD, 1), + "RTData 0x%08x will be killed after TA restart." }, + { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_RTD, 3), + "RTData 0x%08x Render State Buffer 0x%02x%08x will be reset." }, + { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_RTD, 3), + "GEOM RTData 0x%08x using Render State Buffer 0x%02x%08x." }, + { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_RTD, 3), + "FRAG RTData 0x%08x using Render State Buffer 0x%02x%08x." }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_SPM, 0), + "Force Z-Load for partial render" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_SPM, 0), + "Force Z-Store for partial render" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_SPM, 1), + "3D MemFree: Local FL 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_SPM, 1), + "3D MemFree: MMU FL 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_SPM, 1), + "3D MemFree: Global FL 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_SPM, 6), + "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x, HardwareSync Fence [0x%08.8x] is 0x%08.8x requires 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_SPM, 3), + "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_SPM, 5), + "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_SPM, 0), + "Partial render avoided" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_SPM, 0), + "Partial render discarded" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_SPM, 0), + "Partial Render finished" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_SPM, 0), + "SPM Owner = 3D-BG" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_SPM, 0), + "SPM Owner = 3D-IRQ" }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_SPM, 0), + "SPM Owner = NONE" }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_SPM, 0), + "SPM Owner = TA-BG" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_SPM, 0), + "SPM Owner = TA-IRQ" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_SPM, 2), + "ZStore address 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_SPM, 2), + "SStore address 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_SPM, 2), + "ZLoad address 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_SPM, 2), + "SLoad address 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_SPM, 0), + "No deferred ZS Buffer provided" }, + { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_SPM, 1), + "ZS Buffer successfully populated (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_SPM, 1), + "No need to populate ZS Buffer (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_SPM, 1), + "ZS Buffer successfully unpopulated (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_SPM, 1), + "No need to unpopulate ZS Buffer (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_SPM, 1), + "Send ZS-Buffer backing request to host (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_SPM, 1), + "Send ZS-Buffer unbacking request to host (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_SPM, 1), + "Don't send 
ZS-Buffer backing request. Previous request still pending (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_SPM, 1), + "Don't send ZS-Buffer unbacking request. Previous request still pending (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_SPM, 1), + "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_SPM, 1), + "Partial Render waiting for SBuffer to be backed (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_SPM, 0), + "SPM State = none" }, + { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_SPM, 0), + "SPM State = PR blocked" }, + { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_SPM, 0), + "SPM State = wait for grow" }, + { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_SPM, 0), + "SPM State = wait for HW" }, + { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_SPM, 0), + "SPM State = PR running" }, + { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_SPM, 0), + "SPM State = PR avoided" }, + { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_SPM, 0), + "SPM State = PR executed" }, + { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_SPM, 2), + "3DMemFree matches freelist 0x%08x (FL type = %u)" }, + { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_SPM, 0), + "Raise the 3DMemFreeDetected flag" }, + { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_SPM, 1), + "Wait for pending grow on Freelist 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_SPM, 1), + "ZS Buffer failed to be populated (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_SPM, 5), + "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u" }, + { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_SPM, 4), + "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u" }, + { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_SPM, 5), + "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u" }, + { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_SPM, 1), + "No deferred partial render FW (Type=%d) Buffer provided" }, + { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_SPM, 1), + "No need to populate PR Buffer (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_SPM, 1), + "No need to unpopulate PR Buffer (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_SPM, 1), + "Send PR Buffer backing request to host (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_SPM, 1), + "Send PR Buffer unbacking request to host (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_SPM, 1), + "Don't send PR Buffer backing request. Previous request still pending (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_SPM, 1), + "Don't send PR Buffer unbacking request. 
Previous request still pending (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_SPM, 2), + "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_SPM, 4), + "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u" }, + { ROGUE_FW_LOG_CREATESFID(66, ROGUE_FW_GROUP_SPM, 3), + "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(67, ROGUE_FW_GROUP_SPM, 3), + "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u" }, + { ROGUE_FW_LOG_CREATESFID(68, ROGUE_FW_GROUP_SPM, 3), + "OOM TA/3D PR deadlock unblocked reordering DM%d runlist head from Context 0x%08x to 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(69, ROGUE_FW_GROUP_SPM, 0), + "SPM State = PR force free" }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_POW, 4), + "Check Pow state DM%d int: 0x%x, ext: 0x%x, pow flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_POW, 3), + "GPU idle (might be powered down). Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_POW, 3), + "OS requested pow off (forced = %d), DM%d, pow flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_POW, 4), + "Initiate powoff query. Inactive DMs: %d %d %d %d" }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_POW, 2), + "Any RD-DM pending? %d, Any RD-DM Active? %d" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_POW, 3), + "GPU ready to be powered down. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_POW, 2), + "HW Request On(1)/Off(0): %d, Units: 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_POW, 2), + "Request to change num of dusts to %d (Power flags=%d)" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_POW, 2), + "Changing number of dusts from %d to %d" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_POW, 0), + "Sidekick init" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_POW, 1), + "Rascal+Dusts init (# dusts mask: 0x%x)" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_POW, 0), + "Initiate powoff query for RD-DMs." }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_POW, 0), + "Initiate powoff query for TLA-DM." }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_POW, 2), + "Any RD-DM pending? %d, Any RD-DM Active? %d" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_POW, 2), + "TLA-DM pending? %d, TLA-DM Active? %d" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_POW, 1), + "Request power up due to BRN37270. Pow stat int: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_POW, 3), + "Cancel power off request int: 0x%x, ext: 0x%x, pow flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_POW, 1), + "OS requested forced IDLE, pow flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_POW, 1), + "OS cancelled forced IDLE, pow flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_POW, 3), + "Idle timer start. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_POW, 3), + "Cancel idle timer. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_POW, 2), + "Active PM latency set to %dms. Core clock: %d Hz" }, + { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_POW, 2), + "Compute cluster mask change to 0x%x, %d dusts powered." }, + { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_POW, 0), + "Null command executed, repeating initiate powoff query for RD-DMs." 
}, + { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_POW, 1), + "Power monitor: Estimate of dynamic energy %u" }, + { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_POW, 3), + "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_POW, 2), + "Proactive DVFS: New deadline, time = 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_POW, 2), + "Proactive DVFS: New workload, cycles = 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_POW, 1), + "Proactive DVFS: Proactive frequency calculated = %u" }, + { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_POW, 1), + "Proactive DVFS: Reactive utilisation = %u percent" }, + { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_POW, 2), + "Proactive DVFS: Reactive frequency calculated = %u.%u" }, + { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_POW, 1), + "Proactive DVFS: OPP Point Sent = 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_POW, 2), + "Proactive DVFS: Deadline removed = 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_POW, 2), + "Proactive DVFS: Workload removed = 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_POW, 1), + "Proactive DVFS: Throttle to a maximum = 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_POW, 0), + "Proactive DVFS: Failed to pass OPP point via GPIO." }, + { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_POW, 0), + "Proactive DVFS: Invalid node passed to function." }, + { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_POW, 1), + "Proactive DVFS: Guest OS attempted to do a privileged action. OSid = %u" }, + { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_POW, 1), + "Proactive DVFS: Unprofiled work started. Total unprofiled work present: %u" }, + { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_POW, 1), + "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: %u" }, + { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_POW, 0), + "Proactive DVFS: Disabled: Not enabled by host." }, + { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_POW, 2), + "HW Request Completed(1)/Aborted(0): %d, Ticks: %d" }, + { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_POW, 1), + "Allowed number of dusts is %d due to BRN59042." }, + { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_POW, 3), + "Host timed out while waiting for a forced idle state. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_POW, 5), + "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x, Fence Counters: Check: %u - Update: %u" }, + { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_POW, 2), + "Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_POW, 0), + "Proactive DVFS: GPU transitioned to idle" }, + { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_POW, 0), + "Proactive DVFS: GPU transitioned to active" }, + { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_POW, 1), + "Power counter dumping: Data truncated writing register %u. Buffer too small." }, + { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_POW, 0), + "Power controller returned ABORT for last request so retrying." 
}, + { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_POW, 2), + "Discarding invalid power request: type 0x%x, DM %u" }, + { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_POW, 2), + "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)" }, + { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_POW, 2), + "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)" }, + { ROGUE_FW_LOG_CREATESFID(55, ROGUE_FW_GROUP_POW, 1), + "Detected attempt to change dust count while not forced idle (pow state 0x%x)" }, + { ROGUE_FW_LOG_CREATESFID(56, ROGUE_FW_GROUP_POW, 3), + "Power monitor: Type = %d (0 = power, 1 = energy), Estimate result = 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(57, ROGUE_FW_GROUP_POW, 2), + "Conflicting clock frequency range: OPP min = %u, max = %u" }, + { ROGUE_FW_LOG_CREATESFID(58, ROGUE_FW_GROUP_POW, 1), + "Proactive DVFS: Set floor to a minimum = 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(59, ROGUE_FW_GROUP_POW, 2), + "OS requested pow off (forced = %d), pow flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(60, ROGUE_FW_GROUP_POW, 1), + "Discarding invalid power request: type 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(61, ROGUE_FW_GROUP_POW, 3), + "Request to change SPU power state mask from 0x%x to 0x%x. Pow flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(62, ROGUE_FW_GROUP_POW, 2), + "Changing SPU power state mask from 0x%x to 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(63, ROGUE_FW_GROUP_POW, 1), + "Detected attempt to change SPU power state mask while not forced idle (pow state 0x%x)" }, + { ROGUE_FW_LOG_CREATESFID(64, ROGUE_FW_GROUP_POW, 1), + "Invalid SPU power mask 0x%x! Changing to 1" }, + { ROGUE_FW_LOG_CREATESFID(65, ROGUE_FW_GROUP_POW, 2), + "Proactive DVFS: Send OPP %u with clock divider value %u" }, + { ROGUE_FW_LOG_CREATESFID(66, ROGUE_FW_GROUP_POW, 0), + "PPA block started in perf validation mode." }, + { ROGUE_FW_LOG_CREATESFID(67, ROGUE_FW_GROUP_POW, 1), + "Reset PPA block state %u (1=reset, 0=recalculate)." }, + { ROGUE_FW_LOG_CREATESFID(68, ROGUE_FW_GROUP_POW, 1), + "Power controller returned ABORT for Core-%d last request so retrying." }, + { ROGUE_FW_LOG_CREATESFID(69, ROGUE_FW_GROUP_POW, 3), + "HW Request On(1)/Off(0): %d, Units: 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(70, ROGUE_FW_GROUP_POW, 5), + "Request to change SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x. Pow flags: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(71, ROGUE_FW_GROUP_POW, 4), + "Changing SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(72, ROGUE_FW_GROUP_POW, 2), + "RAC pending? %d, RAC Active? %d" }, + { ROGUE_FW_LOG_CREATESFID(73, ROGUE_FW_GROUP_POW, 0), + "Initiate powoff query for RAC." }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_HWR, 2), + "Lockup detected on DM%d, FWCtx: 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_HWR, 3), + "Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_HWR, 0), + "Reset HW" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_HWR, 0), + "Lockup recovered." 
}, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_HWR, 2), + "Lock-up DM%d FWCtx: 0x%08.8x" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_HWR, 4), + "Lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_HWR, 3), + "Early fault detection: GLB(%d->%d), PER-DM(0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_HWR, 3), + "Hold scheduling due lockup: GLB(%d), PER-DM(0x%08x->0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_HWR, 4), + "False lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_HWR, 4), + "BRN37729: GLB(%d->%d), PER-DM(0x%08x->0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_HWR, 3), + "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_HWR, 4), + "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_HWR, 3), + "HW poll %u (0-Unset 1-Set) failed (reg:0x%08x val:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_HWR, 2), + "Discarded cmd on DM%u FWCtx=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_HWR, 6), + "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %d), FWCtx 0x%08x @ %d" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_HWR, 2), + "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_HWR, 5), + "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %d, global (0x%08.8x): %d" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_HWR, 8), + "FL check RTD 0x%08.8x, discard: %d - local (0x%08.8x): s%d?=c%d, global (0x%08.8x): s%d?=c%d" }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_HWR, 2), + "FL reconstruction 0x%08.8x c%d" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_HWR, 3), + "3D check: missing TA FWCtx 0x%08.8x @ %d, RTD 0x%08x." }, + { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_HWR, 2), + "Reset HW (mmu:%d, extmem: %d)" }, + { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_HWR, 4), + "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %d bytes)" }, + { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_HWR, 2), + "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_HWR, 5), + "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_HWR, 1), + "Recovery DM%u: DM fully recovered" }, + { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_HWR, 2), + "DM%u: Hold scheduling due to R-Flag = 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_HWR, 0), + "Analysis: Need freelist reconstruction" }, + { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_HWR, 2), + "Analysis DM%u: Lockup FWCtx: 0x%08.8x. Need to skip to next command" }, + { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_HWR, 2), + "Analysis DM%u: Lockup while TA is OOM FWCtx: 0x%08.8x. Need to skip to next command" }, + { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_HWR, 2), + "Analysis DM%u: Lockup while partial render FWCtx: 0x%08.8x. Need PR cleanup" }, + { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_HWR, 0), + "GPU has locked up" }, + { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_HWR, 1), + "DM%u ready for HWR" }, + { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_HWR, 2), + "Recovery DM%u: Updated Recovery counter. 
New R-Flags=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_HWR, 1), + "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_HWR, 1), + "DM%u timed out" }, + { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_HWR, 1), + "RGX_CR_EVENT_STATUS=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_HWR, 2), + "DM%u lockup falsely detected, R-Flags=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(38, ROGUE_FW_GROUP_HWR, 0), + "GPU has overrun its deadline" }, + { ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_HWR, 0), + "GPU has failed a poll" }, + { ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_HWR, 2), + "RGX DM%u phase count=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_HWR, 2), + "Reset HW (loop:%d, poll failures: 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_HWR, 1), + "MMU fault event: 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(43, ROGUE_FW_GROUP_HWR, 1), + "BIF1 page fault detected (Bank1 MMU Status: 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(44, ROGUE_FW_GROUP_HWR, 1), + "Fast CRC Failed. Proceeding to full register checking (DM: %u)." }, + { ROGUE_FW_LOG_CREATESFID(45, ROGUE_FW_GROUP_HWR, 2), + "Meta MMU page fault detected (Meta MMU Status: 0x%08x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(46, ROGUE_FW_GROUP_HWR, 2), + "Fast CRC Check result for DM%u is HWRNeeded=%u" }, + { ROGUE_FW_LOG_CREATESFID(47, ROGUE_FW_GROUP_HWR, 2), + "Full Signature Check result for DM%u is HWRNeeded=%u" }, + { ROGUE_FW_LOG_CREATESFID(48, ROGUE_FW_GROUP_HWR, 3), + "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u" }, + { ROGUE_FW_LOG_CREATESFID(49, ROGUE_FW_GROUP_HWR, 3), + "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d" }, + { ROGUE_FW_LOG_CREATESFID(50, ROGUE_FW_GROUP_HWR, 2), + "Deadline counter for DM%u is HWRDeadline=%u" }, + { ROGUE_FW_LOG_CREATESFID(51, ROGUE_FW_GROUP_HWR, 1), + "Holding Scheduling on OSid %u due to pending freelist reconstruction" }, + { ROGUE_FW_LOG_CREATESFID(52, ROGUE_FW_GROUP_HWR, 2), + "Requesting reconstruction for freelist 0x%x (ID=%d)" }, + { ROGUE_FW_LOG_CREATESFID(53, ROGUE_FW_GROUP_HWR, 1), + "Reconstruction of freelist ID=%d complete" }, + { ROGUE_FW_LOG_CREATESFID(54, ROGUE_FW_GROUP_HWR, 4), + "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u" }, + { ROGUE_FW_LOG_CREATESFID(55, ROGUE_FW_GROUP_HWR, 1), + "Reconstruction of freelist ID=%d failed" }, + { ROGUE_FW_LOG_CREATESFID(56, ROGUE_FW_GROUP_HWR, 4), + "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(57, ROGUE_FW_GROUP_HWR, 4), + "Unrestricting PDS Tasks again (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(58, ROGUE_FW_GROUP_HWR, 2), + "USC slots: %u used by DM%u" }, + { ROGUE_FW_LOG_CREATESFID(59, ROGUE_FW_GROUP_HWR, 1), + "USC slots: %u empty" }, + { ROGUE_FW_LOG_CREATESFID(60, ROGUE_FW_GROUP_HWR, 5), + "HCS DM%d's Context Switch failed to meet deadline. 
Current time: 0x%08x%08x, deadline: 0x%08x%08x" }, + { ROGUE_FW_LOG_CREATESFID(61, ROGUE_FW_GROUP_HWR, 1), + "Begin hardware reset (HWR Counter=%d)" }, + { ROGUE_FW_LOG_CREATESFID(62, ROGUE_FW_GROUP_HWR, 1), + "Finished hardware reset (HWR Counter=%d)" }, + { ROGUE_FW_LOG_CREATESFID(63, ROGUE_FW_GROUP_HWR, 2), + "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction" }, + { ROGUE_FW_LOG_CREATESFID(64, ROGUE_FW_GROUP_HWR, 5), + "User Mode Queue ROff reset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes StreamStartOffset = %u)" }, + { ROGUE_FW_LOG_CREATESFID(65, ROGUE_FW_GROUP_HWR, 4), + "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) on HW context %u" }, + { ROGUE_FW_LOG_CREATESFID(66, ROGUE_FW_GROUP_HWR, 3), + "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(67, ROGUE_FW_GROUP_HWR, 1), + "At least one other DM is running okay so DM%u will get another chance" }, + { ROGUE_FW_LOG_CREATESFID(68, ROGUE_FW_GROUP_HWR, 2), + "Reconstructing in FW, FL: 0x%x (ID=%d)" }, + { ROGUE_FW_LOG_CREATESFID(69, ROGUE_FW_GROUP_HWR, 4), + "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %d bytes)" }, + { ROGUE_FW_LOG_CREATESFID(70, ROGUE_FW_GROUP_HWR, 5), + "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u" }, + { ROGUE_FW_LOG_CREATESFID(71, ROGUE_FW_GROUP_HWR, 3), + "Start long HW poll %u (0-Unset 1-Set) for (reg:0x%08x val:0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(72, ROGUE_FW_GROUP_HWR, 1), + "End long HW poll (result=%d)" }, + { ROGUE_FW_LOG_CREATESFID(73, ROGUE_FW_GROUP_HWR, 3), + "DM%u has taken %d ticks and deadline is %d ticks" }, + { ROGUE_FW_LOG_CREATESFID(74, ROGUE_FW_GROUP_HWR, 5), + "USC Watchdog result for DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u" }, + { ROGUE_FW_LOG_CREATESFID(75, ROGUE_FW_GROUP_HWR, 6), + "Reconstruction needed for freelist 0x%x (ID=%d) OSid: %d type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u" }, + { ROGUE_FW_LOG_CREATESFID(76, ROGUE_FW_GROUP_HWR, 1), + "GPU-%u has locked up" }, + { ROGUE_FW_LOG_CREATESFID(77, ROGUE_FW_GROUP_HWR, 1), + "DM%u has locked up" }, + { ROGUE_FW_LOG_CREATESFID(78, ROGUE_FW_GROUP_HWR, 2), + "Core %d RGX_CR_EVENT_STATUS=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(79, ROGUE_FW_GROUP_HWR, 2), + "RGX_CR_MULTICORE_EVENT_STATUS%u=0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(80, ROGUE_FW_GROUP_HWR, 5), + "BIF0 page fault detected (Core %d MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(81, ROGUE_FW_GROUP_HWR, 3), + "MMU page fault detected (Core %d MMU Status: 0x%08x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(82, ROGUE_FW_GROUP_HWR, 4), + "MMU page fault detected (Core %d MMU Status: 0x%08x%08x 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(83, ROGUE_FW_GROUP_HWR, 4), + "Reset HW (core:%d of %d, loop:%d, poll failures: 0x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(84, ROGUE_FW_GROUP_HWR, 3), + "Fast CRC Check result for Core%u, DM%u is HWRNeeded=%u" }, + { ROGUE_FW_LOG_CREATESFID(85, ROGUE_FW_GROUP_HWR, 3), + "Full Signature Check result for Core%u, DM%u is HWRNeeded=%u" }, + { ROGUE_FW_LOG_CREATESFID(86, ROGUE_FW_GROUP_HWR, 4), + "USC Slots result for Core%u, DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d" }, + { ROGUE_FW_LOG_CREATESFID(87, ROGUE_FW_GROUP_HWR, 6), + "USC Watchdog result for Core%u DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u" }, + { ROGUE_FW_LOG_CREATESFID(88, ROGUE_FW_GROUP_HWR, 3), + 
"RISC-V MMU page fault detected (FWCORE MMU Status 0x%08x Req Status 0x%08x%08x)" }, + { ROGUE_FW_LOG_CREATESFID(89, ROGUE_FW_GROUP_HWR, 2), + "TEXAS1_PFS poll failed on core %d with value 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(90, ROGUE_FW_GROUP_HWR, 2), + "BIF_PFS poll failed on core %d with value 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(91, ROGUE_FW_GROUP_HWR, 2), + "MMU_ABORT_PM_STATUS set poll failed on core %d with value 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(92, ROGUE_FW_GROUP_HWR, 2), + "MMU_ABORT_PM_STATUS unset poll failed on core %d with value 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(93, ROGUE_FW_GROUP_HWR, 2), + "MMU_CTRL_INVAL poll (all but fw) failed on core %d with value 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(94, ROGUE_FW_GROUP_HWR, 2), + "MMU_CTRL_INVAL poll (all) failed on core %d with value 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(95, ROGUE_FW_GROUP_HWR, 3), + "TEXAS%d_PFS poll failed on core %d with value 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(96, ROGUE_FW_GROUP_HWR, 3), + "Extra Registers Check result for Core%u, DM%u is HWRNeeded=%u" }, + { ROGUE_FW_LOG_CREATESFID(97, ROGUE_FW_GROUP_HWR, 1), + "FW attempted to write to read-only GPU address 0x%08x" }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_HWP, 2), + "Block 0x%x mapped to Config Idx %u" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_HWP, 1), + "Block 0x%x omitted from event - not enabled in HW" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_HWP, 1), + "Block 0x%x included in event - enabled in HW" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_HWP, 2), + "Select register state hi_0x%x lo_0x%x" }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_HWP, 1), + "Counter stream block header word 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_HWP, 1), + "Counter register offset 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_HWP, 1), + "Block 0x%x config unset, skipping" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_HWP, 1), + "Accessing Indirect block 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_HWP, 1), + "Accessing Direct block 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_HWP, 1), + "Programmed counter select register at offset 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_HWP, 2), + "Block register offset 0x%x and value 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_HWP, 1), + "Reading config block from driver 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_HWP, 2), + "Reading block range 0x%x to 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_HWP, 1), + "Recording block 0x%x config from driver" }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_HWP, 0), + "Finished reading config block from driver" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_HWP, 2), + "Custom Counter offset: 0x%x value: 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_HWP, 2), + "Select counter n:%u ID:0x%x" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_HWP, 3), + "The counter ID 0x%x is not allowed. The package [b:%u, n:%u] will be discarded" }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_HWP, 1), + "Custom Counters filter status %d" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_HWP, 2), + "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded" }, + { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_HWP, 2), + "The package will be discarded because it contains %d counters IDs while the upper limit is %d" }, + { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_HWP, 2), + "Check Filter 0x%x is 0x%x ?" 
}, + { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_HWP, 1), + "The custom block %u is reset" }, + { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_HWP, 1), + "Encountered an invalid command (%d)" }, + { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_HWP, 2), + "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)" }, + { ROGUE_FW_LOG_CREATESFID(26, ROGUE_FW_GROUP_HWP, 3), + "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)" }, + { ROGUE_FW_LOG_CREATESFID(27, ROGUE_FW_GROUP_HWP, 1), + "Custom Counter block: %d" }, + { ROGUE_FW_LOG_CREATESFID(28, ROGUE_FW_GROUP_HWP, 1), + "Block 0x%x ENABLED" }, + { ROGUE_FW_LOG_CREATESFID(29, ROGUE_FW_GROUP_HWP, 1), + "Block 0x%x DISABLED" }, + { ROGUE_FW_LOG_CREATESFID(30, ROGUE_FW_GROUP_HWP, 2), + "Accessing Indirect block 0x%x, instance %u" }, + { ROGUE_FW_LOG_CREATESFID(31, ROGUE_FW_GROUP_HWP, 2), + "Counter register 0x%x, Value 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(32, ROGUE_FW_GROUP_HWP, 1), + "Counters filter status %d" }, + { ROGUE_FW_LOG_CREATESFID(33, ROGUE_FW_GROUP_HWP, 2), + "Block 0x%x mapped to Ctl Idx %u" }, + { ROGUE_FW_LOG_CREATESFID(34, ROGUE_FW_GROUP_HWP, 0), + "Block(s) in use for workload estimation." }, + { ROGUE_FW_LOG_CREATESFID(35, ROGUE_FW_GROUP_HWP, 3), + "GPU %u Cycle counter 0x%x, Value 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(36, ROGUE_FW_GROUP_HWP, 3), + "GPU Mask 0x%x Cycle counter 0x%x, Value 0x%x" }, + { ROGUE_FW_LOG_CREATESFID(37, ROGUE_FW_GROUP_HWP, 1), + "Blocks IGNORED for GPU %u" }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_DMA, 5), + "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_DMA, 4), + "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_DMA, 1), + "DMA Interrupt register 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_DMA, 1), + "Waiting for transfer of type 0x%02x completion..." }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_DMA, 3), + "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_DMA, 3), + "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_DMA, 1), + "Transfer 0x%02x request poll failure" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_DMA, 2), + "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_DMA, 7), + "Transfer 0x%02x request on ch. 
%u: system 0x%02x%08x, coremem 0x%08x, flags 0x%x, size %u" }, + + { ROGUE_FW_LOG_CREATESFID(1, ROGUE_FW_GROUP_DBG, 2), + "0x%08x 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(2, ROGUE_FW_GROUP_DBG, 1), + "0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(3, ROGUE_FW_GROUP_DBG, 2), + "0x%08x 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(4, ROGUE_FW_GROUP_DBG, 3), + "0x%08x 0x%08x 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(5, ROGUE_FW_GROUP_DBG, 4), + "0x%08x 0x%08x 0x%08x 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(6, ROGUE_FW_GROUP_DBG, 5), + "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(7, ROGUE_FW_GROUP_DBG, 6), + "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(8, ROGUE_FW_GROUP_DBG, 7), + "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(9, ROGUE_FW_GROUP_DBG, 8), + "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x" }, + { ROGUE_FW_LOG_CREATESFID(10, ROGUE_FW_GROUP_DBG, 1), + "%d" }, + { ROGUE_FW_LOG_CREATESFID(11, ROGUE_FW_GROUP_DBG, 2), + "%d %d" }, + { ROGUE_FW_LOG_CREATESFID(12, ROGUE_FW_GROUP_DBG, 3), + "%d %d %d" }, + { ROGUE_FW_LOG_CREATESFID(13, ROGUE_FW_GROUP_DBG, 4), + "%d %d %d %d" }, + { ROGUE_FW_LOG_CREATESFID(14, ROGUE_FW_GROUP_DBG, 5), + "%d %d %d %d %d" }, + { ROGUE_FW_LOG_CREATESFID(15, ROGUE_FW_GROUP_DBG, 6), + "%d %d %d %d %d %d" }, + { ROGUE_FW_LOG_CREATESFID(16, ROGUE_FW_GROUP_DBG, 7), + "%d %d %d %d %d %d %d" }, + { ROGUE_FW_LOG_CREATESFID(17, ROGUE_FW_GROUP_DBG, 8), + "%d %d %d %d %d %d %d %d" }, + { ROGUE_FW_LOG_CREATESFID(18, ROGUE_FW_GROUP_DBG, 1), + "%u" }, + { ROGUE_FW_LOG_CREATESFID(19, ROGUE_FW_GROUP_DBG, 2), + "%u %u" }, + { ROGUE_FW_LOG_CREATESFID(20, ROGUE_FW_GROUP_DBG, 3), + "%u %u %u" }, + { ROGUE_FW_LOG_CREATESFID(21, ROGUE_FW_GROUP_DBG, 4), + "%u %u %u %u" }, + { ROGUE_FW_LOG_CREATESFID(22, ROGUE_FW_GROUP_DBG, 5), + "%u %u %u %u %u" }, + { ROGUE_FW_LOG_CREATESFID(23, ROGUE_FW_GROUP_DBG, 6), + "%u %u %u %u %u %u" }, + { ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_DBG, 7), + "%u %u %u %u %u %u %u" }, + { ROGUE_FW_LOG_CREATESFID(25, ROGUE_FW_GROUP_DBG, 8), + "%u %u %u %u %u %u %u %u" }, + + { ROGUE_FW_LOG_CREATESFID(65535, ROGUE_FW_GROUP_NULL, 15), + "You should not use this string" }, +}; + +#define ROGUE_FW_SF_FIRST ROGUE_FW_LOG_CREATESFID(0, ROGUE_FW_GROUP_NULL, 0) +#define ROGUE_FW_SF_MAIN_ASSERT_FAILED ROGUE_FW_LOG_CREATESFID(24, ROGUE_FW_GROUP_MAIN, 1) +#define ROGUE_FW_SF_LAST ROGUE_FW_LOG_CREATESFID(65535, ROGUE_FW_GROUP_NULL, 15) + +#endif /* PVR_ROGUE_FWIF_SF_H */ diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h new file mode 100644 index 0000000000..6c09c15bf9 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h @@ -0,0 +1,258 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_ROGUE_FWIF_SHARED_H +#define PVR_ROGUE_FWIF_SHARED_H + +#include <linux/compiler.h> +#include <linux/types.h> + +#define ROGUE_FWIF_NUM_RTDATAS 2U +#define ROGUE_FWIF_NUM_GEOMDATAS 1U +#define ROGUE_FWIF_NUM_RTDATA_FREELISTS 2U +#define ROGUE_NUM_GEOM_CORES 1U + +#define ROGUE_NUM_GEOM_CORES_SIZE 2U + +/* + * Maximum number of UFOs in a CCB command. + * The number is based on having 32 sync prims (as originally), plus 32 sync + * checkpoints. 
+ * Once the use of sync prims is no longer supported, we will retain + * the same total (64), since the number of sync checkpoints which may be + * backing a fence is not visible to the client driver and has to + * allow for the number of different timelines involved in fence merges. + */ +#define ROGUE_FWIF_CCB_CMD_MAX_UFOS (32U + 32U) + +/* + * This is a generic limit imposed on any DM (GEOMETRY, FRAGMENT, CDM, TDM, 2D, TRANSFER) + * command passed through the bridge. + * Just across the bridge in the server, any incoming kick command size is + * checked against this maximum limit. + * If the incoming command size is larger than the specified limit, + * the bridge call is retired with an error. + */ +#define ROGUE_FWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U) + +#define ROGUE_FWIF_PRBUFFER_START (0) +#define ROGUE_FWIF_PRBUFFER_ZSBUFFER (0) +#define ROGUE_FWIF_PRBUFFER_MSAABUFFER (1) +#define ROGUE_FWIF_PRBUFFER_MAXSUPPORTED (2) + +struct rogue_fwif_dma_addr { + aligned_u64 dev_addr; + u32 fw_addr; + u32 padding; +} __aligned(8); + +struct rogue_fwif_ufo { + u32 addr; + u32 value; +}; + +#define ROGUE_FWIF_UFO_ADDR_IS_SYNC_CHECKPOINT (1) + +struct rogue_fwif_sync_checkpoint { + u32 state; + u32 fw_ref_count; +}; + +struct rogue_fwif_cleanup_ctl { + /* Number of commands received by the FW */ + u32 submitted_commands; + /* Number of commands executed by the FW */ + u32 executed_commands; +} __aligned(8); + +/* + * Used to share frame numbers across UM-KM-FW, + * frame number is set in UM, + * frame number is required in both KM for HTB and FW for FW trace. + * + * May be used to house Kick flags in the future. + */ +struct rogue_fwif_cmd_common { + /* associated frame number */ + u32 frame_num; +}; + +/* + * Geometry and fragment commands require a set of firmware addresses that are stored in the Kernel. + * The client has handle(s) to kernel containers storing these addresses, instead of the raw + * addresses. We have to patch/write these addresses in KM to prevent UM from controlling FW + * addresses directly. Typedefs for geometry and fragment commands are shared between Client and + * Firmware (both single-BVNC). Kernel is implemented in a multi-BVNC manner, so it can't use + * geometry|fragment CMD type definitions directly. Therefore we have a SHARED block that is shared + * between UM-KM-FW across all BVNC configurations. + */ +struct rogue_fwif_cmd_geom_frag_shared { + /* Common command attributes */ + struct rogue_fwif_cmd_common cmn; + + /* + * RTData associated with this command. This is used for context + * selection and for storing out the HW context when TA is switched out, + * so it can continue later. + */ + u32 hwrt_data_fw_addr; + + /* Supported PR Buffers like Z/S/MSAA Scratch */ + u32 pr_buffer_fw_addr[ROGUE_FWIF_PRBUFFER_MAXSUPPORTED]; +}; + +/* + * Client Circular Command Buffer (CCCB) control structure. + * This is shared between the Server and the Firmware and holds byte offsets + * into the CCCB as well as the wrapping mask to aid wrap around. A given + * snapshot of this queue with Cmd 1 running on the GPU might be: + * + * Roff Doff Woff + * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........] + * < runnable commands >< !ready to run > + * + * Cmd 1 : Currently executing on the GPU data master. + * Cmd 2,3,4: Fence dependencies met, commands runnable. + * Cmd 5... : Fence dependency not met yet. + */
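A minimal sketch of the wrap arithmetic described above, assuming a power-of-two CCCB size (cccb_bytes_queued is a hypothetical helper for illustration, not a function from this patch):

static u32 cccb_bytes_queued(u32 write_offset, u32 read_offset, u32 wrap_mask)
{
	/* wrap_mask is the CCCB capacity in bytes minus 1, so the AND wraps. */
	return (write_offset - read_offset) & wrap_mask;
}

With this convention Roff == Woff denotes an empty queue, so a writer has to keep at least one byte free to keep "full" distinguishable from "empty".

+struct rogue_fwif_cccb_ctl { + /* Host write offset into CCB. This must be aligned to 16 bytes. */ + u32 write_offset; + /* + * Firmware read offset into CCB. 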
Points to the command that is runnable + * on GPU, if R!=W + */ + u32 read_offset; + /* + * Firmware fence dependency offset. Points to commands not ready, i.e. + * fence dependencies are not met. + */ + u32 dep_offset; + /* Offset wrapping mask: total capacity in bytes of the CCB, minus 1 */ + u32 wrap_mask; + + /* Only used if SUPPORT_AGP is present. */ + u32 read_offset2; + + /* Only used if SUPPORT_AGP4 is present. */ + u32 read_offset3; + /* Only used if SUPPORT_AGP4 is present. */ + u32 read_offset4; + + u32 padding; +} __aligned(8); + +#define ROGUE_FW_LOCAL_FREELIST (0) +#define ROGUE_FW_GLOBAL_FREELIST (1) +#define ROGUE_FW_FREELIST_TYPE_LAST ROGUE_FW_GLOBAL_FREELIST +#define ROGUE_FW_MAX_FREELISTS (ROGUE_FW_FREELIST_TYPE_LAST + 1U) + +struct rogue_fwif_geom_registers_caswitch { + u64 geom_reg_vdm_context_state_base_addr; + u64 geom_reg_vdm_context_state_resume_addr; + u64 geom_reg_ta_context_state_base_addr; + + struct { + u64 geom_reg_vdm_context_store_task0; + u64 geom_reg_vdm_context_store_task1; + u64 geom_reg_vdm_context_store_task2; + + /* VDM resume state update controls */ + u64 geom_reg_vdm_context_resume_task0; + u64 geom_reg_vdm_context_resume_task1; + u64 geom_reg_vdm_context_resume_task2; + + u64 geom_reg_vdm_context_store_task3; + u64 geom_reg_vdm_context_store_task4; + + u64 geom_reg_vdm_context_resume_task3; + u64 geom_reg_vdm_context_resume_task4; + } geom_state[2]; +}; + +#define ROGUE_FWIF_GEOM_REGISTERS_CSWITCH_SIZE \ + sizeof(struct rogue_fwif_geom_registers_caswitch) + +struct rogue_fwif_cdm_registers_cswitch { + u64 cdmreg_cdm_context_pds0; + u64 cdmreg_cdm_context_pds1; + u64 cdmreg_cdm_terminate_pds; + u64 cdmreg_cdm_terminate_pds1; + + /* CDM resume controls */ + u64 cdmreg_cdm_resume_pds0; + u64 cdmreg_cdm_context_pds0_b; + u64 cdmreg_cdm_resume_pds0_b; +}; + +struct rogue_fwif_static_rendercontext_state { + /* Geom registers for ctx switch */ + struct rogue_fwif_geom_registers_caswitch ctxswitch_regs[ROGUE_NUM_GEOM_CORES_SIZE] + __aligned(8); +}; + +#define ROGUE_FWIF_STATIC_RENDERCONTEXT_SIZE \ + sizeof(struct rogue_fwif_static_rendercontext_state) + +struct rogue_fwif_static_computecontext_state { + /* CDM registers for ctx switch */ + struct rogue_fwif_cdm_registers_cswitch ctxswitch_regs __aligned(8); +}; + +#define ROGUE_FWIF_STATIC_COMPUTECONTEXT_SIZE \ + sizeof(struct rogue_fwif_static_computecontext_state) + +enum rogue_fwif_prbuffer_state { + ROGUE_FWIF_PRBUFFER_UNBACKED = 0, + ROGUE_FWIF_PRBUFFER_BACKED, + ROGUE_FWIF_PRBUFFER_BACKING_PENDING, + ROGUE_FWIF_PRBUFFER_UNBACKING_PENDING, +}; + +struct rogue_fwif_prbuffer { + /* Buffer ID */ + u32 buffer_id; + /* Needs On-demand Z/S/MSAA Buffer allocation */ + bool on_demand __aligned(4); + /* Z/S/MSAA buffer state */ + enum rogue_fwif_prbuffer_state state; + /* Cleanup state */ + struct rogue_fwif_cleanup_ctl cleanup_sate; + /* Compatibility and other flags */ + u32 prbuffer_flags; +} __aligned(8); + +/* Last reset reason for a context. 
*/ +enum rogue_context_reset_reason { + /* No reset reason recorded */ + ROGUE_CONTEXT_RESET_REASON_NONE = 0, + /* Caused a reset due to locking up */ + ROGUE_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1, + /* Affected by another context locking up */ + ROGUE_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2, + /* Overran the global deadline */ + ROGUE_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3, + /* Affected by another context overrunning */ + ROGUE_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4, + /* Forced reset to ensure scheduling requirements */ + ROGUE_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5, + /* FW Safety watchdog triggered */ + ROGUE_CONTEXT_RESET_REASON_FW_WATCHDOG = 12, + /* FW page fault (no HWR) */ + ROGUE_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13, + /* FW execution error (GPU reset requested) */ + ROGUE_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14, + /* Host watchdog detected FW error */ + ROGUE_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15, + /* Geometry DM OOM event is not allowed */ + ROGUE_CONTEXT_GEOM_OOM_DISABLED = 16, +}; + +struct rogue_context_reset_reason_data { + enum rogue_context_reset_reason reset_reason; + u32 reset_ext_job_ref; +}; + +#include "pvr_rogue_fwif_shared_check.h" + +#endif /* PVR_ROGUE_FWIF_SHARED_H */ diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h new file mode 100644 index 0000000000..597ed54bbd --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared_check.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_ROGUE_FWIF_SHARED_CHECK_H +#define PVR_ROGUE_FWIF_SHARED_CHECK_H + +#include + +#define OFFSET_CHECK(type, member, offset) \ + static_assert(offsetof(type, member) == (offset), \ + "offsetof(" #type ", " #member ") incorrect") + +#define SIZE_CHECK(type, size) \ + static_assert(sizeof(type) == (size), #type " is incorrect size") + +OFFSET_CHECK(struct rogue_fwif_dma_addr, dev_addr, 0); +OFFSET_CHECK(struct rogue_fwif_dma_addr, fw_addr, 8); +SIZE_CHECK(struct rogue_fwif_dma_addr, 16); + +OFFSET_CHECK(struct rogue_fwif_ufo, addr, 0); +OFFSET_CHECK(struct rogue_fwif_ufo, value, 4); +SIZE_CHECK(struct rogue_fwif_ufo, 8); + +OFFSET_CHECK(struct rogue_fwif_cleanup_ctl, submitted_commands, 0); +OFFSET_CHECK(struct rogue_fwif_cleanup_ctl, executed_commands, 4); +SIZE_CHECK(struct rogue_fwif_cleanup_ctl, 8); + +OFFSET_CHECK(struct rogue_fwif_cccb_ctl, write_offset, 0); +OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset, 4); +OFFSET_CHECK(struct rogue_fwif_cccb_ctl, dep_offset, 8); +OFFSET_CHECK(struct rogue_fwif_cccb_ctl, wrap_mask, 12); +OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset2, 16); +OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset3, 20); +OFFSET_CHECK(struct rogue_fwif_cccb_ctl, read_offset4, 24); +SIZE_CHECK(struct rogue_fwif_cccb_ctl, 32); + +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_reg_vdm_context_state_base_addr, 0); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_reg_vdm_context_state_resume_addr, 8); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_reg_ta_context_state_base_addr, 16); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_store_task0, 24); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_store_task1, 32); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_store_task2, 
40); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_resume_task0, 48); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_resume_task1, 56); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_resume_task2, 64); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_store_task3, 72); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_store_task4, 80); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_resume_task3, 88); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_resume_task4, 96); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_store_task0, 104); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_store_task1, 112); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_store_task2, 120); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_resume_task0, 128); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_resume_task1, 136); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_resume_task2, 144); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_store_task3, 152); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_store_task4, 160); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_resume_task3, 168); +OFFSET_CHECK(struct rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_resume_task4, 176); +SIZE_CHECK(struct rogue_fwif_geom_registers_caswitch, 184); + +OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0, 0); +OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds1, 8); +OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds, 16); +OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds1, 24); +OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0, 32); +OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0_b, 40); +OFFSET_CHECK(struct rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0_b, 48); +SIZE_CHECK(struct rogue_fwif_cdm_registers_cswitch, 56); + +OFFSET_CHECK(struct rogue_fwif_static_rendercontext_state, ctxswitch_regs, 0); +SIZE_CHECK(struct rogue_fwif_static_rendercontext_state, 368); + +OFFSET_CHECK(struct rogue_fwif_static_computecontext_state, ctxswitch_regs, 0); +SIZE_CHECK(struct rogue_fwif_static_computecontext_state, 56); + +OFFSET_CHECK(struct rogue_fwif_cmd_common, frame_num, 0); +SIZE_CHECK(struct rogue_fwif_cmd_common, 4); + +OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, cmn, 0); +OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, hwrt_data_fw_addr, 4); +OFFSET_CHECK(struct rogue_fwif_cmd_geom_frag_shared, pr_buffer_fw_addr, 8); +SIZE_CHECK(struct rogue_fwif_cmd_geom_frag_shared, 16); + +#endif /* PVR_ROGUE_FWIF_SHARED_CHECK_H */ diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h new file mode 100644 index 0000000000..1c2c4ebedc --- /dev/null 
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_stream.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_ROGUE_FWIF_STREAM_H +#define PVR_ROGUE_FWIF_STREAM_H + +/** + * DOC: Streams + * + * Commands are submitted to the kernel driver in the form of streams. + * + * A command stream has the following layout: + * - A 64-bit header containing: + * * A u32 containing the length of the main stream inclusive of the length of the header. + * * A u32 for padding. + * - The main stream data. + * - The extension stream (optional), which is composed of: + * * One or more headers. + * * The extension stream data, corresponding to the extension headers. + * + * The main stream provides the base command data. This has a fixed layout based on the features + * supported by a given GPU. + * + * The extension stream provides the command parameters that are required for BRNs & ERNs for the + * current GPU. This stream comprises one or more headers, followed by data for each given + * BRN/ERN. + * + * Each header is a u32 containing a bitmask of quirks & enhancements in the extension stream, a + * "type" field determining the set of quirks & enhancements the bitmask represents, and a + * continuation bit determining whether any more headers are present. The headers are then followed + * by command data; this is specific to each quirk/enhancement. All unused / reserved bits in the + * header must be set to 0. + * + * All parameters and headers in the main and extension streams must be naturally aligned. + * + * If a parameter appears in both the main and extension streams, then the extension parameter is + * used. + */ + +/* + * Stream extension header definition + */ +#define PVR_STREAM_EXTHDR_TYPE_SHIFT 29U +#define PVR_STREAM_EXTHDR_TYPE_MASK (7U << PVR_STREAM_EXTHDR_TYPE_SHIFT) +#define PVR_STREAM_EXTHDR_TYPE_MAX 8U +#define PVR_STREAM_EXTHDR_CONTINUATION BIT(28U) + +#define PVR_STREAM_EXTHDR_DATA_MASK ~(PVR_STREAM_EXTHDR_TYPE_MASK | PVR_STREAM_EXTHDR_CONTINUATION) + +/* + * Stream extension header - Geometry 0 + */ +#define PVR_STREAM_EXTHDR_TYPE_GEOM0 0U + +#define PVR_STREAM_EXTHDR_GEOM0_BRN49927 BIT(0U) + +#define PVR_STREAM_EXTHDR_GEOM0_VALID PVR_STREAM_EXTHDR_GEOM0_BRN49927 + +/* + * Stream extension header - Fragment 0 + */ +#define PVR_STREAM_EXTHDR_TYPE_FRAG0 0U + +#define PVR_STREAM_EXTHDR_FRAG0_BRN47217 BIT(0U) +#define PVR_STREAM_EXTHDR_FRAG0_BRN49927 BIT(1U) + +#define PVR_STREAM_EXTHDR_FRAG0_VALID PVR_STREAM_EXTHDR_FRAG0_BRN49927 + +/* + * Stream extension header - Compute 0 + */ +#define PVR_STREAM_EXTHDR_TYPE_COMPUTE0 0U + +#define PVR_STREAM_EXTHDR_COMPUTE0_BRN49927 BIT(0U) + +#define PVR_STREAM_EXTHDR_COMPUTE0_VALID PVR_STREAM_EXTHDR_COMPUTE0_BRN49927 + +#endif /* PVR_ROGUE_FWIF_STREAM_H */
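A minimal sketch of how a consumer might walk and validate the extension headers described above for a geometry stream, using only the masks defined in this header (pvr_ext_headers_ok is a hypothetical helper; skipping of the per-quirk data words and bounds checking are elided for brevity):

static bool pvr_ext_headers_ok(const u32 *hdr)
{
	u32 h;

	do {
		h = *hdr++;

		/* The type field must name the geometry set of quirks. */
		if (((h & PVR_STREAM_EXTHDR_TYPE_MASK) >> PVR_STREAM_EXTHDR_TYPE_SHIFT) !=
		    PVR_STREAM_EXTHDR_TYPE_GEOM0)
			return false;

		/* Only defined quirk bits may be set in the data field. */
		if (h & PVR_STREAM_EXTHDR_DATA_MASK & ~PVR_STREAM_EXTHDR_GEOM0_VALID)
			return false;

		/* A real parser would advance past the data words for each set bit here. */
	} while (h & PVR_STREAM_EXTHDR_CONTINUATION);

	return true;
}

diff --git a/drivers/gpu/drm/imagination/pvr_rogue_heap_config.h b/drivers/gpu/drm/imagination/pvr_rogue_heap_config.h new file mode 100644 index 0000000000..6847660067 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_rogue_heap_config.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_ROGUE_HEAP_CONFIG_H +#define PVR_ROGUE_HEAP_CONFIG_H + +#include <linux/sizes.h> + +/* + * ROGUE Device Virtual Address Space Definitions + * + * This file defines the ROGUE virtual address heaps that are used in + * application memory contexts. 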
It also shows where the Firmware memory heap + * fits into this, but the firmware heap is only ever created in the + * kernel driver and never exposed to userspace. + * + * ROGUE_PDSCODEDATA_HEAP_BASE and ROGUE_USCCODE_HEAP_BASE will be programmed, + * on a global basis, into ROGUE_CR_PDS_EXEC_BASE and ROGUE_CR_USC_CODE_BASE_* + * respectively. Therefore if client drivers use multiple configs they must + * still be consistent with their definitions for these heaps. + * + * Base addresses have to be a multiple of 4MiB. + * Heaps must not start at 0x0000000000, as this is reserved for internal + * use within the driver. + * Range comments: those starting in column 0 below act as section headings and + * sit above the heaps in that range. Often this is the reserved + * size of the heap within the range. + */ + +/* 0x00_0000_0000 ************************************************************/ + +/* 0x00_0000_0000 - 0x00_0040_0000 */ +/* 0 MiB to 4 MiB, size of 4 MiB : RESERVED */ + +/* 0x00_0040_0000 - 0x7F_FFC0_0000 **/ +/* 4 MiB to 512 GiB, size of 512 GiB less 4 MiB : RESERVED **/ + +/* 0x80_0000_0000 ************************************************************/ + +/* 0x80_0000_0000 - 0x9F_FFFF_FFFF **/ +/* 512 GiB to 640 GiB, size of 128 GiB : GENERAL_HEAP **/ +#define ROGUE_GENERAL_HEAP_BASE 0x8000000000ull +#define ROGUE_GENERAL_HEAP_SIZE SZ_128G + +/* 0xA0_0000_0000 - 0xAF_FFFF_FFFF */ +/* 640 GiB to 704 GiB, size of 64 GiB : FREE */ + +/* 0xB0_0000_0000 - 0xB7_FFFF_FFFF */ +/* 704 GiB to 736 GiB, size of 32 GiB : FREE */ + +/* 0xB8_0000_0000 - 0xBF_FFFF_FFFF */ +/* 736 GiB to 768 GiB, size of 32 GiB : RESERVED */ + +/* 0xC0_0000_0000 ************************************************************/ + +/* 0xC0_0000_0000 - 0xD9_FFFF_FFFF */ +/* 768 GiB to 872 GiB, size of 104 GiB : FREE */ + +/* 0xDA_0000_0000 - 0xDA_FFFF_FFFF */ +/* 872 GiB to 876 GiB, size of 4 GiB : PDSCODEDATA_HEAP */ +#define ROGUE_PDSCODEDATA_HEAP_BASE 0xDA00000000ull +#define ROGUE_PDSCODEDATA_HEAP_SIZE SZ_4G + +/* 0xDB_0000_0000 - 0xDB_FFFF_FFFF */ +/* 876 GiB to 880 GiB, size of 256 MiB (reserved 4GiB) : BRN **/ +/* + * The BRN63142 quirk workaround requires Region Header memory to be at the top + * of a 16GiB aligned range. This is so when masked with 0x03FFFFFFFF the + * address will avoid aliasing PB addresses. Start at 879.75GiB. Size of 256MiB. 
+ */ +#define ROGUE_RGNHDR_HEAP_BASE 0xDBF0000000ull +#define ROGUE_RGNHDR_HEAP_SIZE SZ_256M + +/* 0xDC_0000_0000 - 0xDF_FFFF_FFFF */ +/* 880 GiB to 896 GiB, size of 16 GiB : FREE */ + +/* 0xE0_0000_0000 - 0xE0_FFFF_FFFF */ +/* 896 GiB to 900 GiB, size of 4 GiB : USCCODE_HEAP */ +#define ROGUE_USCCODE_HEAP_BASE 0xE000000000ull +#define ROGUE_USCCODE_HEAP_SIZE SZ_4G + +/* 0xE1_0000_0000 - 0xE1_BFFF_FFFF */ +/* 900 GiB to 903 GiB, size of 3 GiB : RESERVED */ + +/* 0xE1_C000_0000 - 0xE1_FFFF_FFFF */ +/* 903 GiB to 904 GiB, reserved 1 GiB : FIRMWARE_HEAP */ +#define ROGUE_FW_HEAP_BASE 0xE1C0000000ull + +/* 0xE2_0000_0000 - 0xE3_FFFF_FFFF */ +/* 904 GiB to 912 GiB, size of 8 GiB : FREE */ + +/* 0xE4_0000_0000 - 0xE7_FFFF_FFFF */ +/* 912 GiB to 928 GiB, size of 16 GiB : TRANSFER_FRAG */ +#define ROGUE_TRANSFER_FRAG_HEAP_BASE 0xE400000000ull +#define ROGUE_TRANSFER_FRAG_HEAP_SIZE SZ_16G + +/* 0xE8_0000_0000 - 0xF1_FFFF_FFFF */ +/* 928 GiB to 968 GiB, size of 40 GiB : RESERVED */ + +/* 0xF2_0000_0000 - 0xF2_001F_FFFF **/ +/* 968 GiB to 969 GiB, size of 2 MiB : VISTEST_HEAP */ +#define ROGUE_VISTEST_HEAP_BASE 0xF200000000ull +#define ROGUE_VISTEST_HEAP_SIZE SZ_2M + +/* 0xF2_4000_0000 - 0xF2_FFFF_FFFF */ +/* 969 GiB to 972 GiB, size of 3 GiB : FREE */ + +/* 0xF3_0000_0000 - 0xFF_FFFF_FFFF */ +/* 972 GiB to 1024 GiB, size of 52 GiB : FREE */ + +/* 0xFF_FFFF_FFFF ************************************************************/ + +#endif /* PVR_ROGUE_HEAP_CONFIG_H */
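As an aside, the 4 MiB base-alignment rule stated at the top of this header lends itself to compile-time checking. A minimal sketch, assuming static_assert as provided by <linux/build_bug.h>; these checks are illustrative only and are not part of the patch:

/* Every heap base must sit on a 4 MiB boundary. */
static_assert((ROGUE_GENERAL_HEAP_BASE & (SZ_4M - 1)) == 0,
	      "GENERAL heap base must be 4 MiB aligned");
static_assert((ROGUE_PDSCODEDATA_HEAP_BASE & (SZ_4M - 1)) == 0,
	      "PDSCODEDATA heap base must be 4 MiB aligned");
static_assert((ROGUE_USCCODE_HEAP_BASE & (SZ_4M - 1)) == 0,
	      "USCCODE heap base must be 4 MiB aligned");

diff --git a/drivers/gpu/drm/imagination/pvr_rogue_meta.h b/drivers/gpu/drm/imagination/pvr_rogue_meta.h new file mode 100644 index 0000000000..3020e6582d --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_rogue_meta.h @@ -0,0 +1,356 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. 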
*/ + +#ifndef PVR_ROGUE_META_H +#define PVR_ROGUE_META_H + +/***** The META HW register definitions in the file are updated manually *****/ + +#include <linux/bits.h> +#include <linux/types.h> + +/* + ****************************************************************************** + * META registers and MACROS + ***************************************************************************** + */ +#define META_CR_CTRLREG_BASE(t) (0x04800000U + (0x1000U * (t))) + +#define META_CR_TXPRIVEXT (0x048000E8) +#define META_CR_TXPRIVEXT_MINIM_EN BIT(7) + +#define META_CR_SYSC_JTAG_THREAD (0x04830030) +#define META_CR_SYSC_JTAG_THREAD_PRIV_EN (0x00000004) + +#define META_CR_PERF_COUNT0 (0x0480FFE0) +#define META_CR_PERF_COUNT1 (0x0480FFE8) +#define META_CR_PERF_COUNT_CTRL_SHIFT (28) +#define META_CR_PERF_COUNT_CTRL_MASK (0xF0000000) +#define META_CR_PERF_COUNT_CTRL_DCACHEHITS (8 << META_CR_PERF_COUNT_CTRL_SHIFT) +#define META_CR_PERF_COUNT_CTRL_ICACHEHITS (9 << META_CR_PERF_COUNT_CTRL_SHIFT) +#define META_CR_PERF_COUNT_CTRL_ICACHEMISS \ + (0xA << META_CR_PERF_COUNT_CTRL_SHIFT) +#define META_CR_PERF_COUNT_CTRL_ICORE (0xD << META_CR_PERF_COUNT_CTRL_SHIFT) +#define META_CR_PERF_COUNT_THR_SHIFT (24) +#define META_CR_PERF_COUNT_THR_MASK (0x0F000000) +#define META_CR_PERF_COUNT_THR_0 (0x1 << META_CR_PERF_COUNT_THR_SHIFT) +#define META_CR_PERF_COUNT_THR_1 (0x2 << META_CR_PERF_COUNT_THR_SHIFT) + +#define META_CR_TxVECINT_BHALT (0x04820500) +#define META_CR_PERF_ICORE0 (0x0480FFD0) +#define META_CR_PERF_ICORE1 (0x0480FFD8) +#define META_CR_PERF_ICORE_DCACHEMISS (0x8) + +#define META_CR_PERF_COUNT(ctrl, thr) \ + ((META_CR_PERF_COUNT_CTRL_##ctrl << META_CR_PERF_COUNT_CTRL_SHIFT) | \ + ((thr) << META_CR_PERF_COUNT_THR_SHIFT)) + +#define META_CR_TXUXXRXDT_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF0U) +#define META_CR_TXUXXRXRQ_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF8U) + +/* Poll for done. */ +#define META_CR_TXUXXRXRQ_DREADY_BIT (0x80000000U) +/* Set for read. */ +#define META_CR_TXUXXRXRQ_RDnWR_BIT (0x00010000U) +#define META_CR_TXUXXRXRQ_TX_S (12) +#define META_CR_TXUXXRXRQ_RX_S (4) +#define META_CR_TXUXXRXRQ_UXX_S (0) + +/* Internal ctrl regs. */ +#define META_CR_TXUIN_ID (0x0) +/* Data unit regs. */ +#define META_CR_TXUD0_ID (0x1) +/* Data unit regs. */ +#define META_CR_TXUD1_ID (0x2) +/* Address unit regs. */ +#define META_CR_TXUA0_ID (0x3) +/* Address unit regs. */ +#define META_CR_TXUA1_ID (0x4) +/* PC registers. */ +#define META_CR_TXUPC_ID (0x5)
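A minimal sketch of the indirect core-register read implied by the TXUXXRXRQ/TXUXXRXDT definitions above: compose the thread, register and unit fields, set the read bit, poll for DREADY, then sample the data register. The meta_core_reg_read name, the regs mapping, and the use of the generic readl_poll_timeout helper from <linux/iopoll.h> are assumptions of this sketch, not the driver's actual accessors:

static int meta_core_reg_read(void __iomem *regs, u32 thread, u32 reg_num,
			      u32 unit, u32 *val_out)
{
	u32 done;
	int err;

	/* Select thread/register/unit and flag the transfer as a read. */
	writel((thread << META_CR_TXUXXRXRQ_TX_S) |
	       (reg_num << META_CR_TXUXXRXRQ_RX_S) |
	       (unit << META_CR_TXUXXRXRQ_UXX_S) |
	       META_CR_TXUXXRXRQ_RDnWR_BIT,
	       regs + META_CR_TXUXXRXRQ_OFFSET);

	/* Poll the data-ready bit until the indirect access completes. */
	err = readl_poll_timeout(regs + META_CR_TXUXXRXRQ_OFFSET, done,
				 done & META_CR_TXUXXRXRQ_DREADY_BIT, 10, 10000);
	if (err)
		return err;

	*val_out = readl(regs + META_CR_TXUXXRXDT_OFFSET);
	return 0;
}

+ +/* Macros to calculate register access values. 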
*/ +#define META_CR_CORE_REG(thr, reg_num, unit) \ + (((u32)(thr) << META_CR_TXUXXRXRQ_TX_S) | \ + ((u32)(reg_num) << META_CR_TXUXXRXRQ_RX_S) | \ + ((u32)(unit) << META_CR_TXUXXRXRQ_UXX_S)) + +#define META_CR_THR0_PC META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID) +#define META_CR_THR0_PCX META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID) +#define META_CR_THR0_SP META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID) + +#define META_CR_THR1_PC META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID) +#define META_CR_THR1_PCX META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID) +#define META_CR_THR1_SP META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID) + +#define SP_ACCESS(thread) META_CR_CORE_REG(thread, 0, META_CR_TXUA0_ID) +#define PC_ACCESS(thread) META_CR_CORE_REG(thread, 0, META_CR_TXUPC_ID) + +#define META_CR_COREREG_ENABLE (0x0000000U) +#define META_CR_COREREG_STATUS (0x0000010U) +#define META_CR_COREREG_DEFR (0x00000A0U) +#define META_CR_COREREG_PRIVEXT (0x00000E8U) + +#define META_CR_T0ENABLE_OFFSET \ + (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_ENABLE) +#define META_CR_T0STATUS_OFFSET \ + (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_STATUS) +#define META_CR_T0DEFR_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_DEFR) +#define META_CR_T0PRIVEXT_OFFSET \ + (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_PRIVEXT) + +#define META_CR_T1ENABLE_OFFSET \ + (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_ENABLE) +#define META_CR_T1STATUS_OFFSET \ + (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_STATUS) +#define META_CR_T1DEFR_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_DEFR) +#define META_CR_T1PRIVEXT_OFFSET \ + (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_PRIVEXT) + +#define META_CR_TXENABLE_ENABLE_BIT (0x00000001U) /* Set if running */ +#define META_CR_TXSTATUS_PRIV (0x00020000U) +#define META_CR_TXPRIVEXT_MINIM (0x00000080U) + +#define META_MEM_GLOBAL_RANGE_BIT (0x80000000U) + +#define META_CR_TXCLKCTRL (0x048000B0) +#define META_CR_TXCLKCTRL_ALL_ON (0x55111111) +#define META_CR_TXCLKCTRL_ALL_AUTO (0xAA222222) + +#define META_CR_MMCU_LOCAL_EBCTRL (0x04830600) +#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN (0x3 << 14) +#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN (0x3 << 6) +#define META_CR_SYSC_DCPART(n) (0x04830200 + (n) * 0x8) +#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE (0x1 << 31) +#define META_CR_SYSC_ICPART(n) (0x04830220 + (n) * 0x8) +#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF (0x8 << 16) +#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE (0xF) +#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE (0x7) +#define META_CR_MMCU_DCACHE_CTRL (0x04830018) +#define META_CR_MMCU_ICACHE_CTRL (0x04830020) +#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN (0x1) + +/* + ****************************************************************************** + * META LDR Format + ****************************************************************************** + */ +/* Block header structure. */ +struct rogue_meta_ldr_block_hdr { + u32 dev_id; + u32 sl_code; + u32 sl_data; + u16 pc_ctrl; + u16 crc; +}; + +/* High level data stream block structure. */ +struct rogue_meta_ldr_l1_data_blk { + u16 cmd; + u16 length; + u32 next; + u32 cmd_data[4]; +}; + +/* High level data stream block structure. */ +struct rogue_meta_ldr_l2_data_blk { + u16 tag; + u16 length; + u32 block_data[4]; +}; + +/* Config command structure. 
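+ * The type field selects one of the ROGUE_META_LDR_CFG_* operations
+ * defined below and block_data carries that operation's operands, so a
+ * loader dispatches roughly as follows (illustrative sketch; the
+ * handle_*() helpers are hypothetical):
+ *
+ *   switch (cfg->type) {
+ *   case ROGUE_META_LDR_CFG_WRITE:
+ *           handle_write(cfg->block_data[0], cfg->block_data[1]);
+ *           break;
+ *   ...
+ *   }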
*/
+struct rogue_meta_ldr_cfg_blk {
+	u32 type;
+	u32 block_data[4];
+};
+
+/* Block type definitions */
+#define ROGUE_META_LDR_COMMENT_TYPE_MASK (0x0010U)
+#define ROGUE_META_LDR_BLK_IS_COMMENT(x) (((x) & ROGUE_META_LDR_COMMENT_TYPE_MASK) != 0U)
+
+/*
+ * Command definitions
+ *  Value  Name          Description
+ *  0      LoadMem       Load memory with binary data.
+ *  1      LoadCore      Load a set of core registers.
+ *  2      LoadMMReg     Load a set of memory mapped registers.
+ *  3      StartThreads  Set each thread PC and SP, then enable threads.
+ *  4      ZeroMem       Zeros a memory region.
+ *  5      Config        Perform a configuration command.
+ */
+#define ROGUE_META_LDR_CMD_MASK (0x000FU)
+
+#define ROGUE_META_LDR_CMD_LOADMEM (0x0000U)
+#define ROGUE_META_LDR_CMD_LOADCORE (0x0001U)
+#define ROGUE_META_LDR_CMD_LOADMMREG (0x0002U)
+#define ROGUE_META_LDR_CMD_START_THREADS (0x0003U)
+#define ROGUE_META_LDR_CMD_ZEROMEM (0x0004U)
+#define ROGUE_META_LDR_CMD_CONFIG (0x0005U)
+
+/*
+ * Config Command definitions
+ *  Value  Name      Description
+ *  0      Pause     Pause for x * 100 instructions.
+ *  1      Read      Read a value from a register - no value return needed.
+ *                   Utilises effects of issuing reads to certain registers.
+ *  2      Write     Write to a memory location.
+ *  3      MemSet    Set memory to a value.
+ *  4      MemCheck  Check memory for a specific value.
+ */
+#define ROGUE_META_LDR_CFG_PAUSE (0x0000)
+#define ROGUE_META_LDR_CFG_READ (0x0001)
+#define ROGUE_META_LDR_CFG_WRITE (0x0002)
+#define ROGUE_META_LDR_CFG_MEMSET (0x0003)
+#define ROGUE_META_LDR_CFG_MEMCHECK (0x0004)
+
+/*
+ ******************************************************************************
+ * ROGUE FW segmented MMU definitions
+ ******************************************************************************
+ */
+/* All threads can access the segment. */
+#define ROGUE_FW_SEGMMU_ALLTHRS (0xf << 8U)
+/* Writable. */
+#define ROGUE_FW_SEGMMU_WRITEABLE (0x1U << 1U)
+/* Accessible and writable by all threads. */
+#define ROGUE_FW_SEGMMU_ALLTHRS_WRITEABLE \
+	(ROGUE_FW_SEGMMU_ALLTHRS | ROGUE_FW_SEGMMU_WRITEABLE)
+
+/* Direct map region 10 used for mapping GPU memory - max 8MB. */
+#define ROGUE_FW_SEGMMU_DMAP_GPU_ID (10U)
+#define ROGUE_FW_SEGMMU_DMAP_GPU_ADDR_START (0x07000000U)
+#define ROGUE_FW_SEGMMU_DMAP_GPU_MAX_SIZE (0x00800000U)
+
+/* Segment IDs. */
+#define ROGUE_FW_SEGMMU_DATA_ID (1U)
+#define ROGUE_FW_SEGMMU_BOOTLDR_ID (2U)
+#define ROGUE_FW_SEGMMU_TEXT_ID (ROGUE_FW_SEGMMU_BOOTLDR_ID)
+
+/*
+ * SLC caching strategy in S7 and volcanic is emitted through the segment MMU.
+ * All the segments configured through the macro ROGUE_FW_SEGMMU_OUTADDR_TOP
+ * are CACHED in the SLC.
+ * The interface has been kept the same to simplify the code changes.
+ * The bifdm argument is ignored (no longer relevant) in S7 and volcanic.
+ */
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC(pers, slc_policy, mmu_ctx) \
+	((((u64)((pers) & 0x3)) << 52) | (((u64)((mmu_ctx) & 0xFF)) << 44) | \
+	 (((u64)((slc_policy) & 0x1)) << 40))
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(mmu_ctx) \
+	ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x3, 0x0, mmu_ctx)
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(mmu_ctx) \
+	ROGUE_FW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x0, 0x1, mmu_ctx)
+
+/*
+ * To configure the Page Catalog and BIF-DM fed into the BIF for Garten
+ * accesses through this segment.
+ */
+#define ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(pc, bifdm) \
+	(((u64)((u64)(pc) & 0xFU) << 44U) | ((u64)((u64)(bifdm) & 0xFU) << 40U))
+
+#define ROGUE_FW_SEGMMU_META_BIFDM_ID (0x7U)
+
+/* META segments have 4kB minimum size.
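+ * Sizes must therefore be rounded up to ROGUE_FW_SEGMMU_ALIGN before a
+ * segment is programmed through the META_CR_MMCU_SEGMENT_N_* registers
+ * below, e.g. (sketch):
+ *
+ *   seg_size = ALIGN(size, ROGUE_FW_SEGMMU_ALIGN);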
*/ +#define ROGUE_FW_SEGMMU_ALIGN (0x1000U) + +/* Segmented MMU registers (n = segment id). */ +#define META_CR_MMCU_SEGMENT_N_BASE(n) (0x04850000U + ((n) * 0x10U)) +#define META_CR_MMCU_SEGMENT_N_LIMIT(n) (0x04850004U + ((n) * 0x10U)) +#define META_CR_MMCU_SEGMENT_N_OUTA0(n) (0x04850008U + ((n) * 0x10U)) +#define META_CR_MMCU_SEGMENT_N_OUTA1(n) (0x0485000CU + ((n) * 0x10U)) + +/* + * The following defines must be recalculated if the Meta MMU segments used + * to access Host-FW data are changed + * Current combinations are: + * - SLC uncached, META cached, FW base address 0x70000000 + * - SLC uncached, META uncached, FW base address 0xF0000000 + * - SLC cached, META cached, FW base address 0x10000000 + * - SLC cached, META uncached, FW base address 0x90000000 + */ +#define ROGUE_FW_SEGMMU_DATA_BASE_ADDRESS (0x10000000U) +#define ROGUE_FW_SEGMMU_DATA_META_CACHED (0x0U) +#define ROGUE_FW_SEGMMU_DATA_META_UNCACHED (META_MEM_GLOBAL_RANGE_BIT) +#define ROGUE_FW_SEGMMU_DATA_META_CACHE_MASK (META_MEM_GLOBAL_RANGE_BIT) +/* + * For non-VIVT SLCs the cacheability of the FW data in the SLC is selected in + * the PTEs for the FW data, not in the Meta Segment MMU, which means these + * defines have no real effect in those cases. + */ +#define ROGUE_FW_SEGMMU_DATA_VIVT_SLC_CACHED (0x0U) +#define ROGUE_FW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000U) +#define ROGUE_FW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U) + +/* + ****************************************************************************** + * ROGUE FW Bootloader defaults + ****************************************************************************** + */ +#define ROGUE_FW_BOOTLDR_META_ADDR (0x40000000U) +#define ROGUE_FW_BOOTLDR_DEVV_ADDR_0 (0xC0000000U) +#define ROGUE_FW_BOOTLDR_DEVV_ADDR_1 (0x000000E1) +#define ROGUE_FW_BOOTLDR_DEVV_ADDR \ + ((((u64)ROGUE_FW_BOOTLDR_DEVV_ADDR_1) << 32) | \ + ROGUE_FW_BOOTLDR_DEVV_ADDR_0) +#define ROGUE_FW_BOOTLDR_LIMIT (0x1FFFF000) +#define ROGUE_FW_MAX_BOOTLDR_OFFSET (0x1000) + +/* Bootloader configuration offset is in dwords (512 bytes) */ +#define ROGUE_FW_BOOTLDR_CONF_OFFSET (0x80) + +/* + ****************************************************************************** + * ROGUE META Stack + ****************************************************************************** + */ +#define ROGUE_META_STACK_SIZE (0x1000U) + +/* + ****************************************************************************** + * ROGUE META Core memory + ****************************************************************************** + */ +/* Code and data both map to the same physical memory. 
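+ * A firmware virtual address can be classified and converted to a core
+ * memory offset with the helpers below, e.g. (illustrative sketch):
+ *
+ *   if (ROGUE_META_IS_COREMEM_CODE(fw_addr, coremem_size))
+ *           offset = fw_addr & ROGUE_META_COREMEM_OFFSET_MASK;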
*/
+#define ROGUE_META_COREMEM_CODE_ADDR (0x80000000U)
+#define ROGUE_META_COREMEM_DATA_ADDR (0x82000000U)
+#define ROGUE_META_COREMEM_OFFSET_MASK (0x01ffffffU)
+
+#define ROGUE_META_IS_COREMEM_CODE(a, b)                                \
+	({                                                              \
+		u32 _a = (a), _b = (b);                                 \
+		((_a) >= ROGUE_META_COREMEM_CODE_ADDR) &&               \
+			((_a) < (ROGUE_META_COREMEM_CODE_ADDR + (_b))); \
+	})
+#define ROGUE_META_IS_COREMEM_DATA(a, b)                                \
+	({                                                              \
+		u32 _a = (a), _b = (b);                                 \
+		((_a) >= ROGUE_META_COREMEM_DATA_ADDR) &&               \
+			((_a) < (ROGUE_META_COREMEM_DATA_ADDR + (_b))); \
+	})
+/*
+ ******************************************************************************
+ * 2nd thread
+ ******************************************************************************
+ */
+#define ROGUE_FW_THR1_PC (0x18930000)
+#define ROGUE_FW_THR1_SP (0x78890000)
+
+/*
+ ******************************************************************************
+ * META compatibility
+ ******************************************************************************
+ */
+
+#define META_CR_CORE_ID (0x04831000)
+#define META_CR_CORE_ID_VER_SHIFT (16U)
+#define META_CR_CORE_ID_VER_CLRMSK (0XFF00FFFFU)
+
+#define ROGUE_CR_META_MTP218_CORE_ID_VALUE 0x19
+#define ROGUE_CR_META_MTP219_CORE_ID_VALUE 0x1E
+#define ROGUE_CR_META_LTP218_CORE_ID_VALUE 0x1C
+#define ROGUE_CR_META_LTP217_CORE_ID_VALUE 0x1F
+
+#define ROGUE_FW_PROCESSOR_META "META"
+
+#endif /* PVR_ROGUE_META_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_mips.h b/drivers/gpu/drm/imagination/pvr_rogue_mips.h
new file mode 100644
index 0000000000..41ed618fda
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_mips.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_ROGUE_MIPS_H
+#define PVR_ROGUE_MIPS_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* Utility defines for memory management. */
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K (12)
+#define ROGUE_MIPSFW_PAGE_SIZE_4K (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+#define ROGUE_MIPSFW_PAGE_MASK_4K (ROGUE_MIPSFW_PAGE_SIZE_4K - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_64K (16)
+#define ROGUE_MIPSFW_PAGE_SIZE_64K (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_64K)
+#define ROGUE_MIPSFW_PAGE_MASK_64K (ROGUE_MIPSFW_PAGE_SIZE_64K - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_256K (18)
+#define ROGUE_MIPSFW_PAGE_SIZE_256K (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_256K)
+#define ROGUE_MIPSFW_PAGE_MASK_256K (ROGUE_MIPSFW_PAGE_SIZE_256K - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_1MB (20)
+#define ROGUE_MIPSFW_PAGE_SIZE_1MB (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_1MB)
+#define ROGUE_MIPSFW_PAGE_MASK_1MB (ROGUE_MIPSFW_PAGE_SIZE_1MB - 1)
+#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_4MB (22)
+#define ROGUE_MIPSFW_PAGE_SIZE_4MB (0x1 << ROGUE_MIPSFW_LOG2_PAGE_SIZE_4MB)
+#define ROGUE_MIPSFW_PAGE_MASK_4MB (ROGUE_MIPSFW_PAGE_SIZE_4MB - 1)
+#define ROGUE_MIPSFW_LOG2_PTE_ENTRY_SIZE (2)
+/* log2 page table sizes dependent on FW heap size and page size (for each OS). */
+#define ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_4K(pvr_dev) ((pvr_dev)->fw_dev.fw_heap_info.log2_size - \
+						      ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K + \
+						      ROGUE_MIPSFW_LOG2_PTE_ENTRY_SIZE)
+#define ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_64K(pvr_dev) ((pvr_dev)->fw_dev.fw_heap_info.log2_size - \
+						       ROGUE_MIPSFW_LOG2_PAGE_SIZE_64K + \
+						       ROGUE_MIPSFW_LOG2_PTE_ENTRY_SIZE)
+/* Maximum number of page table pages (both Host and MIPS pages). */
+#define ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES (4)
+/* Total number of TLB entries. */
+#define ROGUE_MIPSFW_NUMBER_OF_TLB_ENTRIES (16)
+/* "Uncached" caching policy.
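+ * The policy values below are written to the EntryLo cache coherency
+ * field at ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT; an uncached,
+ * dirty+valid+global PTE is built roughly as (sketch; 'paddr' is an
+ * illustrative page-aligned physical address):
+ *
+ *   pte = (paddr >> ROGUE_MIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT) |
+ *         ROGUE_MIPSFW_ENTRYLO_DVG_UNCACHED;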
*/
+#define ROGUE_MIPSFW_UNCACHED_CACHE_POLICY (2)
+/* "Write-back write-allocate" caching policy. */
+#define ROGUE_MIPSFW_WRITEBACK_CACHE_POLICY (3)
+/* "Write-through no write-allocate" caching policy. */
+#define ROGUE_MIPSFW_WRITETHROUGH_CACHE_POLICY (1)
+/* Cached policy used by MIPS when the physical bus is 32 bits wide. */
+#define ROGUE_MIPSFW_CACHED_POLICY (ROGUE_MIPSFW_WRITEBACK_CACHE_POLICY)
+/* Cached policy used by MIPS when the physical bus is wider than 32 bits. */
+#define ROGUE_MIPSFW_CACHED_POLICY_ABOVE_32BIT (ROGUE_MIPSFW_WRITETHROUGH_CACHE_POLICY)
+/* Total number of Remap entries. */
+#define ROGUE_MIPSFW_NUMBER_OF_REMAP_ENTRIES (2 * ROGUE_MIPSFW_NUMBER_OF_TLB_ENTRIES)
+
+/* MIPS EntryLo/PTE format. */
+
+#define ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_SHIFT (31U)
+#define ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK (0X7FFFFFFF)
+#define ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_EN (0X80000000)
+
+#define ROGUE_MIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT (30U)
+#define ROGUE_MIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK (0XBFFFFFFF)
+#define ROGUE_MIPSFW_ENTRYLO_EXEC_INHIBIT_EN (0X40000000)
+
+/* Page Frame Number. */
+#define ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT (6)
+#define ROGUE_MIPSFW_ENTRYLO_PFN_ALIGNSHIFT (12)
+/* Mask used for the MIPS page table when the physical bus is 32 bits wide. */
+#define ROGUE_MIPSFW_ENTRYLO_PFN_MASK (0x03FFFFC0)
+#define ROGUE_MIPSFW_ENTRYLO_PFN_SIZE (20)
+/* Mask used for the MIPS page table when the physical bus is wider than 32 bits. */
+#define ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT (0x3FFFFFC0)
+#define ROGUE_MIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT (24)
+#define ROGUE_MIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT (ROGUE_MIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \
+						 ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT)
+
+#define ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT (3U)
+#define ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK (0XFFFFFFC7)
+
+#define ROGUE_MIPSFW_ENTRYLO_DIRTY_SHIFT (2U)
+#define ROGUE_MIPSFW_ENTRYLO_DIRTY_CLRMSK (0XFFFFFFFB)
+#define ROGUE_MIPSFW_ENTRYLO_DIRTY_EN (0X00000004)
+
+#define ROGUE_MIPSFW_ENTRYLO_VALID_SHIFT (1U)
+#define ROGUE_MIPSFW_ENTRYLO_VALID_CLRMSK (0XFFFFFFFD)
+#define ROGUE_MIPSFW_ENTRYLO_VALID_EN (0X00000002)
+
+#define ROGUE_MIPSFW_ENTRYLO_GLOBAL_SHIFT (0U)
+#define ROGUE_MIPSFW_ENTRYLO_GLOBAL_CLRMSK (0XFFFFFFFE)
+#define ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN (0X00000001)
+
+#define ROGUE_MIPSFW_ENTRYLO_DVG (ROGUE_MIPSFW_ENTRYLO_DIRTY_EN | \
+				  ROGUE_MIPSFW_ENTRYLO_VALID_EN | \
+				  ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN)
+#define ROGUE_MIPSFW_ENTRYLO_UNCACHED (ROGUE_MIPSFW_UNCACHED_CACHE_POLICY << \
+				       ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT)
+#define ROGUE_MIPSFW_ENTRYLO_DVG_UNCACHED (ROGUE_MIPSFW_ENTRYLO_DVG | \
+					   ROGUE_MIPSFW_ENTRYLO_UNCACHED)
+
+/* Remap Range Config Addr Out. */
+/* These defines refer to the upper half of the Remap Range Config register. */
+#define ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_MASK (0x0FFFFFF0)
+#define ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT (4) /* With respect to the upper half of the register. */
+#define ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12)
+#define ROGUE_MIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT (ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \
+						 ROGUE_MIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT)
+
+/*
+ * Pages to trampoline problematic physical addresses:
+ *   - ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000
+ *   - ROGUE_MIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000
+ *   - ROGUE_MIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000
+ *   - (benign trampoline)                  : 0x1FC0_3000
+ * that would otherwise be erroneously remapped by the MIPS wrapper.
+ * (see "Firmware virtual layout and remap configuration" section below) + */ + +#define ROGUE_MIPSFW_TRAMPOLINE_LOG2_NUMPAGES (2) +#define ROGUE_MIPSFW_TRAMPOLINE_NUMPAGES BIT(ROGUE_MIPSFW_TRAMPOLINE_LOG2_NUMPAGES) +#define ROGUE_MIPSFW_TRAMPOLINE_SIZE (ROGUE_MIPSFW_TRAMPOLINE_NUMPAGES << \ + ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K) +#define ROGUE_MIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE (ROGUE_MIPSFW_TRAMPOLINE_LOG2_NUMPAGES + \ + ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K) + +#define ROGUE_MIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR (ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN) +#define ROGUE_MIPSFW_TRAMPOLINE_OFFSET(a) ((a) - ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN) + +#define ROGUE_MIPSFW_SENSITIVE_ADDR(a) (ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN == \ + (~((1 << ROGUE_MIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE) - 1) \ + & (a))) + +/* Firmware virtual layout and remap configuration. */ +/* + * For each remap region we define: + * - the virtual base used by the Firmware to access code/data through that region + * - the microAptivAP physical address correspondent to the virtual base address, + * used as input address and remapped to the actual physical address + * - log2 of size of the region remapped by the MIPS wrapper, i.e. number of bits from + * the bottom of the base input address that survive onto the output address + * (this defines both the alignment and the maximum size of the remapped region) + * - one or more code/data segments within the remapped region. + */ + +/* Boot remap setup. */ +#define ROGUE_MIPSFW_BOOT_REMAP_VIRTUAL_BASE (0xBFC00000) +#define ROGUE_MIPSFW_BOOT_REMAP_PHYS_ADDR_IN (0x1FC00000) +#define ROGUE_MIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE (12) +#define ROGUE_MIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE (ROGUE_MIPSFW_BOOT_REMAP_VIRTUAL_BASE) + +/* Data remap setup. */ +#define ROGUE_MIPSFW_DATA_REMAP_VIRTUAL_BASE (0xBFC01000) +#define ROGUE_MIPSFW_DATA_CACHED_REMAP_VIRTUAL_BASE (0x9FC01000) +#define ROGUE_MIPSFW_DATA_REMAP_PHYS_ADDR_IN (0x1FC01000) +#define ROGUE_MIPSFW_DATA_REMAP_LOG2_SEGMENT_SIZE (12) +#define ROGUE_MIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE (ROGUE_MIPSFW_DATA_REMAP_VIRTUAL_BASE) + +/* Code remap setup. */ +#define ROGUE_MIPSFW_CODE_REMAP_VIRTUAL_BASE (0x9FC02000) +#define ROGUE_MIPSFW_CODE_REMAP_PHYS_ADDR_IN (0x1FC02000) +#define ROGUE_MIPSFW_CODE_REMAP_LOG2_SEGMENT_SIZE (12) +#define ROGUE_MIPSFW_EXCEPTIONS_VIRTUAL_BASE (ROGUE_MIPSFW_CODE_REMAP_VIRTUAL_BASE) + +/* Permanent mappings setup. */ +#define ROGUE_MIPSFW_PT_VIRTUAL_BASE (0xCF000000) +#define ROGUE_MIPSFW_REGISTERS_VIRTUAL_BASE (0xCF800000) +#define ROGUE_MIPSFW_STACK_VIRTUAL_BASE (0xCF600000) + +/* Bootloader configuration data. */ +/* + * Bootloader configuration offset (where ROGUE_MIPSFW_BOOT_DATA lives) + * within the bootloader/NMI data page. + */ +#define ROGUE_MIPSFW_BOOTLDR_CONF_OFFSET (0x0) + +/* NMI shared data. */ +/* Base address of the shared data within the bootloader/NMI data page. */ +#define ROGUE_MIPSFW_NMI_SHARED_DATA_BASE (0x100) +/* Size used by Debug dump data. */ +#define ROGUE_MIPSFW_NMI_SHARED_SIZE (0x2B0) +/* Offsets in the NMI shared area in 32-bit words. */ +#define ROGUE_MIPSFW_NMI_SYNC_FLAG_OFFSET (0x0) +#define ROGUE_MIPSFW_NMI_STATE_OFFSET (0x1) +#define ROGUE_MIPSFW_NMI_ERROR_STATE_SET (0x1) + +/* MIPS boot stage. */ +#define ROGUE_MIPSFW_BOOT_STAGE_OFFSET (0x400) + +/* + * MIPS private data in the bootloader data page. + * Memory below this offset is used by the FW only, no interface data allowed. 
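+ *
+ * The resulting layout of the bootloader/NMI data page is:
+ *   0x000  boot configuration (ROGUE_MIPSFW_BOOTLDR_CONF_OFFSET)
+ *   0x100  NMI shared data (ROGUE_MIPSFW_NMI_SHARED_DATA_BASE)
+ *   0x400  boot stage word (ROGUE_MIPSFW_BOOT_STAGE_OFFSET)
+ *   0x800  MIPS private data (this offset)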
+ */ +#define ROGUE_MIPSFW_PRIVATE_DATA_OFFSET (0x800) + +struct rogue_mipsfw_boot_data { + u64 stack_phys_addr; + u64 reg_base; + u64 pt_phys_addr[ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES]; + u32 pt_log2_page_size; + u32 pt_num_pages; + u32 reserved1; + u32 reserved2; +}; + +#define ROGUE_MIPSFW_GET_OFFSET_IN_DWORDS(offset) ((offset) / sizeof(u32)) +#define ROGUE_MIPSFW_GET_OFFSET_IN_QWORDS(offset) ((offset) / sizeof(u64)) + +/* Used for compatibility checks. */ +#define ROGUE_MIPSFW_ARCHTYPE_VER_CLRMSK (0xFFFFE3FFU) +#define ROGUE_MIPSFW_ARCHTYPE_VER_SHIFT (10U) +#define ROGUE_MIPSFW_CORE_ID_VALUE (0x001U) +#define ROGUE_FW_PROCESSOR_MIPS "MIPS" + +/* microAptivAP cache line size. */ +#define ROGUE_MIPSFW_MICROAPTIVEAP_CACHELINE_SIZE (16U) + +/* + * The SOCIF transactions are identified with the top 16 bits of the physical address emitted by + * the MIPS. + */ +#define ROGUE_MIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN (16U) + +/* Values to put in the MIPS selectors for performance counters. */ +/* Icache accesses in COUNTER0. */ +#define ROGUE_MIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0 (9U) +/* Icache misses in COUNTER1. */ +#define ROGUE_MIPSFW_PERF_COUNT_CTRL_ICACHE_MISSES_C1 (9U) + +/* Dcache accesses in COUNTER0. */ +#define ROGUE_MIPSFW_PERF_COUNT_CTRL_DCACHE_ACCESSES_C0 (10U) +/* Dcache misses in COUNTER1. */ +#define ROGUE_MIPSFW_PERF_COUNT_CTRL_DCACHE_MISSES_C1 (11U) + +/* ITLB instruction accesses in COUNTER0. */ +#define ROGUE_MIPSFW_PERF_COUNT_CTRL_ITLB_INSTR_ACCESSES_C0 (5U) +/* JTLB instruction accesses misses in COUNTER1. */ +#define ROGUE_MIPSFW_PERF_COUNT_CTRL_JTLB_INSTR_MISSES_C1 (7U) + + /* Instructions completed in COUNTER0. */ +#define ROGUE_MIPSFW_PERF_COUNT_CTRL_INSTR_COMPLETED_C0 (1U) +/* JTLB data misses in COUNTER1. */ +#define ROGUE_MIPSFW_PERF_COUNT_CTRL_JTLB_DATA_MISSES_C1 (8U) + +/* Shift for the Event field in the MIPS perf ctrl registers. */ +#define ROGUE_MIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT (5U) + +/* Additional flags for performance counters. See MIPS manual for further reference. */ +#define ROGUE_MIPSFW_PERF_COUNT_CTRL_COUNT_USER_MODE (8U) +#define ROGUE_MIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE (2U) +#define ROGUE_MIPSFW_PERF_COUNT_CTRL_COUNT_EXL (1U) + +#define ROGUE_MIPSFW_C0_NBHWIRQ 8 + +/* Macros to decode C0_Cause register. */ +#define ROGUE_MIPSFW_C0_CAUSE_EXCCODE(cause) (((cause) & 0x7c) >> 2) +#define ROGUE_MIPSFW_C0_CAUSE_EXCCODE_FWERROR 9 +/* Use only when Coprocessor Unusable exception. */ +#define ROGUE_MIPSFW_C0_CAUSE_UNUSABLE_UNIT(cause) (((cause) >> 28) & 0x3) +#define ROGUE_MIPSFW_C0_CAUSE_PENDING_HWIRQ(cause) (((cause) & 0x3fc00) >> 10) +#define ROGUE_MIPSFW_C0_CAUSE_FDCIPENDING BIT(21) +#define ROGUE_MIPSFW_C0_CAUSE_IV BIT(23) +#define ROGUE_MIPSFW_C0_CAUSE_IC BIT(25) +#define ROGUE_MIPSFW_C0_CAUSE_PCIPENDING BIT(26) +#define ROGUE_MIPSFW_C0_CAUSE_TIPENDING BIT(30) +#define ROGUE_MIPSFW_C0_CAUSE_BRANCH_DELAY BIT(31) + +/* Macros to decode C0_Debug register. 
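+ *
+ * Used together with the C0_Cause helpers above when dumping the state of
+ * a crashed core, e.g. (illustrative sketch; 'state' is a struct
+ * rogue_mips_state as defined further down):
+ *
+ *   u32 exc = ROGUE_MIPSFW_C0_CAUSE_EXCCODE(state->cause_register);
+ *   bool in_debug_mode = state->debug & ROGUE_MIPSFW_C0_DEBUG_DM;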
*/ +#define ROGUE_MIPSFW_C0_DEBUG_EXCCODE(debug) (((debug) >> 10) & 0x1f) +#define ROGUE_MIPSFW_C0_DEBUG_DSS BIT(0) +#define ROGUE_MIPSFW_C0_DEBUG_DBP BIT(1) +#define ROGUE_MIPSFW_C0_DEBUG_DDBL BIT(2) +#define ROGUE_MIPSFW_C0_DEBUG_DDBS BIT(3) +#define ROGUE_MIPSFW_C0_DEBUG_DIB BIT(4) +#define ROGUE_MIPSFW_C0_DEBUG_DINT BIT(5) +#define ROGUE_MIPSFW_C0_DEBUG_DIBIMPR BIT(6) +#define ROGUE_MIPSFW_C0_DEBUG_DDBLIMPR BIT(18) +#define ROGUE_MIPSFW_C0_DEBUG_DDBSIMPR BIT(19) +#define ROGUE_MIPSFW_C0_DEBUG_IEXI BIT(20) +#define ROGUE_MIPSFW_C0_DEBUG_DBUSEP BIT(21) +#define ROGUE_MIPSFW_C0_DEBUG_CACHEEP BIT(22) +#define ROGUE_MIPSFW_C0_DEBUG_MCHECKP BIT(23) +#define ROGUE_MIPSFW_C0_DEBUG_IBUSEP BIT(24) +#define ROGUE_MIPSFW_C0_DEBUG_DM BIT(30) +#define ROGUE_MIPSFW_C0_DEBUG_DBD BIT(31) + +/* Macros to decode TLB entries. */ +#define ROGUE_MIPSFW_TLB_GET_MASK(page_mask) (((page_mask) >> 13) & 0XFFFFU) +/* Page size in KB. */ +#define ROGUE_MIPSFW_TLB_GET_PAGE_SIZE(page_mask) ((((page_mask) | 0x1FFF) + 1) >> 11) +/* Page size in KB. */ +#define ROGUE_MIPSFW_TLB_GET_PAGE_MASK(page_size) ((((page_size) << 11) - 1) & ~0x7FF) +#define ROGUE_MIPSFW_TLB_GET_VPN2(entry_hi) ((entry_hi) >> 13) +#define ROGUE_MIPSFW_TLB_GET_COHERENCY(entry_lo) (((entry_lo) >> 3) & 0x7U) +#define ROGUE_MIPSFW_TLB_GET_PFN(entry_lo) (((entry_lo) >> 6) & 0XFFFFFU) +/* GET_PA uses a non-standard PFN mask for 36 bit addresses. */ +#define ROGUE_MIPSFW_TLB_GET_PA(entry_lo) (((u64)(entry_lo) & \ + ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) << 6) +#define ROGUE_MIPSFW_TLB_GET_INHIBIT(entry_lo) (((entry_lo) >> 30) & 0x3U) +#define ROGUE_MIPSFW_TLB_GET_DGV(entry_lo) ((entry_lo) & 0x7U) +#define ROGUE_MIPSFW_TLB_GLOBAL BIT(0) +#define ROGUE_MIPSFW_TLB_VALID BIT(1) +#define ROGUE_MIPSFW_TLB_DIRTY BIT(2) +#define ROGUE_MIPSFW_TLB_XI BIT(30) +#define ROGUE_MIPSFW_TLB_RI BIT(31) + +#define ROGUE_MIPSFW_REMAP_GET_REGION_SIZE(region_size_encoding) (1 << (((region_size_encoding) \ + + 1) << 1)) + +struct rogue_mips_tlb_entry { + u32 tlb_page_mask; + u32 tlb_hi; + u32 tlb_lo0; + u32 tlb_lo1; +}; + +struct rogue_mips_remap_entry { + u32 remap_addr_in; /* Always 4k aligned. */ + u32 remap_addr_out; /* Always 4k aligned. */ + u32 remap_region_size; +}; + +struct rogue_mips_state { + u32 error_state; /* This must come first in the structure. */ + u32 error_epc; + u32 status_register; + u32 cause_register; + u32 bad_register; + u32 epc; + u32 sp; + u32 debug; + u32 depc; + u32 bad_instr; + u32 unmapped_address; + struct rogue_mips_tlb_entry tlb[ROGUE_MIPSFW_NUMBER_OF_TLB_ENTRIES]; + struct rogue_mips_remap_entry remap[ROGUE_MIPSFW_NUMBER_OF_REMAP_ENTRIES]; +}; + +#include "pvr_rogue_mips_check.h" + +#endif /* PVR_ROGUE_MIPS_H */ diff --git a/drivers/gpu/drm/imagination/pvr_rogue_mips_check.h b/drivers/gpu/drm/imagination/pvr_rogue_mips_check.h new file mode 100644 index 0000000000..824b4bf33a --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_rogue_mips_check.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. 
*/
+
+#ifndef PVR_ROGUE_MIPS_CHECK_H
+#define PVR_ROGUE_MIPS_CHECK_H
+
+#include <linux/build_bug.h>
+
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_page_mask) == 0,
+	      "offsetof(struct rogue_mips_tlb_entry, tlb_page_mask) incorrect");
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_hi) == 4,
+	      "offsetof(struct rogue_mips_tlb_entry, tlb_hi) incorrect");
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_lo0) == 8,
+	      "offsetof(struct rogue_mips_tlb_entry, tlb_lo0) incorrect");
+static_assert(offsetof(struct rogue_mips_tlb_entry, tlb_lo1) == 12,
+	      "offsetof(struct rogue_mips_tlb_entry, tlb_lo1) incorrect");
+static_assert(sizeof(struct rogue_mips_tlb_entry) == 16,
+	      "struct rogue_mips_tlb_entry is incorrect size");
+
+static_assert(offsetof(struct rogue_mips_remap_entry, remap_addr_in) == 0,
+	      "offsetof(struct rogue_mips_remap_entry, remap_addr_in) incorrect");
+static_assert(offsetof(struct rogue_mips_remap_entry, remap_addr_out) == 4,
+	      "offsetof(struct rogue_mips_remap_entry, remap_addr_out) incorrect");
+static_assert(offsetof(struct rogue_mips_remap_entry, remap_region_size) == 8,
+	      "offsetof(struct rogue_mips_remap_entry, remap_region_size) incorrect");
+static_assert(sizeof(struct rogue_mips_remap_entry) == 12,
+	      "struct rogue_mips_remap_entry is incorrect size");
+
+static_assert(offsetof(struct rogue_mips_state, error_state) == 0,
+	      "offsetof(struct rogue_mips_state, error_state) incorrect");
+static_assert(offsetof(struct rogue_mips_state, error_epc) == 4,
+	      "offsetof(struct rogue_mips_state, error_epc) incorrect");
+static_assert(offsetof(struct rogue_mips_state, status_register) == 8,
+	      "offsetof(struct rogue_mips_state, status_register) incorrect");
+static_assert(offsetof(struct rogue_mips_state, cause_register) == 12,
+	      "offsetof(struct rogue_mips_state, cause_register) incorrect");
+static_assert(offsetof(struct rogue_mips_state, bad_register) == 16,
+	      "offsetof(struct rogue_mips_state, bad_register) incorrect");
+static_assert(offsetof(struct rogue_mips_state, epc) == 20,
+	      "offsetof(struct rogue_mips_state, epc) incorrect");
+static_assert(offsetof(struct rogue_mips_state, sp) == 24,
+	      "offsetof(struct rogue_mips_state, sp) incorrect");
+static_assert(offsetof(struct rogue_mips_state, debug) == 28,
+	      "offsetof(struct rogue_mips_state, debug) incorrect");
+static_assert(offsetof(struct rogue_mips_state, depc) == 32,
+	      "offsetof(struct rogue_mips_state, depc) incorrect");
+static_assert(offsetof(struct rogue_mips_state, bad_instr) == 36,
+	      "offsetof(struct rogue_mips_state, bad_instr) incorrect");
+static_assert(offsetof(struct rogue_mips_state, unmapped_address) == 40,
+	      "offsetof(struct rogue_mips_state, unmapped_address) incorrect");
+static_assert(offsetof(struct rogue_mips_state, tlb) == 44,
+	      "offsetof(struct rogue_mips_state, tlb) incorrect");
+static_assert(offsetof(struct rogue_mips_state, remap) == 300,
+	      "offsetof(struct rogue_mips_state, remap) incorrect");
+static_assert(sizeof(struct rogue_mips_state) == 684,
+	      "struct rogue_mips_state is incorrect size");
+
+#endif /* PVR_ROGUE_MIPS_CHECK_H */
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h b/drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h
new file mode 100644
index 0000000000..f361ccdd54
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_rogue_mmu_defs.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd.
*/ + +/* *** Autogenerated C -- do not edit *** */ + +#ifndef PVR_ROGUE_MMU_DEFS_H +#define PVR_ROGUE_MMU_DEFS_H + +#define ROGUE_MMU_DEFS_REVISION 0 + +#define ROGUE_BIF_DM_ENCODING_VERTEX (0x00000000U) +#define ROGUE_BIF_DM_ENCODING_PIXEL (0x00000001U) +#define ROGUE_BIF_DM_ENCODING_COMPUTE (0x00000002U) +#define ROGUE_BIF_DM_ENCODING_TLA (0x00000003U) +#define ROGUE_BIF_DM_ENCODING_PB_VCE (0x00000004U) +#define ROGUE_BIF_DM_ENCODING_PB_TE (0x00000005U) +#define ROGUE_BIF_DM_ENCODING_META (0x00000007U) +#define ROGUE_BIF_DM_ENCODING_HOST (0x00000008U) +#define ROGUE_BIF_DM_ENCODING_PM_ALIST (0x00000009U) + +#define ROGUE_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U) +#define ROGUE_MMUCTRL_VADDR_PC_INDEX_CLRMSK (0xFFFFFF003FFFFFFFULL) +#define ROGUE_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U) +#define ROGUE_MMUCTRL_VADDR_PD_INDEX_CLRMSK (0xFFFFFFFFC01FFFFFULL) +#define ROGUE_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) +#define ROGUE_MMUCTRL_VADDR_PT_INDEX_CLRMSK (0xFFFFFFFFFFE00FFFULL) + +#define ROGUE_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U) +#define ROGUE_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U) +#define ROGUE_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U) + +#define ROGUE_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U) +#define ROGUE_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U) +#define ROGUE_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U) + +#define ROGUE_MMUCTRL_PAGE_SIZE_MASK (0x00000007U) +#define ROGUE_MMUCTRL_PAGE_SIZE_4KB (0x00000000U) +#define ROGUE_MMUCTRL_PAGE_SIZE_16KB (0x00000001U) +#define ROGUE_MMUCTRL_PAGE_SIZE_64KB (0x00000002U) +#define ROGUE_MMUCTRL_PAGE_SIZE_256KB (0x00000003U) +#define ROGUE_MMUCTRL_PAGE_SIZE_1MB (0x00000004U) +#define ROGUE_MMUCTRL_PAGE_SIZE_2MB (0x00000005U) + +#define ROGUE_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U) +#define ROGUE_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (0xFFFFFF0000000FFFULL) + +#define ROGUE_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U) +#define ROGUE_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (0xFFFFFF0000003FFFULL) + +#define ROGUE_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U) +#define ROGUE_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (0xFFFFFF000000FFFFULL) + +#define ROGUE_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U) +#define ROGUE_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (0xFFFFFF000003FFFFULL) + +#define ROGUE_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U) +#define ROGUE_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (0xFFFFFF00000FFFFFULL) + +#define ROGUE_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U) +#define ROGUE_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (0xFFFFFF00001FFFFFULL) + +#define ROGUE_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U) +#define ROGUE_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (0xFFFFFF0000000FFFULL) + +#define ROGUE_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U) +#define ROGUE_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (0xFFFFFF00000003FFULL) + +#define ROGUE_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U) +#define ROGUE_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (0xFFFFFF00000000FFULL) + +#define ROGUE_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U) +#define ROGUE_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (0xFFFFFF000000003FULL) + +#define ROGUE_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U) +#define ROGUE_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (0xFFFFFF000000001FULL) + +#define ROGUE_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U) +#define ROGUE_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (0xFFFFFF000000001FULL) + +#define ROGUE_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U) +#define ROGUE_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (0xBFFFFFFFFFFFFFFFULL) +#define ROGUE_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (0x4000000000000000ULL) +#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U) +#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (0xC00000FFFFFFFFFFULL) +#define 
ROGUE_MMUCTRL_PT_DATA_PAGE_SHIFT (12U) +#define ROGUE_MMUCTRL_PT_DATA_PAGE_CLRMSK (0xFFFFFF0000000FFFULL) +#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U) +#define ROGUE_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (0xFFFFFFFFFFFFF03FULL) +#define ROGUE_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U) +#define ROGUE_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFFFFFFFFDFULL) +#define ROGUE_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (0x0000000000000020ULL) +#define ROGUE_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U) +#define ROGUE_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (0xFFFFFFFFFFFFFFEFULL) +#define ROGUE_MMUCTRL_PT_DATA_PM_SRC_EN (0x0000000000000010ULL) +#define ROGUE_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT (3U) +#define ROGUE_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK (0xFFFFFFFFFFFFFFF7ULL) +#define ROGUE_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN (0x0000000000000008ULL) +#define ROGUE_MMUCTRL_PT_DATA_CC_SHIFT (2U) +#define ROGUE_MMUCTRL_PT_DATA_CC_CLRMSK (0xFFFFFFFFFFFFFFFBULL) +#define ROGUE_MMUCTRL_PT_DATA_CC_EN (0x0000000000000004ULL) +#define ROGUE_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U) +#define ROGUE_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (0xFFFFFFFFFFFFFFFDULL) +#define ROGUE_MMUCTRL_PT_DATA_READ_ONLY_EN (0x0000000000000002ULL) +#define ROGUE_MMUCTRL_PT_DATA_VALID_SHIFT (0U) +#define ROGUE_MMUCTRL_PT_DATA_VALID_CLRMSK (0xFFFFFFFFFFFFFFFEULL) +#define ROGUE_MMUCTRL_PT_DATA_VALID_EN (0x0000000000000001ULL) + +#define ROGUE_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U) +#define ROGUE_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFEFFFFFFFFFFULL) +#define ROGUE_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (0x0000010000000000ULL) +#define ROGUE_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U) +#define ROGUE_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (0xFFFFFF000000001FULL) +#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U) +#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (0xFFFFFFFFFFFFFFF1ULL) +#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (0x0000000000000000ULL) +#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (0x0000000000000002ULL) +#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (0x0000000000000004ULL) +#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (0x0000000000000006ULL) +#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (0x0000000000000008ULL) +#define ROGUE_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (0x000000000000000aULL) +#define ROGUE_MMUCTRL_PD_DATA_VALID_SHIFT (0U) +#define ROGUE_MMUCTRL_PD_DATA_VALID_CLRMSK (0xFFFFFFFFFFFFFFFEULL) +#define ROGUE_MMUCTRL_PD_DATA_VALID_EN (0x0000000000000001ULL) + +#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U) +#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU) +#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U) +#define ROGUE_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U) +#define ROGUE_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U) +#define ROGUE_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU) +#define ROGUE_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U) +#define ROGUE_MMUCTRL_PC_DATA_VALID_SHIFT (0U) +#define ROGUE_MMUCTRL_PC_DATA_VALID_CLRMSK (0xFFFFFFFEU) +#define ROGUE_MMUCTRL_PC_DATA_VALID_EN (0x00000001U) + +#endif /* PVR_ROGUE_MMU_DEFS_H */ diff --git a/drivers/gpu/drm/imagination/pvr_stream.c b/drivers/gpu/drm/imagination/pvr_stream.c new file mode 100644 index 0000000000..975336a4fa --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_stream.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2023 Imagination Technologies Ltd. 
*/
+
+#include "pvr_device.h"
+#include "pvr_rogue_fwif_stream.h"
+#include "pvr_stream.h"
+
+#include <linux/align.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <uapi/drm/pvr_drm.h>
+
+static __always_inline bool
+stream_def_is_supported(struct pvr_device *pvr_dev, const struct pvr_stream_def *stream_def)
+{
+	if (stream_def->feature == PVR_FEATURE_NONE)
+		return true;
+
+	if (!(stream_def->feature & PVR_FEATURE_NOT) &&
+	    pvr_device_has_feature(pvr_dev, stream_def->feature)) {
+		return true;
+	}
+
+	if ((stream_def->feature & PVR_FEATURE_NOT) &&
+	    !pvr_device_has_feature(pvr_dev, stream_def->feature & ~PVR_FEATURE_NOT)) {
+		return true;
+	}
+
+	return false;
+}
+
+static int
+pvr_stream_get_data(u8 *stream, u32 *stream_offset, u32 stream_size, u32 data_size, u32 align_size,
+		    void *dest)
+{
+	*stream_offset = ALIGN(*stream_offset, align_size);
+
+	if ((*stream_offset + data_size) > stream_size)
+		return -EINVAL;
+
+	memcpy(dest, stream + *stream_offset, data_size);
+
+	(*stream_offset) += data_size;
+
+	return 0;
+}
+
+/**
+ * pvr_stream_process_1() - Process a single stream and fill destination structure
+ * @pvr_dev: Device pointer.
+ * @stream_def: Stream definition.
+ * @nr_entries: Number of entries in @stream_def.
+ * @stream: Pointer to stream.
+ * @stream_offset: Starting offset within stream.
+ * @stream_size: Size of input stream, in bytes.
+ * @dest: Pointer to destination structure.
+ * @dest_size: Size of destination structure.
+ * @stream_offset_out: Pointer to variable to write updated stream offset to. May be NULL.
+ *
+ * Returns:
+ *  * 0 on success, or
+ *  * -%EINVAL on malformed stream.
+ */
+static int
+pvr_stream_process_1(struct pvr_device *pvr_dev, const struct pvr_stream_def *stream_def,
+		     u32 nr_entries, u8 *stream, u32 stream_offset, u32 stream_size,
+		     u8 *dest, u32 dest_size, u32 *stream_offset_out)
+{
+	int err = 0;
+	u32 i;
+
+	for (i = 0; i < nr_entries; i++) {
+		if (stream_def[i].offset >= dest_size) {
+			err = -EINVAL;
+			break;
+		}
+
+		if (!stream_def_is_supported(pvr_dev, &stream_def[i]))
+			continue;
+
+		switch (stream_def[i].size) {
+		case PVR_STREAM_SIZE_8:
+			err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u8),
+						  sizeof(u8), dest + stream_def[i].offset);
+			if (err)
+				return err;
+			break;
+
+		case PVR_STREAM_SIZE_16:
+			err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u16),
+						  sizeof(u16), dest + stream_def[i].offset);
+			if (err)
+				return err;
+			break;
+
+		case PVR_STREAM_SIZE_32:
+			err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u32),
+						  sizeof(u32), dest + stream_def[i].offset);
+			if (err)
+				return err;
+			break;
+
+		case PVR_STREAM_SIZE_64:
+			err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u64),
+						  sizeof(u64), dest + stream_def[i].offset);
+			if (err)
+				return err;
+			break;
+
+		case PVR_STREAM_SIZE_ARRAY:
+			err = pvr_stream_get_data(stream, &stream_offset, stream_size,
+						  stream_def[i].array_size, sizeof(u64),
+						  dest + stream_def[i].offset);
+			if (err)
+				return err;
+			break;
+		}
+	}
+
+	if (stream_offset_out)
+		*stream_offset_out = stream_offset;
+
+	return err;
+}
+
+static int
+pvr_stream_process_ext_stream(struct pvr_device *pvr_dev,
+			      const struct pvr_stream_cmd_defs *cmd_defs, void *ext_stream,
+			      u32 stream_offset, u32 ext_stream_size, void *dest)
+{
+	u32 musthave_masks[PVR_STREAM_EXTHDR_TYPE_MAX];
+	u32 ext_header;
+	int err = 0;
+	u32 i;
+
+	/* Copy "must have" mask from device. We clear this as we process the stream.
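+	 *
+	 * Each extension header word carries a type in
+	 * PVR_STREAM_EXTHDR_TYPE_MASK, per-type data bits in
+	 * PVR_STREAM_EXTHDR_DATA_MASK, and PVR_STREAM_EXTHDR_CONTINUATION
+	 * when a further header word follows.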
*/ + memcpy(musthave_masks, pvr_dev->stream_musthave_quirks[cmd_defs->type], + sizeof(musthave_masks)); + + do { + const struct pvr_stream_ext_header *header; + u32 type; + u32 data; + + err = pvr_stream_get_data(ext_stream, &stream_offset, ext_stream_size, sizeof(u32), + sizeof(ext_header), &ext_header); + if (err) + return err; + + type = (ext_header & PVR_STREAM_EXTHDR_TYPE_MASK) >> PVR_STREAM_EXTHDR_TYPE_SHIFT; + data = ext_header & PVR_STREAM_EXTHDR_DATA_MASK; + + if (type >= cmd_defs->ext_nr_headers) + return -EINVAL; + + header = &cmd_defs->ext_headers[type]; + if (data & ~header->valid_mask) + return -EINVAL; + + musthave_masks[type] &= ~data; + + for (i = 0; i < header->ext_streams_num; i++) { + const struct pvr_stream_ext_def *ext_def = &header->ext_streams[i]; + + if (!(ext_header & ext_def->header_mask)) + continue; + + if (!pvr_device_has_uapi_quirk(pvr_dev, ext_def->quirk)) + return -EINVAL; + + err = pvr_stream_process_1(pvr_dev, ext_def->stream, ext_def->stream_len, + ext_stream, stream_offset, + ext_stream_size, dest, + cmd_defs->dest_size, &stream_offset); + if (err) + return err; + } + } while (ext_header & PVR_STREAM_EXTHDR_CONTINUATION); + + /* + * Verify that "must have" mask is now zero. If it isn't then one of the "must have" quirks + * for this command was not present. + */ + for (i = 0; i < cmd_defs->ext_nr_headers; i++) { + if (musthave_masks[i]) + return -EINVAL; + } + + return 0; +} + +/** + * pvr_stream_process() - Build FW structure from stream + * @pvr_dev: Device pointer. + * @cmd_defs: Stream definition. + * @stream: Pointer to command stream. + * @stream_size: Size of command stream, in bytes. + * @dest_out: Pointer to destination buffer. + * + * Caller is responsible for freeing the output structure. + * + * Returns: + * * 0 on success, + * * -%ENOMEM on out of memory, or + * * -%EINVAL on malformed stream. + */ +int +pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs, + void *stream, u32 stream_size, void *dest_out) +{ + u32 stream_offset = 0; + u32 main_stream_len; + u32 padding; + int err; + + if (!stream || !stream_size) + return -EINVAL; + + err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u32), + sizeof(u32), &main_stream_len); + if (err) + return err; + + /* + * u32 after stream length is padding to ensure u64 alignment, but may be used for expansion + * in the future. Verify it's zero. + */ + err = pvr_stream_get_data(stream, &stream_offset, stream_size, sizeof(u32), + sizeof(u32), &padding); + if (err) + return err; + + if (main_stream_len < stream_offset || main_stream_len > stream_size || padding) + return -EINVAL; + + err = pvr_stream_process_1(pvr_dev, cmd_defs->main_stream, cmd_defs->main_stream_len, + stream, stream_offset, main_stream_len, dest_out, + cmd_defs->dest_size, &stream_offset); + if (err) + return err; + + if (stream_offset < stream_size) { + err = pvr_stream_process_ext_stream(pvr_dev, cmd_defs, stream, stream_offset, + stream_size, dest_out); + if (err) + return err; + } else { + u32 i; + + /* + * If we don't have an extension stream then there must not be any "must have" + * quirks for this command. + */ + for (i = 0; i < cmd_defs->ext_nr_headers; i++) { + if (pvr_dev->stream_musthave_quirks[cmd_defs->type][i]) + return -EINVAL; + } + } + + return 0; +} + +/** + * pvr_stream_create_musthave_masks() - Create "must have" masks for streams based on current device + * quirks + * @pvr_dev: Device pointer. 
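+ *
+ * The masks built here are consumed by pvr_stream_process(): a job stream
+ * that omits an extension word required by one of the device's detected
+ * quirks is rejected with -EINVAL.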
+ */
+void
+pvr_stream_create_musthave_masks(struct pvr_device *pvr_dev)
+{
+	memset(pvr_dev->stream_musthave_quirks, 0, sizeof(pvr_dev->stream_musthave_quirks));
+
+	if (pvr_device_has_uapi_quirk(pvr_dev, 47217))
+		pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_FRAG][0] |=
+			PVR_STREAM_EXTHDR_FRAG0_BRN47217;
+
+	if (pvr_device_has_uapi_quirk(pvr_dev, 49927)) {
+		pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_GEOM][0] |=
+			PVR_STREAM_EXTHDR_GEOM0_BRN49927;
+		pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_FRAG][0] |=
+			PVR_STREAM_EXTHDR_FRAG0_BRN49927;
+		pvr_dev->stream_musthave_quirks[PVR_STREAM_TYPE_COMPUTE][0] |=
+			PVR_STREAM_EXTHDR_COMPUTE0_BRN49927;
+	}
+}
diff --git a/drivers/gpu/drm/imagination/pvr_stream.h b/drivers/gpu/drm/imagination/pvr_stream.h
new file mode 100644
index 0000000000..d92acb3a61
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_STREAM_H
+#define PVR_STREAM_H
+
+#include <linux/bits.h>
+#include <linux/limits.h>
+#include <linux/types.h>
+
+struct pvr_device;
+
+struct pvr_job;
+
+enum pvr_stream_type {
+	PVR_STREAM_TYPE_GEOM = 0,
+	PVR_STREAM_TYPE_FRAG,
+	PVR_STREAM_TYPE_COMPUTE,
+	PVR_STREAM_TYPE_TRANSFER,
+	PVR_STREAM_TYPE_STATIC_RENDER_CONTEXT,
+	PVR_STREAM_TYPE_STATIC_COMPUTE_CONTEXT,
+
+	PVR_STREAM_TYPE_MAX
+};
+
+enum pvr_stream_size {
+	PVR_STREAM_SIZE_8 = 0,
+	PVR_STREAM_SIZE_16,
+	PVR_STREAM_SIZE_32,
+	PVR_STREAM_SIZE_64,
+	PVR_STREAM_SIZE_ARRAY,
+};
+
+#define PVR_FEATURE_NOT BIT(31)
+#define PVR_FEATURE_NONE U32_MAX
+
+struct pvr_stream_def {
+	u32 offset;
+	enum pvr_stream_size size;
+	u32 array_size;
+	u32 feature;
+};
+
+struct pvr_stream_ext_def {
+	const struct pvr_stream_def *stream;
+	u32 stream_len;
+	u32 header_mask;
+	u32 quirk;
+};
+
+struct pvr_stream_ext_header {
+	const struct pvr_stream_ext_def *ext_streams;
+	u32 ext_streams_num;
+	u32 valid_mask;
+};
+
+struct pvr_stream_cmd_defs {
+	enum pvr_stream_type type;
+
+	const struct pvr_stream_def *main_stream;
+	u32 main_stream_len;
+
+	u32 ext_nr_headers;
+	const struct pvr_stream_ext_header *ext_headers;
+
+	size_t dest_size;
+};
+
+int
+pvr_stream_process(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
+		   void *stream, u32 stream_size, void *dest_out);
+void
+pvr_stream_create_musthave_masks(struct pvr_device *pvr_dev);
+
+#endif /* PVR_STREAM_H */
diff --git a/drivers/gpu/drm/imagination/pvr_stream_defs.c b/drivers/gpu/drm/imagination/pvr_stream_defs.c
new file mode 100644
index 0000000000..f8bd1a8c01
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream_defs.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd.
*/
+
+#include "pvr_device_info.h"
+#include "pvr_rogue_fwif_client.h"
+#include "pvr_rogue_fwif_stream.h"
+#include "pvr_stream.h"
+#include "pvr_stream_defs.h"
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+
+#define PVR_STREAM_DEF_SET(owner, member, _size, _array_size, _feature) \
+	{ .offset = offsetof(struct owner, member), \
+	  .size = (_size), \
+	  .array_size = (_array_size), \
+	  .feature = (_feature) }
+
+#define PVR_STREAM_DEF(owner, member, member_size) \
+	PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ ## member_size, 0, PVR_FEATURE_NONE)
+
+#define PVR_STREAM_DEF_FEATURE(owner, member, member_size, feature) \
+	PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ ## member_size, 0, feature)
+
+#define PVR_STREAM_DEF_NOT_FEATURE(owner, member, member_size, feature) \
+	PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ ## member_size, 0, \
+			   (feature) | PVR_FEATURE_NOT)
+
+#define PVR_STREAM_DEF_ARRAY(owner, member) \
+	PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ARRAY, \
+			   sizeof(((struct owner *)0)->member), PVR_FEATURE_NONE)
+
+#define PVR_STREAM_DEF_ARRAY_FEATURE(owner, member, feature) \
+	PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ARRAY, \
+			   sizeof(((struct owner *)0)->member), feature)
+
+#define PVR_STREAM_DEF_ARRAY_NOT_FEATURE(owner, member, feature) \
+	PVR_STREAM_DEF_SET(owner, member, PVR_STREAM_SIZE_ARRAY, \
+			   sizeof(((struct owner *)0)->member), (feature) | PVR_FEATURE_NOT)
+
+/*
+ * When adding new parameters to the stream definition, the new parameters must go after the
+ * existing parameters, to preserve order. As parameters are naturally aligned, care must be taken
+ * with respect to implicit padding in the stream; padding should be minimised as much as possible.
+ */
+static const struct pvr_stream_def rogue_fwif_cmd_geom_stream[] = {
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.vdm_ctrl_stream_base, 64),
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.tpu_border_colour_table, 64),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.vdm_draw_indirect0, 64,
+			       PVR_FEATURE_VDM_DRAWINDIRECT),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.vdm_draw_indirect1, 32,
+			       PVR_FEATURE_VDM_DRAWINDIRECT),
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.ppp_ctrl, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.te_psg, 32),
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.vdm_context_resume_task0_size, 32),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.vdm_context_resume_task3_size, 32,
+			       PVR_FEATURE_VDM_OBJECT_LEVEL_LLS),
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.view_idx, 32),
+	PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_geom, regs.pds_coeff_free_prog, 32,
+			       PVR_FEATURE_TESSELLATION),
+};
+
+static const struct pvr_stream_def rogue_fwif_cmd_geom_stream_brn49927[] = {
+	PVR_STREAM_DEF(rogue_fwif_cmd_geom, regs.tpu, 32),
+};
+
+static const struct pvr_stream_ext_def cmd_geom_ext_streams_0[] = {
+	{
+		.stream = rogue_fwif_cmd_geom_stream_brn49927,
+		.stream_len = ARRAY_SIZE(rogue_fwif_cmd_geom_stream_brn49927),
+		.header_mask = PVR_STREAM_EXTHDR_GEOM0_BRN49927,
+		.quirk = 49927,
+	},
+};
+
+static const struct pvr_stream_ext_header cmd_geom_ext_headers[] = {
+	{
+		.ext_streams = cmd_geom_ext_streams_0,
+		.ext_streams_num = ARRAY_SIZE(cmd_geom_ext_streams_0),
+		.valid_mask = PVR_STREAM_EXTHDR_GEOM0_VALID,
+	},
+};
+
+const struct pvr_stream_cmd_defs pvr_cmd_geom_stream = {
+	.type = PVR_STREAM_TYPE_GEOM,
+
+	.main_stream = rogue_fwif_cmd_geom_stream,
+	.main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_geom_stream),
+
+	.ext_nr_headers = ARRAY_SIZE(cmd_geom_ext_headers),
+	.ext_headers =
cmd_geom_ext_headers, + + .dest_size = sizeof(struct rogue_fwif_cmd_geom), +}; + +static const struct pvr_stream_def rogue_fwif_cmd_frag_stream[] = { + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_scissor_base, 64), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_dbias_base, 64), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_oclqry_base, 64), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_zlsctl, 64), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_zload_store_base, 64), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_stencil_load_store_base, 64), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.fb_cdc_zls, 64, + PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP), + PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.pbe_word), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.tpu_border_colour_table, 64), + PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.pds_bgnd), + PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.pds_pr_bgnd), + PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_frag, regs.usc_clear_register), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.usc_pixel_output_ctrl, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_bgobjdepth, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_bgobjvals, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_aa, 32), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.isp_xtp_pipe_enable, 32, + PVR_FEATURE_S7_TOP_INFRASTRUCTURE), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_ctl, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.event_pixel_pds_info, 32), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.pixel_phantom, 32, + PVR_FEATURE_CLUSTER_GROUPING), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.view_idx, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.event_pixel_pds_data, 32), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.isp_oclqry_stride, 32, + PVR_FEATURE_GPU_MULTICORE_SUPPORT), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.isp_zls_pixels, 32, + PVR_FEATURE_ZLS_SUBTILE), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, regs.rgx_cr_blackpearl_fix, 32, + PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, zls_stride, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_frag, sls_stride, 32), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_frag, execute_count, 32, + PVR_FEATURE_GPU_MULTICORE_SUPPORT), +}; + +static const struct pvr_stream_def rogue_fwif_cmd_frag_stream_brn47217[] = { + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.isp_oclqry_stride, 32), +}; + +static const struct pvr_stream_def rogue_fwif_cmd_frag_stream_brn49927[] = { + PVR_STREAM_DEF(rogue_fwif_cmd_frag, regs.tpu, 32), +}; + +static const struct pvr_stream_ext_def cmd_frag_ext_streams_0[] = { + { + .stream = rogue_fwif_cmd_frag_stream_brn47217, + .stream_len = ARRAY_SIZE(rogue_fwif_cmd_frag_stream_brn47217), + .header_mask = PVR_STREAM_EXTHDR_FRAG0_BRN47217, + .quirk = 47217, + }, + { + .stream = rogue_fwif_cmd_frag_stream_brn49927, + .stream_len = ARRAY_SIZE(rogue_fwif_cmd_frag_stream_brn49927), + .header_mask = PVR_STREAM_EXTHDR_FRAG0_BRN49927, + .quirk = 49927, + }, +}; + +static const struct pvr_stream_ext_header cmd_frag_ext_headers[] = { + { + .ext_streams = cmd_frag_ext_streams_0, + .ext_streams_num = ARRAY_SIZE(cmd_frag_ext_streams_0), + .valid_mask = PVR_STREAM_EXTHDR_FRAG0_VALID, + }, +}; + +const struct pvr_stream_cmd_defs pvr_cmd_frag_stream = { + .type = PVR_STREAM_TYPE_FRAG, + + .main_stream = rogue_fwif_cmd_frag_stream, + .main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_frag_stream), + + .ext_nr_headers = ARRAY_SIZE(cmd_frag_ext_headers), + .ext_headers = cmd_frag_ext_headers, + + .dest_size 
= sizeof(struct rogue_fwif_cmd_frag), +}; + +static const struct pvr_stream_def rogue_fwif_cmd_compute_stream[] = { + PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.tpu_border_colour_table, 64), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_cb_queue, 64, + PVR_FEATURE_CDM_USER_MODE_QUEUE), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_cb_base, 64, + PVR_FEATURE_CDM_USER_MODE_QUEUE), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_cb, 64, + PVR_FEATURE_CDM_USER_MODE_QUEUE), + PVR_STREAM_DEF_NOT_FEATURE(rogue_fwif_cmd_compute, regs.cdm_ctrl_stream_base, 64, + PVR_FEATURE_CDM_USER_MODE_QUEUE), + PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.cdm_context_state_base_addr, 64), + PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.cdm_resume_pds1, 32), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.cdm_item, 32, + PVR_FEATURE_COMPUTE_MORTON_CAPABLE), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.compute_cluster, 32, + PVR_FEATURE_CLUSTER_GROUPING), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, regs.tpu_tag_cdm_ctrl, 32, + PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, stream_start_offset, 32, + PVR_FEATURE_CDM_USER_MODE_QUEUE), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_compute, execute_count, 32, + PVR_FEATURE_GPU_MULTICORE_SUPPORT), +}; + +static const struct pvr_stream_def rogue_fwif_cmd_compute_stream_brn49927[] = { + PVR_STREAM_DEF(rogue_fwif_cmd_compute, regs.tpu, 32), +}; + +static const struct pvr_stream_ext_def cmd_compute_ext_streams_0[] = { + { + .stream = rogue_fwif_cmd_compute_stream_brn49927, + .stream_len = ARRAY_SIZE(rogue_fwif_cmd_compute_stream_brn49927), + .header_mask = PVR_STREAM_EXTHDR_COMPUTE0_BRN49927, + .quirk = 49927, + }, +}; + +static const struct pvr_stream_ext_header cmd_compute_ext_headers[] = { + { + .ext_streams = cmd_compute_ext_streams_0, + .ext_streams_num = ARRAY_SIZE(cmd_compute_ext_streams_0), + .valid_mask = PVR_STREAM_EXTHDR_COMPUTE0_VALID, + }, +}; + +const struct pvr_stream_cmd_defs pvr_cmd_compute_stream = { + .type = PVR_STREAM_TYPE_COMPUTE, + + .main_stream = rogue_fwif_cmd_compute_stream, + .main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_compute_stream), + + .ext_nr_headers = ARRAY_SIZE(cmd_compute_ext_headers), + .ext_headers = cmd_compute_ext_headers, + + .dest_size = sizeof(struct rogue_fwif_cmd_compute), +}; + +static const struct pvr_stream_def rogue_fwif_cmd_transfer_stream[] = { + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.pds_bgnd0_base, 64), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.pds_bgnd1_base, 64), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.pds_bgnd3_sizeinfo, 64), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_mtile_base, 64), + PVR_STREAM_DEF_ARRAY(rogue_fwif_cmd_transfer, regs.pbe_wordx_mrty), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_bgobjvals, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_pixel_output_ctrl, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register0, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register1, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register2, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.usc_clear_register3, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_mtile_size, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_render_origin, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_ctl, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_aa, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.event_pixel_pds_info, 32), 
+ PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.event_pixel_pds_code, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.event_pixel_pds_data, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_render, 32), + PVR_STREAM_DEF(rogue_fwif_cmd_transfer, regs.isp_rgn, 32), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_transfer, regs.isp_xtp_pipe_enable, 32, + PVR_FEATURE_S7_TOP_INFRASTRUCTURE), + PVR_STREAM_DEF_FEATURE(rogue_fwif_cmd_transfer, regs.frag_screen, 32, + PVR_FEATURE_GPU_MULTICORE_SUPPORT), +}; + +const struct pvr_stream_cmd_defs pvr_cmd_transfer_stream = { + .type = PVR_STREAM_TYPE_TRANSFER, + + .main_stream = rogue_fwif_cmd_transfer_stream, + .main_stream_len = ARRAY_SIZE(rogue_fwif_cmd_transfer_stream), + + .ext_nr_headers = 0, + + .dest_size = sizeof(struct rogue_fwif_cmd_transfer), +}; + +static const struct pvr_stream_def rogue_fwif_static_render_context_state_stream[] = { + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_reg_vdm_context_state_base_addr, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_reg_vdm_context_state_resume_addr, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_reg_ta_context_state_base_addr, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_store_task0, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_store_task1, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_store_task2, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_store_task3, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_store_task4, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_resume_task0, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_resume_task1, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_resume_task2, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_resume_task3, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[0].geom_reg_vdm_context_resume_task4, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_store_task0, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_store_task1, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_store_task2, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_store_task3, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_store_task4, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_resume_task0, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_resume_task1, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_resume_task2, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_resume_task3, 64), + PVR_STREAM_DEF(rogue_fwif_geom_registers_caswitch, + geom_state[1].geom_reg_vdm_context_resume_task4, 64), +}; + +const struct pvr_stream_cmd_defs pvr_static_render_context_state_stream = { + .type = PVR_STREAM_TYPE_STATIC_RENDER_CONTEXT, + + .main_stream = 
rogue_fwif_static_render_context_state_stream,
+	.main_stream_len = ARRAY_SIZE(rogue_fwif_static_render_context_state_stream),
+
+	.ext_nr_headers = 0,
+
+	.dest_size = sizeof(struct rogue_fwif_geom_registers_caswitch),
+};
+
+static const struct pvr_stream_def rogue_fwif_static_compute_context_state_stream[] = {
+	PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0, 64),
+	PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds1, 64),
+	PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds, 64),
+	PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_terminate_pds1, 64),
+	PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0, 64),
+	PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_context_pds0_b, 64),
+	PVR_STREAM_DEF(rogue_fwif_cdm_registers_cswitch, cdmreg_cdm_resume_pds0_b, 64),
+};
+
+const struct pvr_stream_cmd_defs pvr_static_compute_context_state_stream = {
+	.type = PVR_STREAM_TYPE_STATIC_COMPUTE_CONTEXT,
+
+	.main_stream = rogue_fwif_static_compute_context_state_stream,
+	.main_stream_len = ARRAY_SIZE(rogue_fwif_static_compute_context_state_stream),
+
+	.ext_nr_headers = 0,
+
+	.dest_size = sizeof(struct rogue_fwif_cdm_registers_cswitch),
+};
diff --git a/drivers/gpu/drm/imagination/pvr_stream_defs.h b/drivers/gpu/drm/imagination/pvr_stream_defs.h
new file mode 100644
index 0000000000..f33b821658
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_stream_defs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_STREAM_DEFS_H
+#define PVR_STREAM_DEFS_H
+
+#include "pvr_stream.h"
+
+extern const struct pvr_stream_cmd_defs pvr_cmd_geom_stream;
+extern const struct pvr_stream_cmd_defs pvr_cmd_frag_stream;
+extern const struct pvr_stream_cmd_defs pvr_cmd_compute_stream;
+extern const struct pvr_stream_cmd_defs pvr_cmd_transfer_stream;
+extern const struct pvr_stream_cmd_defs pvr_static_render_context_state_stream;
+extern const struct pvr_stream_cmd_defs pvr_static_compute_context_state_stream;
+
+#endif /* PVR_STREAM_DEFS_H */
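These tables are declarative: each PVR_STREAM_DEF-style entry records where a field lives in the destination firmware structure, how wide it is, and which GPU feature (or quirk, for the extension streams) gates its presence; PVR_STREAM_DEF_NOT_FEATURE inverts the test. A rough sketch of the consuming side, with simplified stand-in types rather than the driver's actual parser in pvr_stream.c:

	/* Simplified stand-in for one table entry; illustration only. */
	struct stream_field {
		size_t offset;      /* destination offset in the FW structure */
		unsigned int bits;  /* field width: 32 or 64 */
		int feature;        /* required feature, or -1 if unconditional */
	};

	/* Copy the fields present on this GPU out of a packed source stream. */
	static int unpack_stream(void *dest, const u8 *src, size_t src_len,
				 const struct stream_field *fields, size_t count,
				 bool (*has_feature)(int feature))
	{
		size_t pos = 0;
		size_t i;

		for (i = 0; i < count; i++) {
			size_t bytes = fields[i].bits / 8;

			if (fields[i].feature >= 0 && !has_feature(fields[i].feature))
				continue; /* field absent from the packed stream */

			if (pos + bytes > src_len)
				return -EINVAL; /* stream shorter than advertised */

			memcpy((u8 *)dest + fields[i].offset, src + pos, bytes);
			pos += bytes;
		}

		return 0;
	}

Keeping the per-GPU layout knowledge in data rather than code is what lets a single parser serve geometry, fragment, compute and transfer commands alike.

diff --git a/drivers/gpu/drm/imagination/pvr_sync.c b/drivers/gpu/drm/imagination/pvr_sync.c
new file mode 100644
index 0000000000..129f646d14
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_sync.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd.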
*/ + +#include + +#include +#include +#include +#include + +#include "pvr_device.h" +#include "pvr_queue.h" +#include "pvr_sync.h" + +static int +pvr_check_sync_op(const struct drm_pvr_sync_op *sync_op) +{ + u8 handle_type; + + if (sync_op->flags & ~DRM_PVR_SYNC_OP_FLAGS_MASK) + return -EINVAL; + + handle_type = sync_op->flags & DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK; + if (handle_type != DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ && + handle_type != DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ) + return -EINVAL; + + if (handle_type == DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ && + sync_op->value != 0) + return -EINVAL; + + return 0; +} + +static void +pvr_sync_signal_free(struct pvr_sync_signal *sig_sync) +{ + if (!sig_sync) + return; + + drm_syncobj_put(sig_sync->syncobj); + dma_fence_chain_free(sig_sync->chain); + dma_fence_put(sig_sync->fence); + kfree(sig_sync); +} + +void +pvr_sync_signal_array_cleanup(struct xarray *array) +{ + struct pvr_sync_signal *sig_sync; + unsigned long i; + + xa_for_each(array, i, sig_sync) + pvr_sync_signal_free(sig_sync); + + xa_destroy(array); +} + +static struct pvr_sync_signal * +pvr_sync_signal_array_add(struct xarray *array, struct drm_file *file, u32 handle, u64 point) +{ + struct pvr_sync_signal *sig_sync; + struct dma_fence *cur_fence; + int err; + u32 id; + + sig_sync = kzalloc(sizeof(*sig_sync), GFP_KERNEL); + if (!sig_sync) + return ERR_PTR(-ENOMEM); + + sig_sync->handle = handle; + sig_sync->point = point; + + if (point > 0) { + sig_sync->chain = dma_fence_chain_alloc(); + if (!sig_sync->chain) { + err = -ENOMEM; + goto err_free_sig_sync; + } + } + + sig_sync->syncobj = drm_syncobj_find(file, handle); + if (!sig_sync->syncobj) { + err = -EINVAL; + goto err_free_sig_sync; + } + + /* Retrieve the current fence attached to that point. It's + * perfectly fine to get a NULL fence here, it just means there's + * no fence attached to that point yet. 
+ */ + if (!drm_syncobj_find_fence(file, handle, point, 0, &cur_fence)) + sig_sync->fence = cur_fence; + + err = xa_alloc(array, &id, sig_sync, xa_limit_32b, GFP_KERNEL); + if (err) + goto err_free_sig_sync; + + return sig_sync; + +err_free_sig_sync: + pvr_sync_signal_free(sig_sync); + return ERR_PTR(err); +} + +static struct pvr_sync_signal * +pvr_sync_signal_array_search(struct xarray *array, u32 handle, u64 point) +{ + struct pvr_sync_signal *sig_sync; + unsigned long i; + + xa_for_each(array, i, sig_sync) { + if (handle == sig_sync->handle && point == sig_sync->point) + return sig_sync; + } + + return NULL; +} + +static struct pvr_sync_signal * +pvr_sync_signal_array_get(struct xarray *array, struct drm_file *file, u32 handle, u64 point) +{ + struct pvr_sync_signal *sig_sync; + + sig_sync = pvr_sync_signal_array_search(array, handle, point); + if (sig_sync) + return sig_sync; + + return pvr_sync_signal_array_add(array, file, handle, point); +} + +int +pvr_sync_signal_array_collect_ops(struct xarray *array, + struct drm_file *file, + u32 sync_op_count, + const struct drm_pvr_sync_op *sync_ops) +{ + for (u32 i = 0; i < sync_op_count; i++) { + struct pvr_sync_signal *sig_sync; + int ret; + + if (!(sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL)) + continue; + + ret = pvr_check_sync_op(&sync_ops[i]); + if (ret) + return ret; + + sig_sync = pvr_sync_signal_array_get(array, file, + sync_ops[i].handle, + sync_ops[i].value); + if (IS_ERR(sig_sync)) + return PTR_ERR(sig_sync); + } + + return 0; +} + +int +pvr_sync_signal_array_update_fences(struct xarray *array, + u32 sync_op_count, + const struct drm_pvr_sync_op *sync_ops, + struct dma_fence *done_fence) +{ + for (u32 i = 0; i < sync_op_count; i++) { + struct dma_fence *old_fence; + struct pvr_sync_signal *sig_sync; + + if (!(sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL)) + continue; + + sig_sync = pvr_sync_signal_array_search(array, sync_ops[i].handle, + sync_ops[i].value); + if (WARN_ON(!sig_sync)) + return -EINVAL; + + old_fence = sig_sync->fence; + sig_sync->fence = dma_fence_get(done_fence); + dma_fence_put(old_fence); + + if (WARN_ON(!sig_sync->fence)) + return -EINVAL; + } + + return 0; +} + +void +pvr_sync_signal_array_push_fences(struct xarray *array) +{ + struct pvr_sync_signal *sig_sync; + unsigned long i; + + xa_for_each(array, i, sig_sync) { + if (sig_sync->chain) { + drm_syncobj_add_point(sig_sync->syncobj, sig_sync->chain, + sig_sync->fence, sig_sync->point); + sig_sync->chain = NULL; + } else { + drm_syncobj_replace_fence(sig_sync->syncobj, sig_sync->fence); + } + } +} + +static int +pvr_sync_add_dep_to_job(struct drm_sched_job *job, struct dma_fence *f) +{ + struct dma_fence_unwrap iter; + u32 native_fence_count = 0; + struct dma_fence *uf; + int err = 0; + + dma_fence_unwrap_for_each(uf, &iter, f) { + if (pvr_queue_fence_is_ufo_backed(uf)) + native_fence_count++; + } + + /* No need to unwrap the fence if it's fully non-native. */ + if (!native_fence_count) + return drm_sched_job_add_dependency(job, f); + + dma_fence_unwrap_for_each(uf, &iter, f) { + /* There's no dma_fence_unwrap_stop() helper cleaning up the refs + * owned by dma_fence_unwrap(), so let's just iterate over all + * entries without doing anything when something failed. + */ + if (err) + continue; + + if (pvr_queue_fence_is_ufo_backed(uf)) { + struct drm_sched_fence *s_fence = to_drm_sched_fence(uf); + + /* If this is a native dependency, we wait for the scheduled fence, + * and we will let pvr_queue_run_job() issue FW waits. 
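+			 */
+			err = drm_sched_job_add_dependency(job,
+							   dma_fence_get(&s_fence->scheduled));
+		} else {
+			err = drm_sched_job_add_dependency(job, dma_fence_get(uf));
+		}
+	}
+
+	dma_fence_put(f);
+	return err;
+}

The scheduled/finished split relied on above comes from drm_sched itself: every scheduler job fence is really a pair of fences, and a UFO-backed ("native") dependency only needs the dependee to have been picked to run, since the firmware then orders the two jobs on its own. Abridged from the real <drm/gpu_scheduler.h>, for orientation:

	struct drm_sched_fence {
		struct dma_fence scheduled; /* signalled when the job is selected to run */
		struct dma_fence finished;  /* signalled when the job completes */
		/* ... */
	};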
+
+int
+pvr_sync_add_deps_to_job(struct pvr_file *pvr_file, struct drm_sched_job *job,
+			 u32 sync_op_count,
+			 const struct drm_pvr_sync_op *sync_ops,
+			 struct xarray *signal_array)
+{
+	int err = 0;
+
+	if (!sync_op_count)
+		return 0;
+
+	for (u32 i = 0; i < sync_op_count; i++) {
+		struct pvr_sync_signal *sig_sync;
+		struct dma_fence *fence;
+
+		if (sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL)
+			continue;
+
+		err = pvr_check_sync_op(&sync_ops[i]);
+		if (err)
+			return err;
+
+		sig_sync = pvr_sync_signal_array_search(signal_array, sync_ops[i].handle,
+							sync_ops[i].value);
+		if (sig_sync) {
+			if (WARN_ON(!sig_sync->fence))
+				return -EINVAL;
+
+			fence = dma_fence_get(sig_sync->fence);
+		} else {
+			err = drm_syncobj_find_fence(from_pvr_file(pvr_file), sync_ops[i].handle,
+						     sync_ops[i].value, 0, &fence);
+			if (err)
+				return err;
+		}
+
+		err = pvr_sync_add_dep_to_job(job, fence);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
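Submission code is expected to drive these helpers in a fixed order: collect the signal ops, add the wait ops as scheduler dependencies, retarget the collected signals at the job's done fence, then publish everything to the syncobjs. A condensed sketch of that order; the file, job and done_fence variables are hypothetical stand-ins, not this driver's actual submit path:

	struct xarray signal_array;
	int err;

	/* xa_alloc() is used internally, so the array must allow allocation. */
	xa_init_flags(&signal_array, XA_FLAGS_ALLOC);

	err = pvr_sync_signal_array_collect_ops(&signal_array, file,
						op_count, ops);
	if (!err)
		err = pvr_sync_add_deps_to_job(pvr_file, &job->base, op_count,
					       ops, &signal_array);
	if (!err)
		err = pvr_sync_signal_array_update_fences(&signal_array, op_count,
							  ops, done_fence);
	if (!err)
		pvr_sync_signal_array_push_fences(&signal_array);

	pvr_sync_signal_array_cleanup(&signal_array);

Note that push_fences() consumes the dma_fence_chain nodes preallocated for timeline points; allocating them up front means failure can still be reported to userspace before anything is committed.

diff --git a/drivers/gpu/drm/imagination/pvr_sync.h b/drivers/gpu/drm/imagination/pvr_sync.h
new file mode 100644
index 0000000000..db6ccfda10
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_sync.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_SYNC_H
+#define PVR_SYNC_H
+
+#include <uapi/drm/pvr_drm.h>
+
+/* Forward declaration from <linux/xarray.h>. */
+struct xarray;
+
+/* Forward declaration from <drm/drm_file.h>. */
+struct drm_file;
+
+/* Forward declaration from <drm/gpu_scheduler.h>. */
+struct drm_sched_job;
+
+/* Forward declaration from "pvr_device.h". */
+struct pvr_file;
+
+/**
+ * struct pvr_sync_signal - Object encoding a syncobj signal operation
+ *
+ * The job submission logic collects all signal operations in an array of
+ * pvr_sync_signal objects. This array also serves as a cache to get the
+ * latest dma_fence when multiple jobs are submitted at once, and one job
+ * signals a syncobj point that's later waited on by a subsequent job.
+ */
+struct pvr_sync_signal {
+	/** @handle: Handle of the syncobj to signal. */
+	u32 handle;
+
+	/**
+	 * @point: Point to signal in the syncobj.
+	 *
+	 * Only relevant for timeline syncobjs.
+	 */
+	u64 point;
+
+	/** @syncobj: Syncobj retrieved from the handle. */
+	struct drm_syncobj *syncobj;
+
+	/**
+	 * @chain: Chain object used to link the new fence with the
+	 * existing timeline syncobj.
+	 *
+	 * Should be zero when manipulating a regular syncobj.
+	 */
+	struct dma_fence_chain *chain;
+
+	/**
+	 * @fence: New fence object to attach to the syncobj.
+	 *
+	 * This pointer starts with the current fence bound to
+	 * the <handle, point> pair.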
+ */ + struct dma_fence *fence; +}; + +void +pvr_sync_signal_array_cleanup(struct xarray *array); + +int +pvr_sync_signal_array_collect_ops(struct xarray *array, + struct drm_file *file, + u32 sync_op_count, + const struct drm_pvr_sync_op *sync_ops); + +int +pvr_sync_signal_array_update_fences(struct xarray *array, + u32 sync_op_count, + const struct drm_pvr_sync_op *sync_ops, + struct dma_fence *done_fence); + +void +pvr_sync_signal_array_push_fences(struct xarray *array); + +int +pvr_sync_add_deps_to_job(struct pvr_file *pvr_file, struct drm_sched_job *job, + u32 sync_op_count, + const struct drm_pvr_sync_op *sync_ops, + struct xarray *signal_array); + +#endif /* PVR_SYNC_H */ diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c new file mode 100644 index 0000000000..e59517ba03 --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -0,0 +1,1090 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#include "pvr_vm.h" + +#include "pvr_device.h" +#include "pvr_drv.h" +#include "pvr_gem.h" +#include "pvr_mmu.h" +#include "pvr_rogue_fwif.h" +#include "pvr_rogue_heap_config.h" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/** + * DOC: Memory context + * + * This is the "top level" datatype in the VM code. It's exposed in the public + * API as an opaque handle. + */ + +/** + * struct pvr_vm_context - Context type used to represent a single VM. + */ +struct pvr_vm_context { + /** + * @pvr_dev: The PowerVR device to which this context is bound. + * This binding is immutable for the life of the context. + */ + struct pvr_device *pvr_dev; + + /** @mmu_ctx: The context for binding to physical memory. */ + struct pvr_mmu_context *mmu_ctx; + + /** @gpuvm_mgr: GPUVM object associated with this context. */ + struct drm_gpuvm gpuvm_mgr; + + /** @lock: Global lock on this VM. */ + struct mutex lock; + + /** + * @fw_mem_ctx_obj: Firmware object representing firmware memory + * context. + */ + struct pvr_fw_object *fw_mem_ctx_obj; + + /** @ref_count: Reference count of object. */ + struct kref ref_count; + + /** + * @dummy_gem: GEM object to enable VM reservation. All private BOs + * should use the @dummy_gem.resv and not their own _resv field. + */ + struct drm_gem_object dummy_gem; +}; + +static inline +struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm) +{ + return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr); +} + +struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx) +{ + if (vm_ctx) + kref_get(&vm_ctx->ref_count); + + return vm_ctx; +} + +/** + * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the + * page table structure behind a VM context. + * @vm_ctx: Target VM context. + */ +dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx) +{ + return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx); +} + +/** + * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context. + * @vm_ctx: Target VM context. + * + * This is used to allow private BOs to share a dma_resv for faster fence + * updates. + * + * Returns: The dma_resv pointer. + */ +struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx) +{ + return vm_ctx->dummy_gem.resv; +} + +/** + * DOC: Memory mappings + */ + +/** + * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping. + */ +struct pvr_vm_gpuva { + /** @base: The wrapped drm_gpuva object. 
*/ + struct drm_gpuva base; +}; + +enum pvr_vm_bind_type { + PVR_VM_BIND_TYPE_MAP, + PVR_VM_BIND_TYPE_UNMAP, +}; + +/** + * struct pvr_vm_bind_op - Context of a map/unmap operation. + */ +struct pvr_vm_bind_op { + /** @type: Map or unmap. */ + enum pvr_vm_bind_type type; + + /** @pvr_obj: Object associated with mapping (map only). */ + struct pvr_gem_object *pvr_obj; + + /** + * @vm_ctx: VM context where the mapping will be created or destroyed. + */ + struct pvr_vm_context *vm_ctx; + + /** @mmu_op_ctx: MMU op context. */ + struct pvr_mmu_op_context *mmu_op_ctx; + + /** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */ + struct drm_gpuvm_bo *gpuvm_bo; + + /** + * @new_va: Prealloced VA mapping object (init in callback). + * Used when creating a mapping. + */ + struct pvr_vm_gpuva *new_va; + + /** + * @prev_va: Prealloced VA mapping object (init in callback). + * Used when a mapping or unmapping operation overlaps an existing + * mapping and splits away the beginning into a new mapping. + */ + struct pvr_vm_gpuva *prev_va; + + /** + * @next_va: Prealloced VA mapping object (init in callback). + * Used when a mapping or unmapping operation overlaps an existing + * mapping and splits away the end into a new mapping. + */ + struct pvr_vm_gpuva *next_va; + + /** @offset: Offset into @pvr_obj to begin mapping from. */ + u64 offset; + + /** @device_addr: Device-virtual address at the start of the mapping. */ + u64 device_addr; + + /** @size: Size of the desired mapping. */ + u64 size; +}; + +/** + * pvr_vm_bind_op_exec() - Execute a single bind op. + * @bind_op: Bind op context. + * + * Returns: + * * 0 on success, + * * Any error code returned by drm_gpuva_sm_map(), drm_gpuva_sm_unmap(), or + * a callback function. + */ +static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op) +{ + switch (bind_op->type) { + case PVR_VM_BIND_TYPE_MAP: + return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr, + bind_op, bind_op->device_addr, + bind_op->size, + gem_from_pvr_gem(bind_op->pvr_obj), + bind_op->offset); + + case PVR_VM_BIND_TYPE_UNMAP: + return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr, + bind_op, bind_op->device_addr, + bind_op->size); + } + + /* + * This shouldn't happen unless something went wrong + * in drm_sched. 
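+	 */
+	WARN_ON(1);
+	return -EINVAL;
+}

For orientation: drm_gpuvm_sm_map() never blindly inserts. It computes the minimal map/remap/unmap step sequence against whatever already occupies the range and feeds each step to the sm_step_* callbacks installed further down in this file. Mapping over the middle of an existing VA, for instance, splits it in two (illustrative sketch, not driver output):

	/*
	 * existing:  |--------------- old VA ---------------|
	 * request:             |----- new map -----|
	 * steps:     remap { prev = kept head, next = kept tail },
	 *            then map { the new range }
	 */

This is why the bind op below preallocates new_va, prev_va and next_va: the callbacks run under locks where allocation failure would be awkward to unwind.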
+
+static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
+{
+	drm_gpuvm_bo_put(bind_op->gpuvm_bo);
+
+	kfree(bind_op->new_va);
+	kfree(bind_op->prev_va);
+	kfree(bind_op->next_va);
+
+	if (bind_op->pvr_obj)
+		pvr_gem_object_put(bind_op->pvr_obj);
+
+	if (bind_op->mmu_op_ctx)
+		pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
+}
+
+static int
+pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
+			struct pvr_vm_context *vm_ctx,
+			struct pvr_gem_object *pvr_obj, u64 offset,
+			u64 device_addr, u64 size)
+{
+	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
+	const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
+	const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
+	struct sg_table *sgt;
+	u64 offset_plus_size;
+	int err;
+
+	if (check_add_overflow(offset, size, &offset_plus_size))
+		return -EINVAL;
+
+	if (is_user &&
+	    !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
+		return -EINVAL;
+	}
+
+	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
+	    offset & ~PAGE_MASK || size & ~PAGE_MASK ||
+	    offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
+		return -EINVAL;
+
+	bind_op->type = PVR_VM_BIND_TYPE_MAP;
+
+	dma_resv_lock(obj->resv, NULL);
+	bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
+	dma_resv_unlock(obj->resv);
+	if (IS_ERR(bind_op->gpuvm_bo))
+		return PTR_ERR(bind_op->gpuvm_bo);
+
+	bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
+	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
+	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
+	if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
+		err = -ENOMEM;
+		goto err_bind_op_fini;
+	}
+
+	/* Pin pages so they're ready for use. */
+	sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
+	err = PTR_ERR_OR_ZERO(sgt);
+	if (err)
+		goto err_bind_op_fini;
+
+	bind_op->mmu_op_ctx =
+		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
+	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
+	if (err) {
+		bind_op->mmu_op_ctx = NULL;
+		goto err_bind_op_fini;
+	}
+
+	bind_op->pvr_obj = pvr_obj;
+	bind_op->vm_ctx = vm_ctx;
+	bind_op->device_addr = device_addr;
+	bind_op->size = size;
+	bind_op->offset = offset;
+
+	return 0;
+
+err_bind_op_fini:
+	pvr_vm_bind_op_fini(bind_op);
+
+	return err;
+}
+
+static int
+pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
+			  struct pvr_vm_context *vm_ctx, u64 device_addr,
+			  u64 size)
+{
+	int err;
+
+	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
+		return -EINVAL;
+
+	bind_op->type = PVR_VM_BIND_TYPE_UNMAP;
+
+	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
+	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
+	if (!bind_op->prev_va || !bind_op->next_va) {
+		err = -ENOMEM;
+		goto err_bind_op_fini;
+	}
+
+	bind_op->mmu_op_ctx =
+		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
+	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
+	if (err) {
+		bind_op->mmu_op_ctx = NULL;
+		goto err_bind_op_fini;
+	}
+
+	bind_op->vm_ctx = vm_ctx;
+	bind_op->device_addr = device_addr;
+	bind_op->size = size;
+
+	return 0;
+
+err_bind_op_fini:
+	pvr_vm_bind_op_fini(bind_op);
+
+	return err;
+}
+
+/**
+ * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
+ * @op: gpuva op containing the map details.
+ * @op_ctx: Operation context.
+ *
+ * Context: Called by drm_gpuvm_sm_map following a successful mapping while
+ * @op_ctx.vm_ctx mutex is held.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_mmu_map().
+ */
+static int
+pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
+{
+	struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
+	struct pvr_vm_bind_op *ctx = op_ctx;
+	int err;
+
+	if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
+		return -EINVAL;
+
+	err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
+			  op->map.va.addr);
+	if (err)
+		return err;
+
+	drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
+	drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
+	ctx->new_va = NULL;
+
+	return 0;
+}
+
+/**
+ * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
+ * @op: gpuva op containing the unmap details.
+ * @op_ctx: Operation context.
+ *
+ * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
+ * @op_ctx.vm_ctx mutex is held.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_mmu_unmap().
+ */
+static int
+pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
+{
+	struct pvr_vm_bind_op *ctx = op_ctx;
+
+	int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
+				op->unmap.va->va.range);
+
+	if (err)
+		return err;
+
+	drm_gpuva_unmap(&op->unmap);
+	drm_gpuva_unlink(op->unmap.va);
+
+	return 0;
+}
+
+/**
+ * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
+ * @op: gpuva op containing the remap details.
+ * @op_ctx: Operation context.
+ *
+ * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
+ * mapping or unmapping operation causes a region to be split. The
+ * @op_ctx.vm_ctx mutex is held.
+ *
+ * Return:
+ *  * 0 on success, or
+ *  * Any error returned by pvr_mmu_unmap().
+ */
+static int
+pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
+{
+	struct pvr_vm_bind_op *ctx = op_ctx;
+	u64 va_start = 0, va_range = 0;
+	int err;
+
+	drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
+	err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
+	if (err)
+		return err;
+
+	/* No actual remap required: the page table tree depth is fixed to 3,
+	 * and we use 4k page table entries only for now.
+	 */
+	drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);
+
+	if (op->remap.prev) {
+		pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
+		drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
+		ctx->prev_va = NULL;
+	}
+
+	if (op->remap.next) {
+		pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
+		drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
+		ctx->next_va = NULL;
+	}
+
+	drm_gpuva_unlink(op->remap.unmap->va);
+
+	return 0;
+}
+
+/*
+ * Public API
+ *
+ * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
+ */
+
+/**
+ * pvr_device_addr_is_valid() - Tests whether a device-virtual address
+ * is valid.
+ * @device_addr: Virtual device address to test.
+ *
+ * Return:
+ *  * %true if @device_addr is within the valid range for a device page
+ *    table and is aligned to the device page size, or
+ *  * %false otherwise.
+ */
+bool
+pvr_device_addr_is_valid(u64 device_addr)
+{
+	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
+	       (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
+}
+
+/**
+ * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
+ * address and associated size are both valid.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address to test.
+ * @size: Size of the range based at @device_addr to test.
+ *
+ * Calling pvr_device_addr_is_valid() twice (once on @device_addr, and again
+ * on @device_addr + @size) to verify a device-virtual address range initially
+ * seems intuitive, but it produces a false-negative when the address range
+ * is right at the end of device-virtual address space.
+ *
+ * This function catches that corner case, as well as checking that
+ * @size is non-zero.
+ *
+ * Return:
+ *  * %true if @device_addr is device page aligned; @size is device page
+ *    aligned; the range specified by @device_addr and @size is within the
+ *    bounds of the device-virtual address space, and @size is non-zero, or
+ *  * %false otherwise.
+ */
+bool
+pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
+				   u64 device_addr, u64 size)
+{
+	return pvr_device_addr_is_valid(device_addr) &&
+	       drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
+	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
+	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
+}
+
+static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
+{
+	kfree(to_pvr_vm_context(gpuvm));
+}
+
+static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
+	.vm_free = pvr_gpuvm_free,
+	.sm_step_map = pvr_vm_gpuva_map,
+	.sm_step_remap = pvr_vm_gpuva_remap,
+	.sm_step_unmap = pvr_vm_gpuva_unmap,
+};
+
+static void
+fw_mem_context_init(void *cpu_ptr, void *priv)
+{
+	struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
+	struct pvr_vm_context *vm_ctx = priv;
+
+	fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
+	fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
+}
+
+/**
+ * pvr_vm_create_context() - Create a new VM context.
+ * @pvr_dev: Target PowerVR device.
+ * @is_userspace_context: %true if this context is for userspace. This will
+ *                        create a firmware memory context for the VM context
+ *                        and disable warnings when tearing down mappings.
+ *
+ * Return:
+ *  * A handle to the newly-minted VM context on success,
+ *  * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
+ *    missing or has an unsupported value,
+ *  * -%ENOMEM if allocation of the structure behind the opaque handle fails,
+ *    or
+ *  * Any error encountered while setting up internal structures.
+ */
+struct pvr_vm_context *
+pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
+{
+	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+
+	struct pvr_vm_context *vm_ctx;
+	u16 device_addr_bits;
+
+	int err;
+
+	err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
+				&device_addr_bits);
+	if (err) {
+		drm_err(drm_dev,
+			"Failed to get device virtual address space bits\n");
+		return ERR_PTR(err);
+	}
+
+	if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
+		drm_err(drm_dev,
+			"Device has unsupported virtual address space size\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
+	if (!vm_ctx)
+		return ERR_PTR(-ENOMEM);
+
+	vm_ctx->pvr_dev = pvr_dev;
+
+	vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
+	err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
+	if (err)
+		goto err_free;
+
+	if (is_userspace_context) {
+		err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
+					   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
+					   fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);
+
+		if (err)
+			goto err_page_table_destroy;
+	}
+
+	drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
+	drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
+		       is_userspace_context ?
"PowerVR-user-VM" : "PowerVR-FW-VM", + 0, &pvr_dev->base, &vm_ctx->dummy_gem, + 0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops); + + mutex_init(&vm_ctx->lock); + kref_init(&vm_ctx->ref_count); + + return vm_ctx; + +err_page_table_destroy: + pvr_mmu_context_destroy(vm_ctx->mmu_ctx); + +err_free: + kfree(vm_ctx); + + return ERR_PTR(err); +} + +/** + * pvr_vm_context_release() - Teardown a VM context. + * @ref_count: Pointer to reference counter of the VM context. + * + * This function ensures that no mappings are left dangling by unmapping them + * all in order of ascending device-virtual address. + */ +static void +pvr_vm_context_release(struct kref *ref_count) +{ + struct pvr_vm_context *vm_ctx = + container_of(ref_count, struct pvr_vm_context, ref_count); + + if (vm_ctx->fw_mem_ctx_obj) + pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj); + + WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, + vm_ctx->gpuvm_mgr.mm_range)); + + pvr_mmu_context_destroy(vm_ctx->mmu_ctx); + drm_gem_private_object_fini(&vm_ctx->dummy_gem); + mutex_destroy(&vm_ctx->lock); + + drm_gpuvm_put(&vm_ctx->gpuvm_mgr); +} + +/** + * pvr_vm_context_lookup() - Look up VM context from handle + * @pvr_file: Pointer to pvr_file structure. + * @handle: Object handle. + * + * Takes reference on VM context object. Call pvr_vm_context_put() to release. + * + * Returns: + * * The requested object on success, or + * * %NULL on failure (object does not exist in list, or is not a VM context) + */ +struct pvr_vm_context * +pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle) +{ + struct pvr_vm_context *vm_ctx; + + xa_lock(&pvr_file->vm_ctx_handles); + vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle); + if (vm_ctx) + kref_get(&vm_ctx->ref_count); + + xa_unlock(&pvr_file->vm_ctx_handles); + + return vm_ctx; +} + +/** + * pvr_vm_context_put() - Release a reference on a VM context + * @vm_ctx: Target VM context. + * + * Returns: + * * %true if the VM context was destroyed, or + * * %false if there are any references still remaining. + */ +bool +pvr_vm_context_put(struct pvr_vm_context *vm_ctx) +{ + if (vm_ctx) + return kref_put(&vm_ctx->ref_count, pvr_vm_context_release); + + return true; +} + +/** + * pvr_destroy_vm_contexts_for_file: Destroy any VM contexts associated with the + * given file. + * @pvr_file: Pointer to pvr_file structure. + * + * Removes all vm_contexts associated with @pvr_file from the device VM context + * list and drops initial references. vm_contexts will then be destroyed once + * all outstanding references are dropped. + */ +void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file) +{ + struct pvr_vm_context *vm_ctx; + unsigned long handle; + + xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) { + /* vm_ctx is not used here because that would create a race with xa_erase */ + pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle)); + } +} + +static int +pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec) +{ + struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv; + struct pvr_gem_object *pvr_obj = bind_op->pvr_obj; + + /* Unmap operations don't have an object to lock. */ + if (!pvr_obj) + return 0; + + /* Acquire lock on the GEM being mapped. */ + return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj)); +} + +/** + * pvr_vm_map() - Map a section of physical memory into a section of + * device-virtual memory. + * @vm_ctx: Target VM context. + * @pvr_obj: Target PowerVR memory object. + * @pvr_obj_offset: Offset into @pvr_obj to map from. 
+ * @device_addr: Virtual device address at the start of the requested mapping.
+ * @size: Size of the requested mapping.
+ *
+ * No handle is returned to represent the mapping. Instead, callers should
+ * remember @device_addr and use that as a handle.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
+ *    address; the region specified by @pvr_obj_offset and @size does not fall
+ *    entirely within @pvr_obj, or any part of the specified region of @pvr_obj
+ *    is not device-virtual page-aligned,
+ *  * Any error encountered while performing internal operations required to
+ *    create the mapping (returned from pvr_vm_gpuva_map or
+ *    pvr_vm_gpuva_remap).
+ */
+int
+pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+	   u64 pvr_obj_offset, u64 device_addr, u64 size)
+{
+	struct pvr_vm_bind_op bind_op = {0};
+	struct drm_gpuvm_exec vm_exec = {
+		.vm = &vm_ctx->gpuvm_mgr,
+		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
+			 DRM_EXEC_IGNORE_DUPLICATES,
+		.extra = {
+			.fn = pvr_vm_lock_extra,
+			.priv = &bind_op,
+		},
+	};
+
+	int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
+					  pvr_obj_offset, device_addr,
+					  size);
+
+	if (err)
+		return err;
+
+	pvr_gem_object_get(pvr_obj);
+
+	err = drm_gpuvm_exec_lock(&vm_exec);
+	if (err)
+		goto err_cleanup;
+
+	err = pvr_vm_bind_op_exec(&bind_op);
+
+	drm_gpuvm_exec_unlock(&vm_exec);
+
+err_cleanup:
+	pvr_vm_bind_op_fini(&bind_op);
+
+	return err;
+}
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
+ *    address,
+ *  * Any error encountered while performing internal operations required to
+ *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
+ *    pvr_vm_gpuva_remap).
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+{
+	struct pvr_vm_bind_op bind_op = {0};
+	struct drm_gpuvm_exec vm_exec = {
+		.vm = &vm_ctx->gpuvm_mgr,
+		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
+			 DRM_EXEC_IGNORE_DUPLICATES,
+		.extra = {
+			.fn = pvr_vm_lock_extra,
+			.priv = &bind_op,
+		},
+	};
+
+	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
+					    size);
+	if (err)
+		return err;
+
+	err = drm_gpuvm_exec_lock(&vm_exec);
+	if (err)
+		goto err_cleanup;
+
+	err = pvr_vm_bind_op_exec(&bind_op);
+
+	drm_gpuvm_exec_unlock(&vm_exec);
+
+err_cleanup:
+	pvr_vm_bind_op_fini(&bind_op);
+
+	return err;
+}
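Since no handle object is returned, a caller drives the pair purely by device-virtual address. A minimal usage sketch (hypothetical caller state, error handling trimmed):

	/* Map the whole of pvr_obj at a caller-chosen, heap-valid address. */
	err = pvr_vm_map(vm_ctx, pvr_obj, 0, device_addr, size);
	if (err)
		return err;

	/* ... the GPU can now use [device_addr, device_addr + size) ... */

	err = pvr_vm_unmap(vm_ctx, device_addr, size);

+
+/* Static data areas are determined by firmware.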
*/ +static const struct drm_pvr_static_data_area static_data_areas[] = { + { + .area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE, + .location_heap_id = DRM_PVR_HEAP_GENERAL, + .offset = 0, + .size = 128, + }, + { + .area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC, + .location_heap_id = DRM_PVR_HEAP_GENERAL, + .offset = 128, + .size = 1024, + }, + { + .area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC, + .location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA, + .offset = 0, + .size = 128, + }, + { + .area_usage = DRM_PVR_STATIC_DATA_AREA_EOT, + .location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA, + .offset = 128, + .size = 128, + }, + { + .area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC, + .location_heap_id = DRM_PVR_HEAP_USC_CODE, + .offset = 0, + .size = 128, + }, +}; + +#define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE) + +/* + * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding + * static data area for each heap. + */ +static const struct drm_pvr_heap pvr_heaps[] = { + [DRM_PVR_HEAP_GENERAL] = { + .base = ROGUE_GENERAL_HEAP_BASE, + .size = ROGUE_GENERAL_HEAP_SIZE, + .flags = 0, + .page_size_log2 = PVR_DEVICE_PAGE_SHIFT, + }, + [DRM_PVR_HEAP_PDS_CODE_DATA] = { + .base = ROGUE_PDSCODEDATA_HEAP_BASE, + .size = ROGUE_PDSCODEDATA_HEAP_SIZE, + .flags = 0, + .page_size_log2 = PVR_DEVICE_PAGE_SHIFT, + }, + [DRM_PVR_HEAP_USC_CODE] = { + .base = ROGUE_USCCODE_HEAP_BASE, + .size = ROGUE_USCCODE_HEAP_SIZE, + .flags = 0, + .page_size_log2 = PVR_DEVICE_PAGE_SHIFT, + }, + [DRM_PVR_HEAP_RGNHDR] = { + .base = ROGUE_RGNHDR_HEAP_BASE, + .size = ROGUE_RGNHDR_HEAP_SIZE, + .flags = 0, + .page_size_log2 = PVR_DEVICE_PAGE_SHIFT, + }, + [DRM_PVR_HEAP_VIS_TEST] = { + .base = ROGUE_VISTEST_HEAP_BASE, + .size = ROGUE_VISTEST_HEAP_SIZE, + .flags = 0, + .page_size_log2 = PVR_DEVICE_PAGE_SHIFT, + }, + [DRM_PVR_HEAP_TRANSFER_FRAG] = { + .base = ROGUE_TRANSFER_FRAG_HEAP_BASE, + .size = ROGUE_TRANSFER_FRAG_HEAP_SIZE, + .flags = 0, + .page_size_log2 = PVR_DEVICE_PAGE_SHIFT, + }, +}; + +int +pvr_static_data_areas_get(const struct pvr_device *pvr_dev, + struct drm_pvr_ioctl_dev_query_args *args) +{ + struct drm_pvr_dev_query_static_data_areas query = {0}; + int err; + + if (!args->pointer) { + args->size = sizeof(struct drm_pvr_dev_query_static_data_areas); + return 0; + } + + err = PVR_UOBJ_GET(query, args->size, args->pointer); + if (err < 0) + return err; + + if (!query.static_data_areas.array) { + query.static_data_areas.count = ARRAY_SIZE(static_data_areas); + query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area); + goto copy_out; + } + + if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas)) + query.static_data_areas.count = ARRAY_SIZE(static_data_areas); + + err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas); + if (err < 0) + return err; + +copy_out: + err = PVR_UOBJ_SET(args->pointer, args->size, query); + if (err < 0) + return err; + + args->size = sizeof(query); + return 0; +} + +int +pvr_heap_info_get(const struct pvr_device *pvr_dev, + struct drm_pvr_ioctl_dev_query_args *args) +{ + struct drm_pvr_dev_query_heap_info query = {0}; + u64 dest; + int err; + + if (!args->pointer) { + args->size = sizeof(struct drm_pvr_dev_query_heap_info); + return 0; + } + + err = PVR_UOBJ_GET(query, args->size, args->pointer); + if (err < 0) + return err; + + if (!query.heaps.array) { + query.heaps.count = ARRAY_SIZE(pvr_heaps); + query.heaps.stride = sizeof(struct drm_pvr_heap); + goto copy_out; + } + + if 
(query.heaps.count > ARRAY_SIZE(pvr_heaps)) + query.heaps.count = ARRAY_SIZE(pvr_heaps); + + /* Region header heap is only present if BRN63142 is present. */ + dest = query.heaps.array; + for (size_t i = 0; i < query.heaps.count; i++) { + struct drm_pvr_heap heap = pvr_heaps[i]; + + if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142)) + heap.size = 0; + + err = PVR_UOBJ_SET(dest, query.heaps.stride, heap); + if (err < 0) + return err; + + dest += query.heaps.stride; + } + +copy_out: + err = PVR_UOBJ_SET(args->pointer, args->size, query); + if (err < 0) + return err; + + args->size = sizeof(query); + return 0; +} + +/** + * pvr_heap_contains_range() - Determine if a given heap contains the specified + * device-virtual address range. + * @pvr_heap: Target heap. + * @start: Inclusive start of the target range. + * @end: Inclusive end of the target range. + * + * It is an error to call this function with values of @start and @end that do + * not satisfy the condition @start <= @end. + */ +static __always_inline bool +pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end) +{ + return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size; +} + +/** + * pvr_find_heap_containing() - Find a heap which contains the specified + * device-virtual address range. + * @pvr_dev: Target PowerVR device. + * @start: Start of the target range. + * @size: Size of the target range. + * + * Return: + * * A pointer to a constant instance of struct drm_pvr_heap representing the + * heap containing the entire range specified by @start and @size on + * success, or + * * %NULL if no such heap exists. + */ +const struct drm_pvr_heap * +pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size) +{ + u64 end; + + if (check_add_overflow(start, size - 1, &end)) + return NULL; + + /* + * There are no guarantees about the order of address ranges in + * &pvr_heaps, so iterate over the entire array for a heap whose + * range completely encompasses the given range. + */ + for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) { + /* Filter heaps that present only with an associated quirk */ + if (heap_id == DRM_PVR_HEAP_RGNHDR && + !PVR_HAS_QUIRK(pvr_dev, 63142)) { + continue; + } + + if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end)) + return &pvr_heaps[heap_id]; + } + + return NULL; +} + +/** + * pvr_vm_find_gem_object() - Look up a buffer object from a given + * device-virtual address. + * @vm_ctx: [IN] Target VM context. + * @device_addr: [IN] Virtual device address at the start of the required + * object. + * @mapped_offset_out: [OUT] Pointer to location to write offset of the start + * of the mapped region within the buffer object. May be + * %NULL if this information is not required. + * @mapped_size_out: [OUT] Pointer to location to write size of the mapped + * region. May be %NULL if this information is not required. + * + * If successful, a reference will be taken on the buffer object. The caller + * must drop the reference with pvr_gem_object_put(). + * + * Return: + * * The PowerVR buffer object mapped at @device_addr if one exists, or + * * %NULL otherwise. 
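+ */
+struct pvr_gem_object *
+pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
+		       u64 *mapped_offset_out, u64 *mapped_size_out)
+{
+	struct pvr_gem_object *pvr_obj;
+	struct drm_gpuva *va;
+
+	mutex_lock(&vm_ctx->lock);
+
+	va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
+	if (!va)
+		goto err_unlock;
+
+	pvr_obj = gem_to_pvr_gem(va->gem.obj);
+	pvr_gem_object_get(pvr_obj);
+
+	if (mapped_offset_out)
+		*mapped_offset_out = va->gem.offset;
+	if (mapped_size_out)
+		*mapped_size_out = va->va.range;
+
+	mutex_unlock(&vm_ctx->lock);
+
+	return pvr_obj;
+
+err_unlock:
+	mutex_unlock(&vm_ctx->lock);
+
+	return NULL;
+}

A successful lookup hands back an owned reference, so every hit must be balanced with a put. Illustrative usage:

	u64 offset, size;
	struct pvr_gem_object *obj;

	obj = pvr_vm_find_gem_object(vm_ctx, device_addr, &offset, &size);
	if (!obj)
		return -ENOENT; /* nothing mapped at device_addr */

	/* ... the mapping covers [offset, offset + size) of obj ... */

	pvr_gem_object_put(obj);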
+
+/**
+ * pvr_vm_get_fw_mem_context: Get object representing firmware memory context
+ * @vm_ctx: Target VM context.
+ *
+ * Returns:
+ *  * FW object representing firmware memory context, or
+ *  * %NULL if this VM context does not have a firmware memory context.
+ */
+struct pvr_fw_object *
+pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
+{
+	return vm_ctx->fw_mem_ctx_obj;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
new file mode 100644
index 0000000000..f2a6463f2b
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_VM_H
+#define PVR_VM_H
+
+#include "pvr_rogue_mmu_defs.h"
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <linux/types.h>
+
+/* Forward declaration from "pvr_device.h" */
+struct pvr_device;
+struct pvr_file;
+
+/* Forward declaration from "pvr_gem.h" */
+struct pvr_gem_object;
+
+/* Forward declaration from "pvr_vm.c" */
+struct pvr_vm_context;
+
+/* Forward declaration from <uapi/drm/pvr_drm.h> */
+struct drm_pvr_ioctl_get_heap_info_args;
+
+/* Forward declaration from <drm/drm_exec.h> */
+struct drm_exec;
+
+/* Functions defined in pvr_vm.c */
+
+bool pvr_device_addr_is_valid(u64 device_addr);
+bool pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
+					u64 device_addr, u64 size);
+
+struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
+					     bool is_userspace_context);
+
+int pvr_vm_map(struct pvr_vm_context *vm_ctx,
+	       struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
+	       u64 device_addr, u64 size);
+int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
+
+dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx);
+struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx);
+
+int pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
+			      struct drm_pvr_ioctl_dev_query_args *args);
+int pvr_heap_info_get(const struct pvr_device *pvr_dev,
+		      struct drm_pvr_ioctl_dev_query_args *args);
+const struct drm_pvr_heap *pvr_find_heap_containing(struct pvr_device *pvr_dev,
+						    u64 addr, u64 size);
+
+struct pvr_gem_object *pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx,
+					      u64 device_addr,
+					      u64 *mapped_offset_out,
+					      u64 *mapped_size_out);
+
+struct pvr_fw_object *
+pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx);
+
+struct pvr_vm_context *pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle);
+struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx);
+bool pvr_vm_context_put(struct pvr_vm_context *vm_ctx);
+void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file);
+
+#endif /* PVR_VM_H */
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
new file mode 100644
index 0000000000..b7fef3c797
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#include "pvr_device.h"
+#include "pvr_fw_mips.h"
+#include "pvr_gem.h"
+#include "pvr_mmu.h"
+#include "pvr_rogue_mips.h"
+#include "pvr_vm.h"
+#include "pvr_vm_mips.h"
+
+#include
+#include
+#include
+#include
+#include
+
+/**
+ * pvr_vm_mips_init() - Initialise MIPS FW pagetable
+ * @pvr_dev: Target PowerVR device.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * -%EINVAL,
+ *  * Any error returned by pvr_gem_object_create(), or
+ *  * Any error returned by pvr_gem_object_vmap().
+ */
+int
+pvr_vm_mips_init(struct pvr_device *pvr_dev)
+{
+	u32 pt_size = 1 << ROGUE_MIPSFW_LOG2_PAGETABLE_SIZE_4K(pvr_dev);
+	struct device *dev = from_pvr_device(pvr_dev)->dev;
+	struct pvr_fw_mips_data *mips_data;
+	u32 phys_bus_width;
+	int page_nr;
+	int err;
+
+	/* Page table size must be at most ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * 4k pages. */
+	if (pt_size > ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * SZ_4K)
+		return -EINVAL;
+
+	if (PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width))
+		return -EINVAL;
+
+	mips_data = drmm_kzalloc(from_pvr_device(pvr_dev), sizeof(*mips_data), GFP_KERNEL);
+	if (!mips_data)
+		return -ENOMEM;
+
+	for (page_nr = 0; page_nr < ARRAY_SIZE(mips_data->pt_pages); page_nr++) {
+		mips_data->pt_pages[page_nr] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!mips_data->pt_pages[page_nr]) {
+			err = -ENOMEM;
+			goto err_free_pages;
+		}
+
+		mips_data->pt_dma_addr[page_nr] = dma_map_page(dev, mips_data->pt_pages[page_nr], 0,
+							       PAGE_SIZE, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, mips_data->pt_dma_addr[page_nr])) {
+			err = -ENOMEM;
+			__free_page(mips_data->pt_pages[page_nr]);
+			goto err_free_pages;
+		}
+	}
+
+	mips_data->pt = vmap(mips_data->pt_pages, pt_size >> PAGE_SHIFT, VM_MAP,
+			     pgprot_writecombine(PAGE_KERNEL));
+	if (!mips_data->pt) {
+		err = -ENOMEM;
+		goto err_free_pages;
+	}
+
+	mips_data->pfn_mask = (phys_bus_width > 32) ? ROGUE_MIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT :
+						      ROGUE_MIPSFW_ENTRYLO_PFN_MASK;
+
+	mips_data->cache_policy = (phys_bus_width > 32) ? ROGUE_MIPSFW_CACHED_POLICY_ABOVE_32BIT :
+							  ROGUE_MIPSFW_CACHED_POLICY;
+
+	pvr_dev->fw_dev.processor_data.mips_data = mips_data;
+
+	return 0;
+
+err_free_pages:
+	while (--page_nr >= 0) {
+		dma_unmap_page(from_pvr_device(pvr_dev)->dev,
+			       mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
+
+		__free_page(mips_data->pt_pages[page_nr]);
+	}
+
+	return err;
+}
+
+/**
+ * pvr_vm_mips_fini() - Release MIPS FW pagetable
+ * @pvr_dev: Target PowerVR device.
+ */
+void
+pvr_vm_mips_fini(struct pvr_device *pvr_dev)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+	int page_nr;
+
+	vunmap(mips_data->pt);
+	for (page_nr = ARRAY_SIZE(mips_data->pt_pages) - 1; page_nr >= 0; page_nr--) {
+		dma_unmap_page(from_pvr_device(pvr_dev)->dev,
+			       mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
+
+		__free_page(mips_data->pt_pages[page_nr]);
+	}
+
+	fw_dev->processor_data.mips_data = NULL;
+}
+
+static u32
+get_mips_pte_flags(bool read, bool write, u32 cache_policy)
+{
+	u32 flags = 0;
+
+	if (read && write) /* Read/write. */
+		flags |= ROGUE_MIPSFW_ENTRYLO_DIRTY_EN;
+	else if (write) /* Write only. */
+		flags |= ROGUE_MIPSFW_ENTRYLO_READ_INHIBIT_EN;
+	else
+		WARN_ON(!read);
+
+	flags |= cache_policy << ROGUE_MIPSFW_ENTRYLO_CACHE_POLICY_SHIFT;
+
+	flags |= ROGUE_MIPSFW_ENTRYLO_VALID_EN | ROGUE_MIPSFW_ENTRYLO_GLOBAL_EN;
+
+	return flags;
+}
+
+/**
+ * pvr_vm_mips_map() - Map a FW object into MIPS address space
+ * @pvr_dev: Target PowerVR device.
+ * @fw_obj: FW object to map.
+ *
+ * Returns:
+ *  * 0 on success,
+ *  * -%EINVAL if object does not reside within FW address space, or
+ *  * Any error returned by pvr_fw_object_get_dma_addr().
+ */
+int
+pvr_vm_mips_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
+{
+	struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev;
+	struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data;
+	struct pvr_gem_object *pvr_obj = fw_obj->gem;
+	const u64 start = fw_obj->fw_mm_node.start;
+	const u64 size = fw_obj->fw_mm_node.size;
+	u64 end;
+	u32 cache_policy;
+	u32 pte_flags;
+	s32 start_pfn;
+	s32 end_pfn;
+	s32 pfn;
+	int err;
+
+	if (check_add_overflow(start, size - 1, &end))
+		return -EINVAL;
+
+	if (start < ROGUE_FW_HEAP_BASE ||
+	    start >= ROGUE_FW_HEAP_BASE + fw_dev->fw_heap_info.raw_size ||
+	    end < ROGUE_FW_HEAP_BASE ||
+	    end >= ROGUE_FW_HEAP_BASE + fw_dev->fw_heap_info.raw_size ||
+	    (start & ROGUE_MIPSFW_PAGE_MASK_4K) ||
+	    ((end + 1) & ROGUE_MIPSFW_PAGE_MASK_4K))
+		return -EINVAL;
+
+	start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+	end_pfn = (end & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;
+
+	if (pvr_obj->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED)
+		cache_policy = ROGUE_MIPSFW_UNCACHED_CACHE_POLICY;
+	else
+		cache_policy = mips_data->cache_policy;
+
+	pte_flags = get_mips_pte_flags(true, true, cache_policy);
+
+	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
+		dma_addr_t dma_addr;
+		u32 pte;
+
+		err = pvr_fw_object_get_dma_addr(fw_obj,
+						 (pfn - start_pfn) <<
+						 ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K,
+						 &dma_addr);
+		if (err)
+			goto err_unmap_pages;
+
+		pte = ((dma_addr >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K)
+		       << ROGUE_MIPSFW_ENTRYLO_PFN_SHIFT) & mips_data->pfn_mask;
+		pte |= pte_flags;
+
+		WRITE_ONCE(mips_data->pt[pfn], pte);
+	}
+
+	pvr_mmu_flush_request_all(pvr_dev);
+
+	return 0;
+
+err_unmap_pages:
+	while (--pfn >= start_pfn)
+		WRITE_ONCE(mips_data->pt[pfn], 0);
+
+	pvr_mmu_flush_request_all(pvr_dev);
+	WARN_ON(pvr_mmu_flush_exec(pvr_dev, true));
+
+	return err;
+}
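The MIPS firmware page table is a flat array of 32-bit EntryLo-style PTEs indexed by page number within the FW heap, so the map/unmap arithmetic is plain shifts. A quick worked example with illustrative numbers (ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K is 12 for 4K pages):

	/* FW object at fw-heap offset 0x6000, size 0x3000 (three 4K pages). */
	const u64 start = 0x6000, size = 0x3000;
	const u64 end = start + size - 1;   /* 0x8fff, inclusive */
	const u32 start_pfn = start >> 12;  /* 6 */
	const u32 end_pfn = end >> 12;      /* 8: PTEs 6, 7 and 8 get written */

+
+/**
+ * pvr_vm_mips_unmap() - Unmap a FW object from MIPS address space
+ * @pvr_dev: Target PowerVR device.
+ * @fw_obj: FW object to unmap.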
+ */ +void +pvr_vm_mips_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj) +{ + struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; + struct pvr_fw_mips_data *mips_data = fw_dev->processor_data.mips_data; + const u64 start = fw_obj->fw_mm_node.start; + const u64 size = fw_obj->fw_mm_node.size; + const u64 end = start + size; + + const u32 start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >> + ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K; + const u32 end_pfn = (end & fw_dev->fw_heap_info.offset_mask) >> + ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K; + + for (u32 pfn = start_pfn; pfn < end_pfn; pfn++) + WRITE_ONCE(mips_data->pt[pfn], 0); + + pvr_mmu_flush_request_all(pvr_dev); + WARN_ON(pvr_mmu_flush_exec(pvr_dev, true)); +} diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.h b/drivers/gpu/drm/imagination/pvr_vm_mips.h new file mode 100644 index 0000000000..0fd59f68fb --- /dev/null +++ b/drivers/gpu/drm/imagination/pvr_vm_mips.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright (c) 2023 Imagination Technologies Ltd. */ + +#ifndef PVR_VM_MIPS_H +#define PVR_VM_MIPS_H + +/* Forward declaration from pvr_device.h. */ +struct pvr_device; + +/* Forward declaration from pvr_gem.h. */ +struct pvr_fw_object; + +int +pvr_vm_mips_init(struct pvr_device *pvr_dev); +void +pvr_vm_mips_fini(struct pvr_device *pvr_dev); +int +pvr_vm_mips_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj); +void +pvr_vm_mips_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj); + +#endif /* PVR_VM_MIPS_H */ diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c index b61cec0cc7..ad5f29ea8f 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-drv.c +++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c @@ -80,7 +80,7 @@ err: return err; } -static int dcss_drv_platform_remove(struct platform_device *pdev) +static void dcss_drv_platform_remove(struct platform_device *pdev) { struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev); @@ -88,8 +88,6 @@ static int dcss_drv_platform_remove(struct platform_device *pdev) dcss_dev_destroy(mdrv->dcss); kfree(mdrv); - - return 0; } static void dcss_drv_platform_shutdown(struct platform_device *pdev) @@ -120,7 +118,7 @@ MODULE_DEVICE_TABLE(of, dcss_of_match); static struct platform_driver dcss_platform_driver = { .probe = dcss_drv_platform_probe, - .remove = dcss_drv_platform_remove, + .remove_new = dcss_drv_platform_remove, .shutdown = dcss_drv_platform_shutdown, .driver = { .name = "imx-dcss", diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c index 989eca32d3..53840ab054 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c @@ -12,8 +12,10 @@ #include #include #include -#include +#include #include +#include +#include #include #include @@ -617,7 +619,6 @@ static int imx_ldb_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev); struct device_node *child; struct imx_ldb *imx_ldb; int dual; @@ -638,9 +639,7 @@ static int imx_ldb_probe(struct platform_device *pdev) regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0); imx_ldb->dev = dev; - - if (of_id) - imx_ldb->lvds_mux = of_id->data; + imx_ldb->lvds_mux = device_get_match_data(dev); dual = of_property_read_bool(np, "fsl,dual-channel"); if (dual) diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c index 4beb3b4bd6..43ddf3a981 100644 --- 
a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c +++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c @@ -506,14 +506,12 @@ static int imx_lcdc_probe(struct platform_device *pdev) return 0; } -static int imx_lcdc_remove(struct platform_device *pdev) +static void imx_lcdc_remove(struct platform_device *pdev) { struct drm_device *drm = platform_get_drvdata(pdev); drm_dev_unregister(drm); drm_atomic_helper_shutdown(drm); - - return 0; } static void imx_lcdc_shutdown(struct platform_device *pdev) @@ -527,7 +525,7 @@ static struct platform_driver imx_lcdc_driver = { .of_match_table = imx_lcdc_of_dev_id, }, .probe = imx_lcdc_probe, - .remove = imx_lcdc_remove, + .remove_new = imx_lcdc_remove, .shutdown = imx_lcdc_shutdown, }; module_platform_driver(imx_lcdc_driver); diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index 24035b5344..169b83987c 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -448,7 +448,7 @@ static const struct drm_driver kmb_driver = { .minor = DRIVER_MINOR, }; -static int kmb_remove(struct platform_device *pdev) +static void kmb_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct drm_device *drm = dev_get_drvdata(dev); @@ -473,7 +473,6 @@ static int kmb_remove(struct platform_device *pdev) /* Unregister DSI host */ kmb_dsi_host_unregister(kmb->kmb_dsi); drm_atomic_helper_shutdown(drm); - return 0; } static int kmb_probe(struct platform_device *pdev) @@ -621,7 +620,7 @@ static SIMPLE_DEV_PM_OPS(kmb_pm_ops, kmb_pm_suspend, kmb_pm_resume); static struct platform_driver kmb_platform_driver = { .probe = kmb_probe, - .remove = kmb_remove, + .remove_new = kmb_remove, .driver = { .name = "kmb-drm", .pm = &kmb_pm_ops, diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c index 891d5cd501..8389f2d7d0 100644 --- a/drivers/gpu/drm/lima/lima_ctx.c +++ b/drivers/gpu/drm/lima/lima_ctx.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /* Copyright 2018-2019 Qiang Yu */ +#include #include #include "lima_device.h" diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c index 02cef0cea6..0bf7105c87 100644 --- a/drivers/gpu/drm/lima/lima_device.c +++ b/drivers/gpu/drm/lima/lima_device.c @@ -514,7 +514,7 @@ int lima_device_suspend(struct device *dev) /* check any task running */ for (i = 0; i < lima_pipe_num; i++) { - if (atomic_read(&ldev->pipe[i].base.hw_rq_count)) + if (atomic_read(&ldev->pipe[i].base.credit_count)) return -EBUSY; } diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 295f0353a0..c3bf8cda84 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -123,7 +123,7 @@ int lima_sched_task_init(struct lima_sched_task *task, for (i = 0; i < num_bos; i++) drm_gem_object_get(&bos[i]->base.base); - err = drm_sched_job_init(&task->base, &context->base, vm); + err = drm_sched_job_init(&task->base, &context->base, 1, vm); if (err) { kfree(task->bos); return err; @@ -488,7 +488,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name) INIT_WORK(&pipe->recover_work, lima_sched_recover_work); - return drm_sched_init(&pipe->base, &lima_sched_ops, + return drm_sched_init(&pipe->base, &lima_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, 1, lima_job_hang_limit, diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig index df6946d505..8e59753e53 100644 --- a/drivers/gpu/drm/loongson/Kconfig +++ b/drivers/gpu/drm/loongson/Kconfig @@ -3,6 +3,7 @@ config DRM_LOONGSON 
tristate "DRM support for Loongson Graphics" depends on DRM && PCI && MMU + depends on LOONGARCH || MIPS || COMPILE_TEST select DRM_KMS_HELPER select DRM_TTM select I2C diff --git a/drivers/gpu/drm/loongson/lsdc_i2c.c b/drivers/gpu/drm/loongson/lsdc_i2c.c index 9625d0b1d0..ce90c25536 100644 --- a/drivers/gpu/drm/loongson/lsdc_i2c.c +++ b/drivers/gpu/drm/loongson/lsdc_i2c.c @@ -154,7 +154,6 @@ int lsdc_create_i2c_chan(struct drm_device *ddev, adapter = &li2c->adapter; adapter->algo_data = &li2c->bit; adapter->owner = THIS_MODULE; - adapter->class = I2C_CLASS_DDC; adapter->dev.parent = ddev->dev; adapter->nr = -1; diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c index 0d50946332..d227a2c1dc 100644 --- a/drivers/gpu/drm/loongson/lsdc_plane.c +++ b/drivers/gpu/drm/loongson/lsdc_plane.c @@ -9,7 +9,6 @@ #include #include #include -#include #include "lsdc_drv.h" #include "lsdc_regs.h" diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index d4d193f602..5e4436403b 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -16,7 +16,8 @@ mediatek-drm-y := mtk_disp_aal.o \ mtk_dsi.o \ mtk_dpi.o \ mtk_ethdr.o \ - mtk_mdp_rdma.o + mtk_mdp_rdma.o \ + mtk_padding.o obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c index f47f417d8b..8519e9bade 100644 --- a/drivers/gpu/drm/mediatek/mtk_cec.c +++ b/drivers/gpu/drm/mediatek/mtk_cec.c @@ -185,7 +185,6 @@ static int mtk_cec_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_cec *cec; - struct resource *res; int ret; cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL); @@ -195,8 +194,7 @@ static int mtk_cec_probe(struct platform_device *pdev) platform_set_drvdata(pdev, cec); spin_lock_init(&cec->lock); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - cec->regs = devm_ioremap_resource(dev, res); + cec->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(cec->regs)) { ret = PTR_ERR(cec->regs); dev_err(dev, "Failed to ioremap cec: %d\n", ret); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c index 2209159d88..40fe403086 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_aal.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c @@ -168,7 +168,6 @@ static int mtk_disp_aal_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_disp_aal *priv; - struct resource *res; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -181,8 +180,7 @@ static int mtk_disp_aal_probe(struct platform_device *pdev) return PTR_ERR(priv->clk); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->regs = devm_ioremap_resource(dev, res); + priv->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->regs)) { dev_err(dev, "failed to ioremap aal\n"); return PTR_ERR(priv->regs); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c index 4234ff7485..465cddce0d 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c @@ -153,7 +153,6 @@ static int mtk_disp_ccorr_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_disp_ccorr *priv; - struct resource *res; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -166,8 +165,7 @@ static int mtk_disp_ccorr_probe(struct platform_device *pdev) return PTR_ERR(priv->clk); } - res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->regs = devm_ioremap_resource(dev, res); + priv->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->regs)) { dev_err(dev, "failed to ioremap ccorr\n"); return PTR_ERR(priv->regs); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h index 1311562d25..74fa563393 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -110,6 +110,8 @@ void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, unsigned int next); void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, unsigned int next); +int mtk_ovl_adaptor_power_on(struct device *dev); +void mtk_ovl_adaptor_power_off(struct device *dev); int mtk_ovl_adaptor_clk_enable(struct device *dev); void mtk_ovl_adaptor_clk_disable(struct device *dev); void mtk_ovl_adaptor_config(struct device *dev, unsigned int w, @@ -151,6 +153,8 @@ void mtk_rdma_disable_vblank(struct device *dev); const u32 *mtk_rdma_get_formats(struct device *dev); size_t mtk_rdma_get_num_formats(struct device *dev); +int mtk_mdp_rdma_power_on(struct device *dev); +void mtk_mdp_rdma_power_off(struct device *dev); int mtk_mdp_rdma_clk_enable(struct device *dev); void mtk_mdp_rdma_clk_disable(struct device *dev); void mtk_mdp_rdma_start(struct device *dev, struct cmdq_pkt *cmdq_pkt); @@ -160,4 +164,8 @@ void mtk_mdp_rdma_config(struct device *dev, struct mtk_mdp_rdma_cfg *cfg, const u32 *mtk_mdp_rdma_get_formats(struct device *dev); size_t mtk_mdp_rdma_get_num_formats(struct device *dev); +int mtk_padding_clk_enable(struct device *dev); +void mtk_padding_clk_disable(struct device *dev); +void mtk_padding_start(struct device *dev); +void mtk_padding_stop(struct device *dev); #endif diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c index 6bf6367853..12a37f740b 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c @@ -27,13 +27,14 @@ #define MTK_OVL_ADAPTOR_LAYER_NUM 4 enum mtk_ovl_adaptor_comp_type { - OVL_ADAPTOR_TYPE_RDMA = 0, - OVL_ADAPTOR_TYPE_MERGE, OVL_ADAPTOR_TYPE_ETHDR, + OVL_ADAPTOR_TYPE_MDP_RDMA, + OVL_ADAPTOR_TYPE_MERGE, OVL_ADAPTOR_TYPE_NUM, }; enum mtk_ovl_adaptor_comp_id { + OVL_ADAPTOR_ETHDR0, OVL_ADAPTOR_MDP_RDMA0, OVL_ADAPTOR_MDP_RDMA1, OVL_ADAPTOR_MDP_RDMA2, @@ -46,13 +47,14 @@ enum mtk_ovl_adaptor_comp_id { OVL_ADAPTOR_MERGE1, OVL_ADAPTOR_MERGE2, OVL_ADAPTOR_MERGE3, - OVL_ADAPTOR_ETHDR0, OVL_ADAPTOR_ID_MAX }; struct ovl_adaptor_comp_match { enum mtk_ovl_adaptor_comp_type type; + enum mtk_ddp_comp_id comp_id; int alias_id; + const struct mtk_ddp_comp_funcs *funcs; }; struct mtk_disp_ovl_adaptor { @@ -62,25 +64,44 @@ struct mtk_disp_ovl_adaptor { }; static const char * const private_comp_stem[OVL_ADAPTOR_TYPE_NUM] = { - [OVL_ADAPTOR_TYPE_RDMA] = "vdo1-rdma", - [OVL_ADAPTOR_TYPE_MERGE] = "merge", [OVL_ADAPTOR_TYPE_ETHDR] = "ethdr", + [OVL_ADAPTOR_TYPE_MDP_RDMA] = "vdo1-rdma", + [OVL_ADAPTOR_TYPE_MERGE] = "merge", +}; + +static const struct mtk_ddp_comp_funcs ethdr = { + .clk_enable = mtk_ethdr_clk_enable, + .clk_disable = mtk_ethdr_clk_disable, + .start = mtk_ethdr_start, + .stop = mtk_ethdr_stop, +}; + +static const struct mtk_ddp_comp_funcs merge = { + .clk_enable = mtk_merge_clk_enable, + .clk_disable = mtk_merge_clk_disable, +}; + +static const struct mtk_ddp_comp_funcs rdma = { + .power_on = mtk_mdp_rdma_power_on, + .power_off = mtk_mdp_rdma_power_off, 
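+ /* the vdo1-rdma devices sit in their own power domain, hence the runtime-PM power hooks above */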
+ .clk_enable = mtk_mdp_rdma_clk_enable, + .clk_disable = mtk_mdp_rdma_clk_disable, }; static const struct ovl_adaptor_comp_match comp_matches[OVL_ADAPTOR_ID_MAX] = { - [OVL_ADAPTOR_MDP_RDMA0] = { OVL_ADAPTOR_TYPE_RDMA, 0 }, - [OVL_ADAPTOR_MDP_RDMA1] = { OVL_ADAPTOR_TYPE_RDMA, 1 }, - [OVL_ADAPTOR_MDP_RDMA2] = { OVL_ADAPTOR_TYPE_RDMA, 2 }, - [OVL_ADAPTOR_MDP_RDMA3] = { OVL_ADAPTOR_TYPE_RDMA, 3 }, - [OVL_ADAPTOR_MDP_RDMA4] = { OVL_ADAPTOR_TYPE_RDMA, 4 }, - [OVL_ADAPTOR_MDP_RDMA5] = { OVL_ADAPTOR_TYPE_RDMA, 5 }, - [OVL_ADAPTOR_MDP_RDMA6] = { OVL_ADAPTOR_TYPE_RDMA, 6 }, - [OVL_ADAPTOR_MDP_RDMA7] = { OVL_ADAPTOR_TYPE_RDMA, 7 }, - [OVL_ADAPTOR_MERGE0] = { OVL_ADAPTOR_TYPE_MERGE, 1 }, - [OVL_ADAPTOR_MERGE1] = { OVL_ADAPTOR_TYPE_MERGE, 2 }, - [OVL_ADAPTOR_MERGE2] = { OVL_ADAPTOR_TYPE_MERGE, 3 }, - [OVL_ADAPTOR_MERGE3] = { OVL_ADAPTOR_TYPE_MERGE, 4 }, - [OVL_ADAPTOR_ETHDR0] = { OVL_ADAPTOR_TYPE_ETHDR, 0 }, + [OVL_ADAPTOR_ETHDR0] = { OVL_ADAPTOR_TYPE_ETHDR, DDP_COMPONENT_ETHDR_MIXER, 0, &ethdr }, + [OVL_ADAPTOR_MDP_RDMA0] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA0, 0, &rdma }, + [OVL_ADAPTOR_MDP_RDMA1] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA1, 1, &rdma }, + [OVL_ADAPTOR_MDP_RDMA2] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA2, 2, &rdma }, + [OVL_ADAPTOR_MDP_RDMA3] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA3, 3, &rdma }, + [OVL_ADAPTOR_MDP_RDMA4] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA4, 4, &rdma }, + [OVL_ADAPTOR_MDP_RDMA5] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA5, 5, &rdma }, + [OVL_ADAPTOR_MDP_RDMA6] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA6, 6, &rdma }, + [OVL_ADAPTOR_MDP_RDMA7] = { OVL_ADAPTOR_TYPE_MDP_RDMA, DDP_COMPONENT_MDP_RDMA7, 7, &rdma }, + [OVL_ADAPTOR_MERGE0] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE1, 1, &merge }, + [OVL_ADAPTOR_MERGE1] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE2, 2, &merge }, + [OVL_ADAPTOR_MERGE2] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE3, 3, &merge }, + [OVL_ADAPTOR_MERGE3] = { OVL_ADAPTOR_TYPE_MERGE, DDP_COMPONENT_MERGE4, 4, &merge }, }; void mtk_ovl_adaptor_layer_config(struct device *dev, unsigned int idx, @@ -172,68 +193,112 @@ void mtk_ovl_adaptor_config(struct device *dev, unsigned int w, void mtk_ovl_adaptor_start(struct device *dev) { + int i; struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); - mtk_ethdr_start(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + if (!ovl_adaptor->ovl_adaptor_comp[i] || + !comp_matches[i].funcs->start) + continue; + + comp_matches[i].funcs->start(ovl_adaptor->ovl_adaptor_comp[i]); + } } void mtk_ovl_adaptor_stop(struct device *dev) { + int i; struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); - mtk_ethdr_stop(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + if (!ovl_adaptor->ovl_adaptor_comp[i] || + !comp_matches[i].funcs->stop) + continue; + + comp_matches[i].funcs->stop(ovl_adaptor->ovl_adaptor_comp[i]); + } } -int mtk_ovl_adaptor_clk_enable(struct device *dev) +/** + * power_off - Power off the devices in OVL adaptor + * @dev: Device to be powered off + * @num: Number of the devices to be powered off + * + * Calls the .power_off() ovl_adaptor component callback if it is present.
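+ * The walk is in reverse, from @num - 1 down to 0, so the unwind mirrors the power-on order: power_off(dev, 3), for example, powers off components 2, 1 and 0, skipping any slot without a callback.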
+ */ +static inline void power_off(struct device *dev, int num) { struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); - struct device *comp; - int ret; int i; - for (i = 0; i < OVL_ADAPTOR_MERGE0; i++) { - comp = ovl_adaptor->ovl_adaptor_comp[i]; - ret = pm_runtime_get_sync(comp); + if (num > OVL_ADAPTOR_ID_MAX) + num = OVL_ADAPTOR_ID_MAX; + + for (i = num - 1; i >= 0; i--) { + if (!ovl_adaptor->ovl_adaptor_comp[i] || + !comp_matches[i].funcs->power_off) + continue; + + comp_matches[i].funcs->power_off(ovl_adaptor->ovl_adaptor_comp[i]); + } +} + +/** + * mtk_ovl_adaptor_power_on - Power on the devices in OVL adaptor + * @dev: Device to be powered on + * + * Unlike OVL, the OVL adaptor is a pseudo device and is not defined in + * the device tree, so the pm_runtime_resume_and_get() called by + * .atomic_enable() powers on no device in the OVL adaptor; we have to + * implement a function to do the job instead. + * + * Return: Zero for success or negative number for failure. + */ +int mtk_ovl_adaptor_power_on(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + int i, ret; + + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + if (!ovl_adaptor->ovl_adaptor_comp[i] || + !comp_matches[i].funcs->power_on) + continue; + + ret = comp_matches[i].funcs->power_on(ovl_adaptor->ovl_adaptor_comp[i]); if (ret < 0) { dev_err(dev, "Failed to enable power domain %d, err %d\n", i, ret); - goto pwr_err; + power_off(dev, i); + return ret; } } + return 0; +} + +void mtk_ovl_adaptor_power_off(struct device *dev) +{ + power_off(dev, OVL_ADAPTOR_ID_MAX); +} + +int mtk_ovl_adaptor_clk_enable(struct device *dev) +{ + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + struct device *comp; + int ret; + int i; for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { comp = ovl_adaptor->ovl_adaptor_comp[i]; - - if (i < OVL_ADAPTOR_MERGE0) - ret = mtk_mdp_rdma_clk_enable(comp); - else if (i < OVL_ADAPTOR_ETHDR0) - ret = mtk_merge_clk_enable(comp); - else - ret = mtk_ethdr_clk_enable(comp); + if (!comp || !comp_matches[i].funcs->clk_enable) + continue; + ret = comp_matches[i].funcs->clk_enable(comp); if (ret) { dev_err(dev, "Failed to enable clock %d, err %d\n", i, ret); - goto clk_err; + while (--i >= 0) + comp_matches[i].funcs->clk_disable(comp); + return ret; } } - - return ret; - -clk_err: - while (--i >= 0) { - comp = ovl_adaptor->ovl_adaptor_comp[i]; - if (i < OVL_ADAPTOR_MERGE0) - mtk_mdp_rdma_clk_disable(comp); - else if (i < OVL_ADAPTOR_ETHDR0) - mtk_merge_clk_disable(comp); - else - mtk_ethdr_clk_disable(comp); - } - i = OVL_ADAPTOR_MERGE0; - -pwr_err: - while (--i >= 0) - pm_runtime_put(ovl_adaptor->ovl_adaptor_comp[i]); - - return ret; + return 0; } void mtk_ovl_adaptor_clk_disable(struct device *dev) @@ -244,15 +309,11 @@ void mtk_ovl_adaptor_clk_disable(struct device *dev) for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { comp = ovl_adaptor->ovl_adaptor_comp[i]; - - if (i < OVL_ADAPTOR_MERGE0) { - mtk_mdp_rdma_clk_disable(comp); + if (!comp || !comp_matches[i].funcs->clk_disable) + continue; + comp_matches[i].funcs->clk_disable(comp); + if (i < OVL_ADAPTOR_MERGE0) pm_runtime_put(comp); - } else if (i < OVL_ADAPTOR_ETHDR0) { - mtk_merge_clk_disable(comp); - } else { - mtk_ethdr_clk_disable(comp); - } } } @@ -314,40 +375,31 @@ size_t mtk_ovl_adaptor_get_num_formats(struct device *dev) void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex) { - mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA0); - mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA1); -
mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA2); - mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA3); - mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA4); - mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA5); - mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA6); - mtk_mutex_add_comp(mutex, DDP_COMPONENT_MDP_RDMA7); - mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE1); - mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE2); - mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE3); - mtk_mutex_add_comp(mutex, DDP_COMPONENT_MERGE4); - mtk_mutex_add_comp(mutex, DDP_COMPONENT_ETHDR_MIXER); + int i; + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + if (!ovl_adaptor->ovl_adaptor_comp[i]) + continue; + mtk_mutex_add_comp(mutex, comp_matches[i].comp_id); + } } void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex) { - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA0); - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA1); - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA2); - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA3); - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA4); - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA5); - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA6); - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MDP_RDMA7); - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE1); - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE2); - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE3); - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_MERGE4); - mtk_mutex_remove_comp(mutex, DDP_COMPONENT_ETHDR_MIXER); + int i; + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); + + for (i = 0; i < OVL_ADAPTOR_ID_MAX; i++) { + if (!ovl_adaptor->ovl_adaptor_comp[i]) + continue; + mtk_mutex_remove_comp(mutex, comp_matches[i].comp_id); + } } void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, unsigned int next) { + mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next); mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1); mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1); mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2); @@ -355,11 +407,11 @@ void mtk_ovl_adaptor_connect(struct device *dev, struct device *mmsys_dev, unsig mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER); mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER); mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER); - mtk_mmsys_ddp_connect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next); } void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, unsigned int next) { + mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next); mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1); mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1); mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2); @@ -367,7 +419,6 @@ void mtk_ovl_adaptor_disconnect(struct device *dev, struct device *mmsys_dev, un mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER); mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER); mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER); - mtk_mmsys_ddp_disconnect(mmsys_dev, DDP_COMPONENT_ETHDR_MIXER, next); } 
static int ovl_adaptor_comp_get_id(struct device *dev, struct device_node *node, @@ -386,17 +437,10 @@ static int ovl_adaptor_comp_get_id(struct device *dev, struct device_node *node, } static const struct of_device_id mtk_ovl_adaptor_comp_dt_ids[] = { - { - .compatible = "mediatek,mt8195-vdo1-rdma", - .data = (void *)OVL_ADAPTOR_TYPE_RDMA, - }, { - .compatible = "mediatek,mt8195-disp-merge", - .data = (void *)OVL_ADAPTOR_TYPE_MERGE, - }, { - .compatible = "mediatek,mt8195-disp-ethdr", - .data = (void *)OVL_ADAPTOR_TYPE_ETHDR, - }, - {}, + { .compatible = "mediatek,mt8195-disp-ethdr", .data = (void *)OVL_ADAPTOR_TYPE_ETHDR }, + { .compatible = "mediatek,mt8195-disp-merge", .data = (void *)OVL_ADAPTOR_TYPE_MERGE }, + { .compatible = "mediatek,mt8195-vdo1-rdma", .data = (void *)OVL_ADAPTOR_TYPE_MDP_RDMA }, + { /* sentinel */ } }; static int compare_of(struct device *dev, void *data) @@ -531,16 +575,15 @@ static int mtk_disp_ovl_adaptor_probe(struct platform_device *pdev) return ret; } -static int mtk_disp_ovl_adaptor_remove(struct platform_device *pdev) +static void mtk_disp_ovl_adaptor_remove(struct platform_device *pdev) { component_master_del(&pdev->dev, &mtk_disp_ovl_adaptor_master_ops); pm_runtime_disable(&pdev->dev); - return 0; } struct platform_driver mtk_disp_ovl_adaptor_driver = { .probe = mtk_disp_ovl_adaptor_probe, - .remove = mtk_disp_ovl_adaptor_remove, + .remove_new = mtk_disp_ovl_adaptor_remove, .driver = { .name = "mediatek-disp-ovl-adaptor", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index d645b85f97..3d17de34d7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -723,7 +723,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); - ret = pm_runtime_resume_and_get(comp->dev); + ret = mtk_ddp_comp_power_on(comp); if (ret < 0) { DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret); return; @@ -733,7 +733,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, ret = mtk_crtc_ddp_hw_init(mtk_crtc); if (ret) { - pm_runtime_put(comp->dev); + mtk_ddp_comp_power_off(comp); return; } @@ -746,7 +746,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - int i, ret; + int i; DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); if (!mtk_crtc->enabled) @@ -776,9 +776,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, drm_crtc_vblank_off(crtc); mtk_crtc_ddp_hw_fini(mtk_crtc); - ret = pm_runtime_put(comp->dev); - if (ret < 0) - DRM_DEV_ERROR(comp->dev, "Failed to disable power domain: %d\n", ret); + mtk_ddp_comp_power_off(comp); mtk_crtc->enabled = false; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 3046c04093..a9b5a21cde 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -398,6 +398,8 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = { }; static const struct mtk_ddp_comp_funcs ddp_ovl_adaptor = { + .power_on = mtk_ovl_adaptor_power_on, + .power_off = mtk_ovl_adaptor_power_off, .clk_enable = mtk_ovl_adaptor_clk_enable, .clk_disable = mtk_ovl_adaptor_clk_disable, .config = mtk_ovl_adaptor_config, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 
4bae55bdb0..15b2eafff4 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -7,6 +7,7 @@ #define MTK_DRM_DDP_COMP_H #include +#include #include #include #include @@ -46,6 +47,8 @@ enum mtk_ddp_comp_type { struct mtk_ddp_comp; struct cmdq_pkt; struct mtk_ddp_comp_funcs { + int (*power_on)(struct device *dev); + void (*power_off)(struct device *dev); int (*clk_enable)(struct device *dev); void (*clk_disable)(struct device *dev); void (*config)(struct device *dev, unsigned int w, @@ -92,6 +95,23 @@ struct mtk_ddp_comp { const struct mtk_ddp_comp_funcs *funcs; }; +static inline int mtk_ddp_comp_power_on(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->power_on) + return comp->funcs->power_on(comp->dev); + else + return pm_runtime_resume_and_get(comp->dev); + return 0; +} + +static inline void mtk_ddp_comp_power_off(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->power_off) + comp->funcs->power_off(comp->dev); + else + pm_runtime_put(comp->dev); +} + static inline int mtk_ddp_comp_clk_enable(struct mtk_ddp_comp *comp) { if (comp->funcs && comp->funcs->clk_enable) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 2b0c35cacb..14a1e0157c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -5,7 +5,6 @@ */ #include -#include #include #include #include @@ -611,9 +610,6 @@ static int mtk_drm_bind(struct device *dev) struct drm_device *drm; int ret, i; - if (!iommu_present(&platform_bus_type)) - return -EPROBE_DEFER; - pdev = of_find_device_by_node(private->mutex_node); if (!pdev) { dev_err(dev, "Waiting for disp-mutex device %pOF\n", @@ -1003,6 +999,7 @@ static struct platform_driver * const mtk_drm_drivers[] = { &mtk_dsi_driver, &mtk_ethdr_driver, &mtk_mdp_rdma_driver, + &mtk_padding_driver, }; static int __init mtk_drm_init(void) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index 6f98fff4f1..33fadb08dc 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -77,5 +77,5 @@ extern struct platform_driver mtk_dpi_driver; extern struct platform_driver mtk_dsi_driver; extern struct platform_driver mtk_ethdr_driver; extern struct platform_driver mtk_mdp_rdma_driver; - +extern struct platform_driver mtk_padding_driver; #endif /* MTK_DRM_DRV_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c index db7ac666ec..6a5d0c345a 100644 --- a/drivers/gpu/drm/mediatek/mtk_ethdr.c +++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c @@ -346,10 +346,9 @@ static int mtk_ethdr_probe(struct platform_device *pdev) return ret; } -static int mtk_ethdr_remove(struct platform_device *pdev) +static void mtk_ethdr_remove(struct platform_device *pdev) { component_del(&pdev->dev, &mtk_ethdr_component_ops); - return 0; } static const struct of_device_id mtk_ethdr_driver_dt_match[] = { @@ -361,7 +360,7 @@ MODULE_DEVICE_TABLE(of, mtk_ethdr_driver_dt_match); struct platform_driver mtk_ethdr_driver = { .probe = mtk_ethdr_probe, - .remove = mtk_ethdr_remove, + .remove_new = mtk_ethdr_remove, .driver = { .name = "mediatek-disp-ethdr", .owner = THIS_MODULE, diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c index d675c954be..54e46e440e 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c @@ -297,7 +297,6 @@ static int mtk_hdmi_ddc_probe(struct 
platform_device *pdev) strscpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name)); ddc->adap.owner = THIS_MODULE; - ddc->adap.class = I2C_CLASS_DDC; ddc->adap.algo = &mtk_hdmi_ddc_algorithm; ddc->adap.retries = 3; ddc->adap.dev.of_node = dev->of_node; diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c index c7233d0ac2..ee9ce9b6d0 100644 --- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c @@ -242,6 +242,22 @@ size_t mtk_mdp_rdma_get_num_formats(struct device *dev) return ARRAY_SIZE(formats); } +int mtk_mdp_rdma_power_on(struct device *dev) +{ + int ret = pm_runtime_resume_and_get(dev); + + if (ret < 0) { + dev_err(dev, "Failed to power on: %d\n", ret); + return ret; + } + return 0; +} + +void mtk_mdp_rdma_power_off(struct device *dev) +{ + pm_runtime_put(dev); +} + int mtk_mdp_rdma_clk_enable(struct device *dev) { struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/mediatek/mtk_padding.c b/drivers/gpu/drm/mediatek/mtk_padding.c new file mode 100644 index 0000000000..0d6451c149 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_padding.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 MediaTek Inc. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "mtk_disp_drv.h" +#include "mtk_drm_crtc.h" +#include "mtk_drm_ddp_comp.h" + +#define PADDING_CONTROL_REG 0x00 +#define PADDING_BYPASS BIT(0) +#define PADDING_ENABLE BIT(1) +#define PADDING_PIC_SIZE_REG 0x04 +#define PADDING_H_REG 0x08 /* horizontal */ +#define PADDING_V_REG 0x0c /* vertical */ +#define PADDING_COLOR_REG 0x10 + +/** + * struct mtk_padding - Basic information of the Padding + * @clk: Clock of the module + * @reg: Virtual address of the Padding for CPU to access + * @cmdq_reg: CMDQ setting of the Padding + * + * Every Padding has its own clock source, register base and + * CMDQ settings; we store these differences all together.
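+ * Note that @cmdq_reg is only populated when CMDQ support is reachable; see the IS_REACHABLE(CONFIG_MTK_CMDQ) block in mtk_padding_probe().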
+ */ +struct mtk_padding { + struct clk *clk; + void __iomem *reg; + struct cmdq_client_reg cmdq_reg; +}; + +int mtk_padding_clk_enable(struct device *dev) +{ + struct mtk_padding *padding = dev_get_drvdata(dev); + + return clk_prepare_enable(padding->clk); +} + +void mtk_padding_clk_disable(struct device *dev) +{ + struct mtk_padding *padding = dev_get_drvdata(dev); + + clk_disable_unprepare(padding->clk); +} + +void mtk_padding_start(struct device *dev) +{ + struct mtk_padding *padding = dev_get_drvdata(dev); + + writel(PADDING_ENABLE | PADDING_BYPASS, + padding->reg + PADDING_CONTROL_REG); + + /* + * Note that even when the padding is in bypass mode, + * all the settings must still be cleared to 0, or + * undefined behaviour may result. + */ + writel(0, padding->reg + PADDING_PIC_SIZE_REG); + writel(0, padding->reg + PADDING_H_REG); + writel(0, padding->reg + PADDING_V_REG); + writel(0, padding->reg + PADDING_COLOR_REG); +} + +void mtk_padding_stop(struct device *dev) +{ + struct mtk_padding *padding = dev_get_drvdata(dev); + + writel(0, padding->reg + PADDING_CONTROL_REG); +} + +static int mtk_padding_bind(struct device *dev, struct device *master, void *data) +{ + return 0; +} + +static void mtk_padding_unbind(struct device *dev, struct device *master, void *data) +{ +} + +static const struct component_ops mtk_padding_component_ops = { + .bind = mtk_padding_bind, + .unbind = mtk_padding_unbind, +}; + +static int mtk_padding_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_padding *priv; + struct resource *res; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(dev, "failed to get clk\n"); + return PTR_ERR(priv->clk); + } + + priv->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(priv->reg)) { + dev_err(dev, "failed to do ioremap\n"); + return PTR_ERR(priv->reg); + } + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0); + if (ret) { + dev_err(dev, "failed to get gce client reg\n"); + return ret; + } +#endif + + platform_set_drvdata(pdev, priv); + + ret = devm_pm_runtime_enable(dev); + if (ret) + return ret; + + ret = component_add(dev, &mtk_padding_component_ops); + if (ret) { + pm_runtime_disable(dev); + return dev_err_probe(dev, ret, "failed to add component\n"); + } + + return 0; +} + +static int mtk_padding_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &mtk_padding_component_ops); + return 0; +} + +static const struct of_device_id mtk_padding_driver_dt_match[] = { + { .compatible = "mediatek,mt8188-disp-padding" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_padding_driver_dt_match); + +struct platform_driver mtk_padding_driver = { + .probe = mtk_padding_probe, + .remove = mtk_padding_remove, + .driver = { + .name = "mediatek-disp-padding", + .owner = THIS_MODULE, + .of_match_table = mtk_padding_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c index e5fe4e994f..a6bc1bdb3d 100644 --- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c +++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c @@ -323,13 +323,11 @@ static int meson_dw_mipi_dsi_probe(struct platform_device *pdev) return 0; } -static int meson_dw_mipi_dsi_remove(struct platform_device *pdev) +static void meson_dw_mipi_dsi_remove(struct platform_device *pdev) { struct meson_dw_mipi_dsi *mipi_dsi =
platform_get_drvdata(pdev); dw_mipi_dsi_remove(mipi_dsi->dmd); - - return 0; } static const struct of_device_id meson_dw_mipi_dsi_of_table[] = { @@ -340,7 +338,7 @@ MODULE_DEVICE_TABLE(of, meson_dw_mipi_dsi_of_table); static struct platform_driver meson_dw_mipi_dsi_platform_driver = { .probe = meson_dw_mipi_dsi_probe, - .remove = meson_dw_mipi_dsi_remove, + .remove_new = meson_dw_mipi_dsi_remove, .driver = { .name = DRIVER_NAME, .of_match_table = meson_dw_mipi_dsi_of_table, diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c index 0c48bdf3e7..423eb302be 100644 --- a/drivers/gpu/drm/mgag200/mgag200_i2c.c +++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c @@ -106,7 +106,6 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c) i2c->data = BIT(info->i2c.data_bit); i2c->clock = BIT(info->i2c.clock_bit); i2c->adapter.owner = THIS_MODULE; - i2c->adapter.class = I2C_CLASS_DDC; i2c->adapter.dev.parent = dev->dev; i2c->dev = dev; i2c_set_adapdata(&i2c->adapter, i2c); diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index ad70b611b4..f202f26ada 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -17,6 +17,7 @@ config DRM_MSM select DRM_DP_AUX_BUS select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER + select DRM_EXEC select DRM_KMS_HELPER select DRM_PANEL select DRM_BRIDGE diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 49671364fd..b1173128b5 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -63,6 +63,7 @@ msm-$(CONFIG_DRM_MSM_DPU) += \ disp/dpu1/dpu_encoder_phys_wb.o \ disp/dpu1/dpu_formats.o \ disp/dpu1/dpu_hw_catalog.o \ + disp/dpu1/dpu_hw_cdm.o \ disp/dpu1/dpu_hw_ctl.o \ disp/dpu1/dpu_hw_dsc.o \ disp/dpu1/dpu_hw_dsc_1_2.o \ diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index e5916c1067..c003f97018 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -684,7 +684,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); - u32 regbit; + u32 hbb; int ret; gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); @@ -820,18 +820,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); - /* Set the highest bank bit */ - if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu)) - regbit = 2; - else - regbit = 1; + BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13); + hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13; - gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7); - gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1); + gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, hbb << 7); + gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, hbb << 1); if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) || adreno_is_a540(adreno_gpu)) - gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit); + gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, hbb); /* Disable All flat shading optimization (ALLFLATOPTDIS) */ gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10)); @@ -1785,5 +1782,11 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) /* Set up the preemption specific bits and pieces for each ringbuffer */ a5xx_preempt_init(gpu); + /* Set the highest bank bit */ + if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu)) + adreno_gpu->ubwc_config.highest_bank_bit = 15; + else + adreno_gpu->ubwc_config.highest_bank_bit 
= 14; + return gpu; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index edc1eeef84..792a4c60a2 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1270,81 +1270,95 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu) gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]); } -static void a6xx_set_ubwc_config(struct msm_gpu *gpu) +static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); /* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */ - u32 rgb565_predicator = 0; + gpu->ubwc_config.rgb565_predicator = 0; /* Unknown, introduced with A650 family */ - u32 uavflagprd_inv = 0; + gpu->ubwc_config.uavflagprd_inv = 0; /* Whether the minimum access length is 64 bits */ - u32 min_acc_len = 0; + gpu->ubwc_config.min_acc_len = 0; /* Entirely magic, per-GPU-gen value */ - u32 ubwc_mode = 0; + gpu->ubwc_config.ubwc_mode = 0; /* * The Highest Bank Bit value represents the bit of the highest DDR bank. - * We then subtract 13 from it (13 is the minimum value allowed by hw) and - * write the lowest two bits of the remaining value as hbb_lo and the - * one above it as hbb_hi to the hardware. This should ideally use DRAM - * type detection. + * This should ideally use DRAM type detection. */ - u32 hbb_hi = 0; - u32 hbb_lo = 2; - /* Unknown, introduced with A640/680 */ - u32 amsbc = 0; + gpu->ubwc_config.highest_bank_bit = 15; - if (adreno_is_a610(adreno_gpu)) { - /* HBB = 14 */ - hbb_lo = 1; - min_acc_len = 1; - ubwc_mode = 1; + if (adreno_is_a610(gpu)) { + gpu->ubwc_config.highest_bank_bit = 13; + gpu->ubwc_config.min_acc_len = 1; + gpu->ubwc_config.ubwc_mode = 1; } - /* a618 is using the hw default values */ - if (adreno_is_a618(adreno_gpu)) - return; + if (adreno_is_a618(gpu)) + gpu->ubwc_config.highest_bank_bit = 14; - if (adreno_is_a619_holi(adreno_gpu)) - hbb_lo = 0; + if (adreno_is_a619(gpu)) + /* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */ + gpu->ubwc_config.highest_bank_bit = 13; + + if (adreno_is_a619_holi(gpu)) + gpu->ubwc_config.highest_bank_bit = 13; - if (adreno_is_a640_family(adreno_gpu)) - amsbc = 1; + if (adreno_is_a640_family(gpu)) + gpu->ubwc_config.amsbc = 1; - if (adreno_is_a650(adreno_gpu) || - adreno_is_a660(adreno_gpu) || - adreno_is_a690(adreno_gpu) || - adreno_is_a730(adreno_gpu) || - adreno_is_a740_family(adreno_gpu)) { + if (adreno_is_a650(gpu) || + adreno_is_a660(gpu) || + adreno_is_a690(gpu) || + adreno_is_a730(gpu) || + adreno_is_a740_family(gpu)) { /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */ - hbb_lo = 3; - amsbc = 1; - rgb565_predicator = 1; - uavflagprd_inv = 2; + gpu->ubwc_config.highest_bank_bit = 16; + gpu->ubwc_config.amsbc = 1; + gpu->ubwc_config.rgb565_predicator = 1; + gpu->ubwc_config.uavflagprd_inv = 2; } - if (adreno_is_7c3(adreno_gpu)) { - hbb_lo = 1; - amsbc = 1; - rgb565_predicator = 1; - uavflagprd_inv = 2; + if (adreno_is_7c3(gpu)) { + gpu->ubwc_config.highest_bank_bit = 14; + gpu->ubwc_config.amsbc = 1; + gpu->ubwc_config.rgb565_predicator = 1; + gpu->ubwc_config.uavflagprd_inv = 2; } +} + +static void a6xx_set_ubwc_config(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + /* + * We subtract 13 from the highest bank bit (13 is the minimum value + * allowed by hw) and write the lowest two bits of the remaining value + * as hbb_lo and the one above it as hbb_hi to the hardware. 
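+ * For example, highest_bank_bit = 16 gives hbb = 3, hence hbb_lo = 3 and hbb_hi = 0, while the hardware minimum of 13 gives hbb = 0 and clears both fields.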
+ */ + BUG_ON(adreno_gpu->ubwc_config.highest_bank_bit < 13); + u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13; + u32 hbb_hi = hbb >> 2; + u32 hbb_lo = hbb & 3; gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, - rgb565_predicator << 11 | hbb_hi << 10 | amsbc << 4 | - min_acc_len << 3 | hbb_lo << 1 | ubwc_mode); + adreno_gpu->ubwc_config.rgb565_predicator << 11 | + hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 | + adreno_gpu->ubwc_config.min_acc_len << 3 | + hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode); gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 | - min_acc_len << 3 | hbb_lo << 1 | ubwc_mode); + adreno_gpu->ubwc_config.min_acc_len << 3 | + hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode); gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 | - uavflagprd_inv << 4 | min_acc_len << 3 | - hbb_lo << 1 | ubwc_mode); + adreno_gpu->ubwc_config.uavflagprd_inv << 4 | + adreno_gpu->ubwc_config.min_acc_len << 3 | + hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode); if (adreno_is_a7xx(adreno_gpu)) gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL, FIELD_PREP(GENMASK(8, 5), hbb_lo)); - gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21); + gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, + adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21); } static int a6xx_cp_init(struct msm_gpu *gpu) @@ -1780,7 +1794,7 @@ static int hw_init(struct msm_gpu *gpu) else gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff); - gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1); + gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, BIT(7) | 0x1); /* Set weights for bicubic filtering */ if (adreno_is_a650_family(adreno_gpu)) { @@ -2911,5 +2925,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a6xx_fault_handler); + a6xx_calc_ubwc_config(adreno_gpu); + return gpu; } diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 3ee14646ab..2ce7d7b169 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -841,7 +841,8 @@ static void suspend_scheduler(struct msm_gpu *gpu) */ for (i = 0; i < gpu->nr_rings; i++) { struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched; - kthread_park(sched->thread); + + drm_sched_wqueue_stop(sched); } } @@ -851,7 +852,8 @@ static void resume_scheduler(struct msm_gpu *gpu) for (i = 0; i < gpu->nr_rings; i++) { struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched; - kthread_unpark(sched->thread); + + drm_sched_wqueue_start(sched); } } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 3fe9fd240c..074fb49870 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -373,6 +373,9 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx, return -EINVAL; *value = ctx->aspace->va_size; return 0; + case MSM_PARAM_HIGHEST_BANK_BIT: + *value = adreno_gpu->ubwc_config.highest_bank_bit; + return 0; default: DBG("%s: invalid param: %u", gpu->name, param); return -EINVAL; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 80b3f63121..bc14df96fe 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -165,6 +165,15 @@ struct adreno_gpu { /* firmware: */ const struct firmware *fw[ADRENO_FW_MAX]; + struct { + u32 rgb565_predicator; + u32 uavflagprd_inv; + u32 min_acc_len; + u32 ubwc_mode; + u32 highest_bank_bit; + u32 
amsbc; + } ubwc_config; + /* * Register offsets are different between some GPUs. * GPU specific offsets will be exported by GPU specific diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h new file mode 100644 index 0000000000..eb5dfff2ec --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_10_0_sm8650.h @@ -0,0 +1,457 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved. + */ + +#ifndef _DPU_10_0_SM8650_H +#define _DPU_10_0_SM8650_H + +static const struct dpu_caps sm8650_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0xb, + .has_src_split = true, + .has_dim_layer = true, + .has_idle_pc = true, + .has_3d_merge = true, + .max_linewidth = 8192, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, +}; + +static const struct dpu_mdp_cfg sm8650_mdp = { + .name = "top_0", + .base = 0, .len = 0x494, + .features = BIT(DPU_MDP_PERIPH_0_REMOVED), + .clk_ctrls = { + [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 }, + }, +}; + +/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */ +static const struct dpu_ctl_cfg sm8650_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x15000, .len = 0x1000, + .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, { + .name = "ctl_1", .id = CTL_1, + .base = 0x16000, .len = 0x1000, + .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), + }, { + .name = "ctl_2", .id = CTL_2, + .base = 0x17000, .len = 0x1000, + .features = CTL_SM8550_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), + }, { + .name = "ctl_3", .id = CTL_3, + .base = 0x18000, .len = 0x1000, + .features = CTL_SM8550_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12), + }, { + .name = "ctl_4", .id = CTL_4, + .base = 0x19000, .len = 0x1000, + .features = CTL_SM8550_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13), + }, { + .name = "ctl_5", .id = CTL_5, + .base = 0x1a000, .len = 0x1000, + .features = CTL_SM8550_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23), + }, +}; + +static const struct dpu_sspp_cfg sm8650_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_3, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_1", .id = SSPP_VIG1, + .base = 0x6000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_3, + .xin_id = 4, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_2", .id = SSPP_VIG2, + .base = 0x8000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_3, + .xin_id = 8, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_3", .id = SSPP_VIG3, + .base = 0xa000, .len = 0x344, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_3_3, + .xin_id = 12, + .type = SSPP_TYPE_VIG, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 1, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_9", .id = SSPP_DMA1, + .base = 0x26000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 5, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_10", .id = 
SSPP_DMA2, + .base = 0x28000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 9, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_11", .id = SSPP_DMA3, + .base = 0x2a000, .len = 0x344, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 13, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_12", .id = SSPP_DMA4, + .base = 0x2c000, .len = 0x344, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 14, + .type = SSPP_TYPE_DMA, + }, { + .name = "sspp_13", .id = SSPP_DMA5, + .base = 0x2e000, .len = 0x344, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 15, + .type = SSPP_TYPE_DMA, + }, +}; + +static const struct dpu_lm_cfg sm8650_lm[] = { + { + .name = "lm_0", .id = LM_0, + .base = 0x44000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_1, + .pingpong = PINGPONG_0, + .dspp = DSPP_0, + }, { + .name = "lm_1", .id = LM_1, + .base = 0x45000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_0, + .pingpong = PINGPONG_1, + .dspp = DSPP_1, + }, { + .name = "lm_2", .id = LM_2, + .base = 0x46000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_3, + .pingpong = PINGPONG_2, + }, { + .name = "lm_3", .id = LM_3, + .base = 0x47000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_2, + .pingpong = PINGPONG_3, + }, { + .name = "lm_4", .id = LM_4, + .base = 0x48000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_5, + .pingpong = PINGPONG_4, + }, { + .name = "lm_5", .id = LM_5, + .base = 0x49000, .len = 0x400, + .features = MIXER_SDM845_MASK, + .sblk = &sdm845_lm_sblk, + .lm_pair = LM_4, + .pingpong = PINGPONG_5, + }, +}; + +static const struct dpu_dspp_cfg sm8650_dspp[] = { + { + .name = "dspp_0", .id = DSPP_0, + .base = 0x54000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_1", .id = DSPP_1, + .base = 0x56000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_2", .id = DSPP_2, + .base = 0x58000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, { + .name = "dspp_3", .id = DSPP_3, + .base = 0x5a000, .len = 0x1800, + .features = DSPP_SC7180_MASK, + .sblk = &sdm845_dspp_sblk, + }, +}; + +static const struct dpu_pingpong_cfg sm8650_pp[] = { + { + .name = "pingpong_0", .id = PINGPONG_0, + .base = 0x69000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + }, { + .name = "pingpong_1", .id = PINGPONG_1, + .base = 0x6a000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_0, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + }, { + .name = "pingpong_2", .id = PINGPONG_2, + .base = 0x6b000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), + }, { + .name = "pingpong_3", .id = PINGPONG_3, + .base = 0x6c000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_1, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), + }, { + .name = "pingpong_4", .id = PINGPONG_4, + .base = 0x6d000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d 
= MERGE_3D_2, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), + }, { + .name = "pingpong_5", .id = PINGPONG_5, + .base = 0x6e000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_2, + .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), + }, { + .name = "pingpong_6", .id = PINGPONG_6, + .base = 0x66000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + }, { + .name = "pingpong_7", .id = PINGPONG_7, + .base = 0x66400, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_3, + }, { + .name = "pingpong_8", .id = PINGPONG_8, + .base = 0x7e000, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_4, + }, { + .name = "pingpong_9", .id = PINGPONG_9, + .base = 0x7e400, .len = 0, + .features = BIT(DPU_PINGPONG_DITHER), + .sblk = &sc7280_pp_sblk, + .merge_3d = MERGE_3D_4, + }, +}; + +static const struct dpu_merge_3d_cfg sm8650_merge_3d[] = { + { + .name = "merge_3d_0", .id = MERGE_3D_0, + .base = 0x4e000, .len = 0x8, + }, { + .name = "merge_3d_1", .id = MERGE_3D_1, + .base = 0x4f000, .len = 0x8, + }, { + .name = "merge_3d_2", .id = MERGE_3D_2, + .base = 0x50000, .len = 0x8, + }, { + .name = "merge_3d_3", .id = MERGE_3D_3, + .base = 0x66700, .len = 0x8, + }, { + .name = "merge_3d_4", .id = MERGE_3D_4, + .base = 0x7e700, .len = 0x8, + }, +}; + +/* + * NOTE: Each display compression engine (DCE) contains dual hard + * slice DSC encoders so both share same base address but with + * its own different sub block address. + */ +static const struct dpu_dsc_cfg sm8650_dsc[] = { + { + .name = "dce_0_0", .id = DSC_0, + .base = 0x80000, .len = 0x6, + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_0_1", .id = DSC_1, + .base = 0x80000, .len = 0x6, + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &dsc_sblk_1, + }, { + .name = "dce_1_0", .id = DSC_2, + .base = 0x81000, .len = 0x6, + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_1_1", .id = DSC_3, + .base = 0x81000, .len = 0x6, + .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN), + .sblk = &dsc_sblk_1, + }, { + .name = "dce_2_0", .id = DSC_4, + .base = 0x82000, .len = 0x6, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_0, + }, { + .name = "dce_2_1", .id = DSC_5, + .base = 0x82000, .len = 0x6, + .features = BIT(DPU_DSC_HW_REV_1_2), + .sblk = &dsc_sblk_1, + }, +}; + +static const struct dpu_wb_cfg sm8650_wb[] = { + { + .name = "wb_2", .id = WB_2, + .base = 0x65000, .len = 0x2c8, + .features = WB_SM8250_MASK, + .format_list = wb2_formats_rgb, + .num_formats = ARRAY_SIZE(wb2_formats_rgb), + .xin_id = 6, + .vbif_idx = VBIF_RT, + .maxlinewidth = 4096, + .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4), + }, +}; + +static const struct dpu_intf_cfg sm8650_intf[] = { + { + .name = "intf_0", .id = INTF_0, + .base = 0x34000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25), + }, { + .name = "intf_1", .id = INTF_1, + .base = 0x35000, .len = 0x300, + .features = INTF_SC7280_MASK, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_0, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = 
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27), + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2), + }, { + .name = "intf_2", .id = INTF_2, + .base = 0x36000, .len = 0x300, + .features = INTF_SC7280_MASK, + .type = INTF_DSI, + .controller_id = MSM_DSI_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29), + .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2), + }, { + .name = "intf_3", .id = INTF_3, + .base = 0x37000, .len = 0x280, + .features = INTF_SC7280_MASK, + .type = INTF_DP, + .controller_id = MSM_DP_CONTROLLER_1, + .prog_fetch_lines_worst_case = 24, + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30), + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31), + }, +}; + +static const struct dpu_perf_cfg sm8650_perf_data = { + .max_bw_low = 17000000, + .max_bw_high = 27000000, + .min_core_ib = 2500000, + .min_llcc_ib = 0, + .min_dram_ib = 800000, + .min_prefill_lines = 35, + /* FIXME: lut tables */ + .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0}, + .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sc7180_qos_linear), + .entries = sc7180_qos_linear + }, + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile), + .entries = sc7180_qos_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + /* TODO: macrotile-qseed is different from macrotile */ + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_mdss_version sm8650_mdss_ver = { + .core_major_ver = 10, + .core_minor_ver = 0, +}; + +const struct dpu_mdss_cfg dpu_sm8650_cfg = { + .mdss_ver = &sm8650_mdss_ver, + .caps = &sm8650_dpu_caps, + .mdp = &sm8650_mdp, + .ctl_count = ARRAY_SIZE(sm8650_ctl), + .ctl = sm8650_ctl, + .sspp_count = ARRAY_SIZE(sm8650_sspp), + .sspp = sm8650_sspp, + .mixer_count = ARRAY_SIZE(sm8650_lm), + .mixer = sm8650_lm, + .dspp_count = ARRAY_SIZE(sm8650_dspp), + .dspp = sm8650_dspp, + .pingpong_count = ARRAY_SIZE(sm8650_pp), + .pingpong = sm8650_pp, + .dsc_count = ARRAY_SIZE(sm8650_dsc), + .dsc = sm8650_dsc, + .merge_3d_count = ARRAY_SIZE(sm8650_merge_3d), + .merge_3d = sm8650_merge_3d, + .wb_count = ARRAY_SIZE(sm8650_wb), + .wb = sm8650_wb, + .intf_count = ARRAY_SIZE(sm8650_intf), + .intf = sm8650_intf, + .vbif_count = ARRAY_SIZE(sm8650_vbif), + .vbif = sm8650_vbif, + .perf = &sm8650_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h index aa1867943c..1d3e9666c7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h @@ -10,7 +10,6 @@ static const struct dpu_caps msm8998_dpu_caps = { .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .max_mixer_blendstages = 0x7, - .qseed_type = DPU_SSPP_SCALER_QSEED3, .has_src_split = true, .has_dim_layer = true, .has_idle_pc = true, @@ -70,7 +69,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1ac, .features = VIG_MSM8998_MASK, - .sblk = &msm8998_vig_sblk_0, + .sblk = &dpu_vig_sblk_qseed3_1_2, .xin_id = 0, .type = SSPP_TYPE_VIG, .clk_ctrl = DPU_CLK_CTRL_VIG0, @@ -78,7 +77,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = { .name = "sspp_1", .id = SSPP_VIG1, .base = 0x6000, .len = 0x1ac, 
.features = VIG_MSM8998_MASK, - .sblk = &msm8998_vig_sblk_1, + .sblk = &dpu_vig_sblk_qseed3_1_2, .xin_id = 4, .type = SSPP_TYPE_VIG, .clk_ctrl = DPU_CLK_CTRL_VIG1, @@ -86,7 +85,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = { .name = "sspp_2", .id = SSPP_VIG2, .base = 0x8000, .len = 0x1ac, .features = VIG_MSM8998_MASK, - .sblk = &msm8998_vig_sblk_2, + .sblk = &dpu_vig_sblk_qseed3_1_2, .xin_id = 8, .type = SSPP_TYPE_VIG, .clk_ctrl = DPU_CLK_CTRL_VIG2, @@ -94,7 +93,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = { .name = "sspp_3", .id = SSPP_VIG3, .base = 0xa000, .len = 0x1ac, .features = VIG_MSM8998_MASK, - .sblk = &msm8998_vig_sblk_3, + .sblk = &dpu_vig_sblk_qseed3_1_2, .xin_id = 12, .type = SSPP_TYPE_VIG, .clk_ctrl = DPU_CLK_CTRL_VIG3, @@ -102,7 +101,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1ac, .features = DMA_MSM8998_MASK, - .sblk = &sdm845_dma_sblk_0, + .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, .clk_ctrl = DPU_CLK_CTRL_DMA0, @@ -110,7 +109,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x1ac, .features = DMA_MSM8998_MASK, - .sblk = &sdm845_dma_sblk_1, + .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, .clk_ctrl = DPU_CLK_CTRL_DMA1, @@ -118,7 +117,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = { .name = "sspp_10", .id = SSPP_DMA2, .base = 0x28000, .len = 0x1ac, .features = DMA_CURSOR_MSM8998_MASK, - .sblk = &sdm845_dma_sblk_2, + .sblk = &dpu_dma_sblk, .xin_id = 9, .type = SSPP_TYPE_DMA, .clk_ctrl = DPU_CLK_CTRL_DMA2, @@ -126,7 +125,7 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = { .name = "sspp_11", .id = SSPP_DMA3, .base = 0x2a000, .len = 0x1ac, .features = DMA_CURSOR_MSM8998_MASK, - .sblk = &sdm845_dma_sblk_3, + .sblk = &dpu_dma_sblk, .xin_id = 13, .type = SSPP_TYPE_DMA, .clk_ctrl = DPU_CLK_CTRL_DMA3, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h index 38ac0c1a13..7a23389a57 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_0_sdm845.h @@ -10,7 +10,6 @@ static const struct dpu_caps sdm845_dpu_caps = { .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .max_mixer_blendstages = 0xb, - .qseed_type = DPU_SSPP_SCALER_QSEED3, .has_src_split = true, .has_dim_layer = true, .has_idle_pc = true, @@ -68,7 +67,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1c8, .features = VIG_SDM845_MASK_SDMA, - .sblk = &sdm845_vig_sblk_0, + .sblk = &dpu_vig_sblk_qseed3_1_3, .xin_id = 0, .type = SSPP_TYPE_VIG, .clk_ctrl = DPU_CLK_CTRL_VIG0, @@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = { .name = "sspp_1", .id = SSPP_VIG1, .base = 0x6000, .len = 0x1c8, .features = VIG_SDM845_MASK_SDMA, - .sblk = &sdm845_vig_sblk_1, + .sblk = &dpu_vig_sblk_qseed3_1_3, .xin_id = 4, .type = SSPP_TYPE_VIG, .clk_ctrl = DPU_CLK_CTRL_VIG1, @@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = { .name = "sspp_2", .id = SSPP_VIG2, .base = 0x8000, .len = 0x1c8, .features = VIG_SDM845_MASK_SDMA, - .sblk = &sdm845_vig_sblk_2, + .sblk = &dpu_vig_sblk_qseed3_1_3, .xin_id = 8, .type = SSPP_TYPE_VIG, .clk_ctrl = DPU_CLK_CTRL_VIG2, @@ -92,7 +91,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = { .name = "sspp_3", .id = SSPP_VIG3, .base = 0xa000, .len = 0x1c8, .features = VIG_SDM845_MASK_SDMA, - 
.sblk = &sdm845_vig_sblk_3, + .sblk = &dpu_vig_sblk_qseed3_1_3, .xin_id = 12, .type = SSPP_TYPE_VIG, .clk_ctrl = DPU_CLK_CTRL_VIG3, @@ -100,7 +99,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1c8, .features = DMA_SDM845_MASK_SDMA, - .sblk = &sdm845_dma_sblk_0, + .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, .clk_ctrl = DPU_CLK_CTRL_DMA0, @@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x1c8, .features = DMA_SDM845_MASK_SDMA, - .sblk = &sdm845_dma_sblk_1, + .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, .clk_ctrl = DPU_CLK_CTRL_DMA1, @@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = { .name = "sspp_10", .id = SSPP_DMA2, .base = 0x28000, .len = 0x1c8, .features = DMA_CURSOR_SDM845_MASK_SDMA, - .sblk = &sdm845_dma_sblk_2, + .sblk = &dpu_dma_sblk, .xin_id = 9, .type = SSPP_TYPE_DMA, .clk_ctrl = DPU_CLK_CTRL_DMA2, @@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = { .name = "sspp_11", .id = SSPP_DMA3, .base = 0x2a000, .len = 0x1c8, .features = DMA_CURSOR_SDM845_MASK_SDMA, - .sblk = &sdm845_dma_sblk_3, + .sblk = &dpu_dma_sblk, .xin_id = 13, .type = SSPP_TYPE_DMA, .clk_ctrl = DPU_CLK_CTRL_DMA3, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h new file mode 100644 index 0000000000..cbbdaebe35 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_4_1_sdm670.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023, Richard Acayan. All rights reserved. 
+ */ + +#ifndef _DPU_4_1_SDM670_H +#define _DPU_4_1_SDM670_H + +static const struct dpu_mdp_cfg sdm670_mdp = { + .name = "top_0", + .base = 0x0, .len = 0x45c, + .features = BIT(DPU_MDP_AUDIO_SELECT), + .clk_ctrls = { + [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, + [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 }, + [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, + [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 }, + }, +}; + +static const struct dpu_sspp_cfg sdm670_sspp[] = { + { + .name = "sspp_0", .id = SSPP_VIG0, + .base = 0x4000, .len = 0x1c8, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_1_3, + .xin_id = 0, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_1", .id = SSPP_VIG1, + .base = 0x6000, .len = 0x1c8, + .features = VIG_SDM845_MASK_SDMA, + .sblk = &dpu_vig_sblk_qseed3_1_3, + .xin_id = 4, + .type = SSPP_TYPE_VIG, + .clk_ctrl = DPU_CLK_CTRL_VIG0, + }, { + .name = "sspp_8", .id = SSPP_DMA0, + .base = 0x24000, .len = 0x1c8, + .features = DMA_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 1, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA0, + }, { + .name = "sspp_9", .id = SSPP_DMA1, + .base = 0x26000, .len = 0x1c8, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 5, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA1, + }, { + .name = "sspp_10", .id = SSPP_DMA2, + .base = 0x28000, .len = 0x1c8, + .features = DMA_CURSOR_SDM845_MASK_SDMA, + .sblk = &dpu_dma_sblk, + .xin_id = 9, + .type = SSPP_TYPE_DMA, + .clk_ctrl = DPU_CLK_CTRL_DMA2, + }, +}; + +static const struct dpu_dsc_cfg sdm670_dsc[] = { + { + .name = "dsc_0", .id = DSC_0, + .base = 0x80000, .len = 0x140, + }, { + .name = "dsc_1", .id = DSC_1, + .base = 0x80400, .len = 0x140, + }, +}; + +static const struct dpu_mdss_version sdm670_mdss_ver = { + .core_major_ver = 4, + .core_minor_ver = 1, +}; + +const struct dpu_mdss_cfg dpu_sdm670_cfg = { + .mdss_ver = &sdm670_mdss_ver, + .caps = &sdm845_dpu_caps, + .mdp = &sdm670_mdp, + .ctl_count = ARRAY_SIZE(sdm845_ctl), + .ctl = sdm845_ctl, + .sspp_count = ARRAY_SIZE(sdm670_sspp), + .sspp = sdm670_sspp, + .mixer_count = ARRAY_SIZE(sdm845_lm), + .mixer = sdm845_lm, + .pingpong_count = ARRAY_SIZE(sdm845_pp), + .pingpong = sdm845_pp, + .dsc_count = ARRAY_SIZE(sdm670_dsc), + .dsc = sdm670_dsc, + .intf_count = ARRAY_SIZE(sdm845_intf), + .intf = sdm845_intf, + .vbif_count = ARRAY_SIZE(sdm845_vbif), + .vbif = sdm845_vbif, + .perf = &sdm845_perf_data, +}; + +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h index c022e57864..145f3d5953 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h @@ -10,7 +10,6 @@ static const struct dpu_caps sm8150_dpu_caps = { .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .max_mixer_blendstages = 0xb, - .qseed_type = DPU_SSPP_SCALER_QSEED3, .has_src_split = true, .has_dim_layer = true, .has_idle_pc = true, @@ -77,7 +76,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1f0, .features = VIG_SDM845_MASK, - .sblk = &sm8150_vig_sblk_0, + .sblk = &dpu_vig_sblk_qseed3_1_4, .xin_id = 0, .type = SSPP_TYPE_VIG, .clk_ctrl = DPU_CLK_CTRL_VIG0, @@ -85,7 +84,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = { .name = "sspp_1", .id = SSPP_VIG1, .base = 
 		.base = 0x6000, .len = 0x1f0,
 		.features = VIG_SDM845_MASK,
-		.sblk = &sm8150_vig_sblk_1,
+		.sblk = &dpu_vig_sblk_qseed3_1_4,
 		.xin_id = 4,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -93,7 +92,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
 		.name = "sspp_2", .id = SSPP_VIG2,
 		.base = 0x8000, .len = 0x1f0,
 		.features = VIG_SDM845_MASK,
-		.sblk = &sm8150_vig_sblk_2,
+		.sblk = &dpu_vig_sblk_qseed3_1_4,
 		.xin_id = 8,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -101,7 +100,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
 		.name = "sspp_3", .id = SSPP_VIG3,
 		.base = 0xa000, .len = 0x1f0,
 		.features = VIG_SDM845_MASK,
-		.sblk = &sm8150_vig_sblk_3,
+		.sblk = &dpu_vig_sblk_qseed3_1_4,
 		.xin_id = 12,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -109,7 +108,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x1f0,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -117,7 +116,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
 		.name = "sspp_9", .id = SSPP_DMA1,
 		.base = 0x26000, .len = 0x1f0,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_1,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 5,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -125,7 +124,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
 		.name = "sspp_10", .id = SSPP_DMA2,
 		.base = 0x28000, .len = 0x1f0,
 		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_2,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 9,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -133,7 +132,7 @@ static const struct dpu_sspp_cfg sm8150_sspp[] = {
 		.name = "sspp_11", .id = SSPP_DMA3,
 		.base = 0x2a000, .len = 0x1f0,
 		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_3,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 13,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index cb0758f082..9e3bec8bc1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -10,7 +10,6 @@
 static const struct dpu_caps sc8180x_dpu_caps = {
 	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
 	.max_mixer_blendstages = 0xb,
-	.qseed_type = DPU_SSPP_SCALER_QSEED3,
 	.has_src_split = true,
 	.has_dim_layer = true,
 	.has_idle_pc = true,
@@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x1f0,
 		.features = VIG_SDM845_MASK,
-		.sblk = &sm8150_vig_sblk_0,
+		.sblk = &dpu_vig_sblk_qseed3_1_4,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
 		.name = "sspp_1", .id = SSPP_VIG1,
 		.base = 0x6000, .len = 0x1f0,
 		.features = VIG_SDM845_MASK,
-		.sblk = &sm8150_vig_sblk_1,
+		.sblk = &dpu_vig_sblk_qseed3_1_4,
 		.xin_id = 4,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG1,
@@ -92,7 +91,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
 		.name = "sspp_2", .id = SSPP_VIG2,
 		.base = 0x8000, .len = 0x1f0,
 		.features = VIG_SDM845_MASK,
-		.sblk = &sm8150_vig_sblk_2,
+		.sblk = &dpu_vig_sblk_qseed3_1_4,
 		.xin_id = 8,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG2,
@@ -100,7 +99,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
 		.name = "sspp_3", .id = SSPP_VIG3,
 		.base = 0xa000, .len = 0x1f0,
 		.features = VIG_SDM845_MASK,
-		.sblk = &sm8150_vig_sblk_3,
+		.sblk = &dpu_vig_sblk_qseed3_1_4,
 		.xin_id = 12,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x1f0,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
 		.name = "sspp_9", .id = SSPP_DMA1,
 		.base = 0x26000, .len = 0x1f0,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_1,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 5,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
 		.name = "sspp_10", .id = SSPP_DMA2,
 		.base = 0x28000, .len = 0x1f0,
 		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_2,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 9,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sc8180x_sspp[] = {
 		.name = "sspp_11", .id = SSPP_DMA3,
 		.base = 0x2a000, .len = 0x1f0,
 		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_3,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 13,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index cec7af6667..76b2ec0d24 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -68,8 +68,8 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
 	{
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x1f0,
-		.features = VIG_SM6125_MASK,
-		.sblk = &sm6125_vig_sblk_0,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_2_4,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -77,7 +77,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x1f0,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -85,7 +85,7 @@ static const struct dpu_sspp_cfg sm6125_sspp[] = {
 		.name = "sspp_9", .id = SSPP_DMA1,
 		.base = 0x26000, .len = 0x1f0,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_1,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 5,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA1,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
index 9f8068fa01..a57d50b1f0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
@@ -10,7 +10,6 @@
 static const struct dpu_caps sm8250_dpu_caps = {
 	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
 	.max_mixer_blendstages = 0xb,
-	.qseed_type = DPU_SSPP_SCALER_QSEED4,
 	.has_src_split = true,
 	.has_dim_layer = true,
 	.has_idle_pc = true,
@@ -75,32 +74,32 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
 	{
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x1f8,
-		.features = VIG_SC7180_MASK_SDMA,
-		.sblk = &sm8250_vig_sblk_0,
+		.features = VIG_SDM845_MASK_SDMA,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG0,
 	}, {
 		.name = "sspp_1", .id = SSPP_VIG1,
 		.base = 0x6000, .len = 0x1f8,
-		.features = VIG_SC7180_MASK_SDMA,
-		.sblk = &sm8250_vig_sblk_1,
+		.features = VIG_SDM845_MASK_SDMA,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 4,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG1,
 	}, {
 		.name = "sspp_2", .id = SSPP_VIG2,
 		.base = 0x8000, .len = 0x1f8,
-		.features = VIG_SC7180_MASK_SDMA,
-		.sblk = &sm8250_vig_sblk_2,
+		.features = VIG_SDM845_MASK_SDMA,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 8,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG2,
 	}, {
 		.name = "sspp_3", .id = SSPP_VIG3,
 		.base = 0xa000, .len = 0x1f8,
-		.features = VIG_SC7180_MASK_SDMA,
-		.sblk = &sm8250_vig_sblk_3,
+		.features = VIG_SDM845_MASK_SDMA,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 12,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x1f8,
 		.features = DMA_SDM845_MASK_SDMA,
-		.sblk = &sdm845_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
 		.name = "sspp_9", .id = SSPP_DMA1,
 		.base = 0x26000, .len = 0x1f8,
 		.features = DMA_SDM845_MASK_SDMA,
-		.sblk = &sdm845_dma_sblk_1,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 5,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
 		.name = "sspp_10", .id = SSPP_DMA2,
 		.base = 0x28000, .len = 0x1f8,
 		.features = DMA_CURSOR_SDM845_MASK_SDMA,
-		.sblk = &sdm845_dma_sblk_2,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 9,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
 		.name = "sspp_11", .id = SSPP_DMA3,
 		.base = 0x2a000, .len = 0x1f8,
 		.features = DMA_CURSOR_SDM845_MASK_SDMA,
-		.sblk = &sdm845_dma_sblk_3,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 13,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA3,
@@ -337,8 +336,8 @@ static const struct dpu_wb_cfg sm8250_wb[] = {
 		.name = "wb_2", .id = WB_2,
 		.base = 0x65000, .len = 0x2c8,
 		.features = WB_SM8250_MASK,
-		.format_list = wb2_formats,
-		.num_formats = ARRAY_SIZE(wb2_formats),
+		.format_list = wb2_formats_rgb_yuv,
+		.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
 		.clk_ctrl = DPU_CLK_CTRL_WB2,
 		.xin_id = 6,
 		.vbif_idx = VBIF_RT,
@@ -385,6 +384,7 @@ const struct dpu_mdss_cfg dpu_sm8250_cfg = {
 	.mdss_ver = &sm8250_mdss_ver,
 	.caps = &sm8250_dpu_caps,
 	.mdp = &sm8250_mdp,
+	.cdm = &sc7280_cdm,
 	.ctl_count = ARRAY_SIZE(sm8250_ctl),
 	.ctl = sm8250_ctl,
 	.sspp_count = ARRAY_SIZE(sm8250_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
index 9bfa15e4e6..7382ebb6e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
@@ -10,7 +10,6 @@
 static const struct dpu_caps sc7180_dpu_caps = {
 	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
 	.max_mixer_blendstages = 0x9,
-	.qseed_type = DPU_SSPP_SCALER_QSEED4,
 	.has_dim_layer = true,
 	.has_idle_pc = true,
 	.max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
@@ -52,8 +51,8 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
 	{
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x1f8,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sc7180_vig_sblk_0,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -61,7 +60,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x1f8,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -69,7 +68,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
 		.name = "sspp_9", .id = SSPP_DMA1,
 		.base = 0x26000, .len = 0x1f8,
 		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_1,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 5,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -77,7 +76,7 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
 		.name = "sspp_10", .id = SSPP_DMA2,
 		.base = 0x28000, .len = 0x1f8,
 		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_2,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 9,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -158,8 +157,8 @@ static const struct dpu_wb_cfg sc7180_wb[] = {
 		.name = "wb_2", .id = WB_2,
 		.base = 0x65000, .len = 0x2c8,
 		.features = WB_SM8250_MASK,
-		.format_list = wb2_formats,
-		.num_formats = ARRAY_SIZE(wb2_formats),
+		.format_list = wb2_formats_rgb,
+		.num_formats = ARRAY_SIZE(wb2_formats_rgb),
 		.clk_ctrl = DPU_CLK_CTRL_WB2,
 		.xin_id = 6,
 		.vbif_idx = VBIF_RT,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
index 57ce14c18d..43f64a005f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
@@ -10,7 +10,6 @@
 static const struct dpu_caps sm6115_dpu_caps = {
 	.max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
 	.max_mixer_blendstages = 0x4,
-	.qseed_type = DPU_SSPP_SCALER_QSEED4,
 	.has_dim_layer = true,
 	.has_idle_pc = true,
 	.max_linewidth = 2160,
@@ -39,8 +38,8 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
 	{
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x1f8,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm6115_vig_sblk_0,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -48,7 +47,7 @@ static const struct dpu_sspp_cfg sm6115_sspp[] = {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x1f8,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index 62db84bd15..e17a30be75 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -11,7 +11,6 @@
 static const struct dpu_caps sm6350_dpu_caps = {
 	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
 	.max_mixer_blendstages = 0x7,
-	.qseed_type = DPU_SSPP_SCALER_QSEED4,
 	.has_src_split = true,
 	.has_dim_layer = true,
 	.has_idle_pc = true,
@@ -59,8 +58,8 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
 	{
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x1f8,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sc7180_vig_sblk_0,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -68,7 +67,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x1f8,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -76,7 +75,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
 		.name = "sspp_9", .id = SSPP_DMA1,
 		.base = 0x26000, .len = 0x1f8,
 		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_1,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 5,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -84,7 +83,7 @@ static const struct dpu_sspp_cfg sm6350_sspp[] = {
 		.name = "sspp_10", .id = SSPP_DMA2,
 		.base = 0x28000, .len = 0x1f8,
 		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_2,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 9,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
index fb36fba517..3cbb2fe8ab 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
@@ -39,7 +39,7 @@ static const struct dpu_sspp_cfg qcm2290_sspp[] = {
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x1f8,
 		.features = VIG_QCM2290_MASK,
-		.sblk = &qcm2290_vig_sblk_0,
+		.sblk = &dpu_vig_sblk_noscale,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -47,7 +47,7 @@ static const struct dpu_sspp_cfg qcm2290_sspp[] = {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x1f8,
 		.features = DMA_SDM845_MASK,
-		.sblk = &qcm2290_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
index 5a3aad364c..a06c8634d2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_9_sm6375.h
@@ -11,7 +11,6 @@
 static const struct dpu_caps sm6375_dpu_caps = {
 	.max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
 	.max_mixer_blendstages = 0x4,
-	.qseed_type = DPU_SSPP_SCALER_QSEED4,
 	.has_dim_layer = true,
 	.has_idle_pc = true,
 	.max_linewidth = 2160,
@@ -40,8 +39,8 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
 	{
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x1f8,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm6115_vig_sblk_0,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -49,7 +48,7 @@ static const struct dpu_sspp_cfg sm6375_sspp[] = {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x1f8,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
index 022b0408c2..aced16e350 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
@@ -10,7 +10,6 @@
 static const struct dpu_caps sm8350_dpu_caps = {
 	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
 	.max_mixer_blendstages = 0xb,
-	.qseed_type = DPU_SSPP_SCALER_QSEED4,
 	.has_src_split = true,
 	.has_dim_layer = true,
 	.has_idle_pc = true,
@@ -75,64 +74,64 @@ static const struct dpu_sspp_cfg sm8350_sspp[] = {
 	{
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x1f8,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm8250_vig_sblk_0,
+		.features = VIG_SDM845_MASK_SDMA,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG0,
 	}, {
 		.name = "sspp_1", .id = SSPP_VIG1,
 		.base = 0x6000, .len = 0x1f8,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm8250_vig_sblk_1,
+		.features = VIG_SDM845_MASK_SDMA,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 4,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG1,
 	}, {
 		.name = "sspp_2", .id = SSPP_VIG2,
 		.base = 0x8000, .len = 0x1f8,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm8250_vig_sblk_2,
+		.features = VIG_SDM845_MASK_SDMA,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 8,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG2,
 	}, {
 		.name = "sspp_3", .id = SSPP_VIG3,
 		.base = 0xa000, .len = 0x1f8,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm8250_vig_sblk_3,
+		.features = VIG_SDM845_MASK_SDMA,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 12,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG3,
 	}, {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x1f8,
-		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_0,
+		.features = DMA_SDM845_MASK_SDMA,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
 	}, {
 		.name = "sspp_9", .id = SSPP_DMA1,
 		.base = 0x26000, .len = 0x1f8,
-		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_1,
+		.features = DMA_SDM845_MASK_SDMA,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 5,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA1,
 	}, {
 		.name = "sspp_10", .id = SSPP_DMA2,
 		.base = 0x28000, .len = 0x1f8,
-		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_2,
+		.features = DMA_CURSOR_SDM845_MASK_SDMA,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 9,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA2,
 	}, {
 		.name = "sspp_11", .id = SSPP_DMA3,
 		.base = 0x2a000, .len = 0x1f8,
-		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_3,
+		.features = DMA_CURSOR_SDM845_MASK_SDMA,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 13,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA3,
@@ -304,8 +303,8 @@ static const struct dpu_wb_cfg sm8350_wb[] = {
 		.name = "wb_2", .id = WB_2,
 		.base = 0x65000, .len = 0x2c8,
 		.features = WB_SM8250_MASK,
-		.format_list = wb2_formats,
-		.num_formats = ARRAY_SIZE(wb2_formats),
+		.format_list = wb2_formats_rgb,
+		.num_formats = ARRAY_SIZE(wb2_formats_rgb),
 		.clk_ctrl = DPU_CLK_CTRL_WB2,
 		.xin_id = 6,
 		.vbif_idx = VBIF_RT,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
index b9c296e51e..2f153e0b5c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
@@ -10,7 +10,6 @@
 static const struct dpu_caps sc7280_dpu_caps = {
 	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
 	.max_mixer_blendstages = 0x7,
-	.qseed_type = DPU_SSPP_SCALER_QSEED4,
 	.has_dim_layer = true,
 	.has_idle_pc = true,
 	.max_linewidth = 2400,
@@ -58,7 +57,7 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x1f8,
 		.features = VIG_SC7280_MASK_SDMA,
-		.sblk = &sc7280_vig_sblk_0,
+		.sblk = &dpu_vig_sblk_qseed3_3_0_rot_v2,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG0,
@@ -66,7 +65,7 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x1f8,
 		.features = DMA_SDM845_MASK_SDMA,
-		.sblk = &sdm845_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -74,7 +73,7 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
 		.name = "sspp_9", .id = SSPP_DMA1,
 		.base = 0x26000, .len = 0x1f8,
 		.features = DMA_CURSOR_SDM845_MASK_SDMA,
-		.sblk = &sdm845_dma_sblk_1,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 5,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -82,7 +81,7 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
 		.name = "sspp_10", .id = SSPP_DMA2,
 		.base = 0x28000, .len = 0x1f8,
 		.features = DMA_CURSOR_SDM845_MASK_SDMA,
-		.sblk = &sdm845_dma_sblk_2,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 9,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -170,8 +169,8 @@ static const struct dpu_wb_cfg sc7280_wb[] = {
 		.name = "wb_2", .id = WB_2,
 		.base = 0x65000, .len = 0x2c8,
 		.features = WB_SM8250_MASK,
-		.format_list = wb2_formats,
-		.num_formats = ARRAY_SIZE(wb2_formats),
+		.format_list = wb2_formats_rgb_yuv,
+		.num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
 		.clk_ctrl = DPU_CLK_CTRL_WB2,
 		.xin_id = 6,
 		.vbif_idx = VBIF_RT,
@@ -249,6 +248,7 @@ const struct dpu_mdss_cfg dpu_sc7280_cfg = {
 	.mdss_ver = &sc7280_mdss_ver,
 	.caps = &sc7280_dpu_caps,
 	.mdp = &sc7280_mdp,
+	.cdm = &sc7280_cdm,
 	.ctl_count = ARRAY_SIZE(sc7280_ctl),
 	.ctl = sc7280_ctl,
 	.sspp_count = ARRAY_SIZE(sc7280_sspp),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index 4c0528794e..0d143e390e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -10,7 +10,6 @@
 static const struct dpu_caps sc8280xp_dpu_caps = {
 	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
 	.max_mixer_blendstages = 11,
-	.qseed_type = DPU_SSPP_SCALER_QSEED4,
 	.has_src_split = true,
 	.has_dim_layer = true,
 	.has_idle_pc = true,
@@ -75,32 +74,32 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
 	{
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x2ac,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm8250_vig_sblk_0,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG0,
 	}, {
 		.name = "sspp_1", .id = SSPP_VIG1,
 		.base = 0x6000, .len = 0x2ac,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm8250_vig_sblk_1,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 4,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG1,
 	}, {
 		.name = "sspp_2", .id = SSPP_VIG2,
 		.base = 0x8000, .len = 0x2ac,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm8250_vig_sblk_2,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 8,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG2,
 	}, {
 		.name = "sspp_3", .id = SSPP_VIG3,
 		.base = 0xa000, .len = 0x2ac,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm8250_vig_sblk_3,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_3_0,
 		.xin_id = 12,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -108,7 +107,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x2ac,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -116,7 +115,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
 		.name = "sspp_9", .id = SSPP_DMA1,
 		.base = 0x26000, .len = 0x2ac,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_1,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 5,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -124,7 +123,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
 		.name = "sspp_10", .id = SSPP_DMA2,
 		.base = 0x28000, .len = 0x2ac,
 		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_2,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 9,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -132,7 +131,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
 		.name = "sspp_11", .id = SSPP_DMA3,
 		.base = 0x2a000, .len = 0x2ac,
 		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_3,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 13,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index 7adc42257e..a1779c5597 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -10,7 +10,6 @@
 static const struct dpu_caps sm8450_dpu_caps = {
 	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
 	.max_mixer_blendstages = 0xb,
-	.qseed_type = DPU_SSPP_SCALER_QSEED4,
 	.has_src_split = true,
 	.has_dim_layer = true,
 	.has_idle_pc = true,
@@ -76,32 +75,32 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
 	{
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x32c,
-		.features = VIG_SC7180_MASK_SDMA,
-		.sblk = &sm8450_vig_sblk_0,
+		.features = VIG_SDM845_MASK_SDMA,
+		.sblk = &dpu_vig_sblk_qseed3_3_1,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG0,
 	}, {
 		.name = "sspp_1", .id = SSPP_VIG1,
 		.base = 0x6000, .len = 0x32c,
-		.features = VIG_SC7180_MASK_SDMA,
-		.sblk = &sm8450_vig_sblk_1,
+		.features = VIG_SDM845_MASK_SDMA,
+		.sblk = &dpu_vig_sblk_qseed3_3_1,
 		.xin_id = 4,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG1,
 	}, {
 		.name = "sspp_2", .id = SSPP_VIG2,
 		.base = 0x8000, .len = 0x32c,
-		.features = VIG_SC7180_MASK_SDMA,
-		.sblk = &sm8450_vig_sblk_2,
+		.features = VIG_SDM845_MASK_SDMA,
+		.sblk = &dpu_vig_sblk_qseed3_3_1,
 		.xin_id = 8,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG2,
 	}, {
 		.name = "sspp_3", .id = SSPP_VIG3,
 		.base = 0xa000, .len = 0x32c,
-		.features = VIG_SC7180_MASK_SDMA,
-		.sblk = &sm8450_vig_sblk_3,
+		.features = VIG_SDM845_MASK_SDMA,
+		.sblk = &dpu_vig_sblk_qseed3_3_1,
 		.xin_id = 12,
 		.type = SSPP_TYPE_VIG,
 		.clk_ctrl = DPU_CLK_CTRL_VIG3,
@@ -109,7 +108,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x32c,
 		.features = DMA_SDM845_MASK_SDMA,
-		.sblk = &sdm845_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA0,
@@ -117,7 +116,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
 		.name = "sspp_9", .id = SSPP_DMA1,
 		.base = 0x26000, .len = 0x32c,
 		.features = DMA_SDM845_MASK_SDMA,
-		.sblk = &sdm845_dma_sblk_1,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 5,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA1,
@@ -125,7 +124,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
 		.name = "sspp_10", .id = SSPP_DMA2,
 		.base = 0x28000, .len = 0x32c,
 		.features = DMA_CURSOR_SDM845_MASK_SDMA,
-		.sblk = &sdm845_dma_sblk_2,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 9,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA2,
@@ -133,7 +132,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
 		.name = "sspp_11", .id = SSPP_DMA3,
 		.base = 0x2a000, .len = 0x32c,
 		.features = DMA_CURSOR_SDM845_MASK_SDMA,
-		.sblk = &sdm845_dma_sblk_3,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 13,
 		.type = SSPP_TYPE_DMA,
 		.clk_ctrl = DPU_CLK_CTRL_DMA3,
@@ -322,8 +321,8 @@ static const struct dpu_wb_cfg sm8450_wb[] = {
 		.name = "wb_2", .id = WB_2,
 		.base = 0x65000, .len = 0x2c8,
 		.features = WB_SM8250_MASK,
-		.format_list = wb2_formats,
-		.num_formats = ARRAY_SIZE(wb2_formats),
+		.format_list = wb2_formats_rgb,
+		.num_formats = ARRAY_SIZE(wb2_formats_rgb),
 		.clk_ctrl = DPU_CLK_CTRL_WB2,
 		.xin_id = 6,
 		.vbif_idx = VBIF_RT,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 69b80af656..ad48defa15 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -10,7 +10,6 @@
 static const struct dpu_caps sm8550_dpu_caps = {
 	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
 	.max_mixer_blendstages = 0xb,
-	.qseed_type = DPU_SSPP_SCALER_QSEED4,
 	.has_src_split = true,
 	.has_dim_layer = true,
 	.has_idle_pc = true,
@@ -67,71 +66,71 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
 	{
 		.name = "sspp_0", .id = SSPP_VIG0,
 		.base = 0x4000, .len = 0x344,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm8550_vig_sblk_0,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_3_2,
 		.xin_id = 0,
 		.type = SSPP_TYPE_VIG,
 	}, {
 		.name = "sspp_1", .id = SSPP_VIG1,
 		.base = 0x6000, .len = 0x344,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm8550_vig_sblk_1,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_3_2,
 		.xin_id = 4,
 		.type = SSPP_TYPE_VIG,
 	}, {
 		.name = "sspp_2", .id = SSPP_VIG2,
 		.base = 0x8000, .len = 0x344,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm8550_vig_sblk_2,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_3_2,
 		.xin_id = 8,
 		.type = SSPP_TYPE_VIG,
 	}, {
 		.name = "sspp_3", .id = SSPP_VIG3,
 		.base = 0xa000, .len = 0x344,
-		.features = VIG_SC7180_MASK,
-		.sblk = &sm8550_vig_sblk_3,
+		.features = VIG_SDM845_MASK,
+		.sblk = &dpu_vig_sblk_qseed3_3_2,
 		.xin_id = 12,
 		.type = SSPP_TYPE_VIG,
 	}, {
 		.name = "sspp_8", .id = SSPP_DMA0,
 		.base = 0x24000, .len = 0x344,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_0,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 1,
 		.type = SSPP_TYPE_DMA,
 	}, {
 		.name = "sspp_9", .id = SSPP_DMA1,
 		.base = 0x26000, .len = 0x344,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_1,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 5,
 		.type = SSPP_TYPE_DMA,
 	}, {
 		.name = "sspp_10", .id = SSPP_DMA2,
 		.base = 0x28000, .len = 0x344,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_2,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 9,
 		.type = SSPP_TYPE_DMA,
 	}, {
 		.name = "sspp_11", .id = SSPP_DMA3,
 		.base = 0x2a000, .len = 0x344,
 		.features = DMA_SDM845_MASK,
-		.sblk = &sdm845_dma_sblk_3,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 13,
 		.type = SSPP_TYPE_DMA,
 	}, {
 		.name = "sspp_12", .id = SSPP_DMA4,
 		.base = 0x2c000, .len = 0x344,
 		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sm8550_dma_sblk_4,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 14,
 		.type = SSPP_TYPE_DMA,
 	}, {
 		.name = "sspp_13", .id = SSPP_DMA5,
 		.base = 0x2e000, .len = 0x344,
 		.features = DMA_CURSOR_SDM845_MASK,
-		.sblk = &sm8550_dma_sblk_5,
+		.sblk = &dpu_dma_sblk,
 		.xin_id = 15,
 		.type = SSPP_TYPE_DMA,
 	},
@@ -316,8 +315,8 @@ static const struct dpu_wb_cfg sm8550_wb[] = {
 		.name = "wb_2", .id = WB_2,
 		.base = 0x65000, .len = 0x2c8,
 		.features = WB_SM8250_MASK,
-		.format_list = wb2_formats,
-		.num_formats = ARRAY_SIZE(wb2_formats),
+		.format_list = wb2_formats_rgb,
+		.num_formats = ARRAY_SIZE(wb2_formats_rgb),
 		.xin_id = 6,
 		.vbif_idx = VBIF_RT,
 		.maxlinewidth = 4096,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index ef871239ad..68fae048a9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -459,15 +459,15 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
 			&perf->core_clk_rate);
 	debugfs_create_u32("enable_bw_release", 0600, entry,
 			(u32 *)&perf->enable_bw_release);
-	debugfs_create_u32("threshold_low", 0600, entry,
+	debugfs_create_u32("threshold_low", 0400, entry,
 			(u32 *)&perf->perf_cfg->max_bw_low);
-	debugfs_create_u32("threshold_high", 0600, entry,
+	debugfs_create_u32("threshold_high", 0400, entry,
 			(u32 *)&perf->perf_cfg->max_bw_high);
-	debugfs_create_u32("min_core_ib", 0600, entry,
+	debugfs_create_u32("min_core_ib", 0400, entry,
 			(u32 *)&perf->perf_cfg->min_core_ib);
-	debugfs_create_u32("min_llcc_ib", 0600, entry,
+	debugfs_create_u32("min_llcc_ib", 0400, entry,
 			(u32 *)&perf->perf_cfg->min_llcc_ib);
-	debugfs_create_u32("min_dram_ib", 0600, entry,
+	debugfs_create_u32("min_dram_ib", 0400, entry,
 			(u32 *)&perf->perf_cfg->min_dram_ib);
 	debugfs_create_file("perf_mode", 0600, entry,
 			(u32 *)perf, &dpu_core_perf_mode_fops);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index db501ce1d8..88c2e51ab1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -51,17 +51,6 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
 	return to_dpu_kms(priv->kms);
 }
 
-static void dpu_crtc_destroy(struct drm_crtc *crtc)
-{
-	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-
-	if (!crtc)
-		return;
-
-	drm_crtc_cleanup(crtc);
-	kfree(dpu_crtc);
-}
-
 static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -1435,7 +1424,6 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc)
 
 static const struct drm_crtc_funcs dpu_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
-	.destroy = dpu_crtc_destroy,
 	.page_flip = drm_atomic_helper_page_flip,
 	.reset = dpu_crtc_reset,
 	.atomic_duplicate_state = dpu_crtc_duplicate_state,
@@ -1469,9 +1457,13 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
 	struct dpu_crtc *dpu_crtc;
 	int i, ret;
 
-	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
-	if (!dpu_crtc)
-		return ERR_PTR(-ENOMEM);
+	dpu_crtc = drmm_crtc_alloc_with_planes(dev, struct dpu_crtc, base,
+					       plane, cursor,
+					       &dpu_crtc_funcs,
+					       NULL);
+
+	if (IS_ERR(dpu_crtc))
+		return ERR_CAST(dpu_crtc);
 
 	crtc = &dpu_crtc->base;
 	crtc->dev = dev;
@@ -1491,9 +1483,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
 			dpu_crtc_frame_event_work);
 	}
 
-	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
-				  NULL);
-
 	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
 
 	if (dpu_kms->catalog->dspp_count)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index b26d23b2ab..aca689314c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 
 #include "msm_drv.h"
 #include "dpu_kms.h"
@@ -26,6 +27,7 @@
 #include "dpu_hw_dspp.h"
 #include "dpu_hw_dsc.h"
 #include "dpu_hw_merge3d.h"
+#include "dpu_hw_cdm.h"
 #include "dpu_formats.h"
 #include "dpu_encoder_phys.h"
 #include "dpu_crtc.h"
@@ -142,10 +144,6 @@ enum dpu_enc_rc_states
 *			to track crtc in the disable() hook which is called
 *			_after_ encoder_mask is cleared.
 * @connector:		If a mode is set, cached pointer to the active connector
- * @crtc_kickoff_cb:		Callback into CRTC that will flush & start
- *				all CTL paths
- * @crtc_kickoff_cb_data:	Opaque user data given to crtc_kickoff_cb
- * @debugfs_root:		Debug file system root file node
 * @enc_lock:			Lock around physical encoder
 *				create/destroy/enable/disable
 * @frame_busy_mask:		Bitmask tracking which phys_enc we are still
@@ -154,6 +152,8 @@ enum dpu_enc_rc_states
 * @crtc_frame_event_cb:	callback handler for frame event
 * @crtc_frame_event_cb_data:	callback handler private data
 * @frame_done_timeout_ms:	frame done timeout in ms
+ * @frame_done_timeout_cnt:	atomic counter tracking the number of frame
+ *				done timeouts
 * @frame_done_timer:		watchdog timer for frame done event
 * @disp_info:			local copy of msm_display_info struct
 * @idle_pc_supported:		indicate if idle power collaps is supported
@@ -187,13 +187,13 @@ struct dpu_encoder_virt {
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
 
-	struct dentry *debugfs_root;
 	struct mutex enc_lock;
 	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
 
 	void (*crtc_frame_event_cb)(void *, u32 event);
 	void *crtc_frame_event_cb_data;
 
 	atomic_t frame_done_timeout_ms;
+	atomic_t frame_done_timeout_cnt;
 	struct timer_list frame_done_timer;
 
 	struct msm_display_info disp_info;
@@ -449,41 +449,6 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
 	return linecount;
 }
 
-static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
-{
-	struct dpu_encoder_virt *dpu_enc = NULL;
-	int i = 0;
-
-	if (!drm_enc) {
-		DPU_ERROR("invalid encoder\n");
-		return;
-	}
-
-	dpu_enc = to_dpu_encoder_virt(drm_enc);
-	DPU_DEBUG_ENC(dpu_enc, "\n");
-
-	mutex_lock(&dpu_enc->enc_lock);
-
-	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
-		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
-		if (phys->ops.destroy) {
-			phys->ops.destroy(phys);
-			--dpu_enc->num_phys_encs;
-			dpu_enc->phys_encs[i] = NULL;
-		}
-	}
-
-	if (dpu_enc->num_phys_encs)
-		DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
-				dpu_enc->num_phys_encs);
-	dpu_enc->num_phys_encs = 0;
-	mutex_unlock(&dpu_enc->enc_lock);
-
-	drm_encoder_cleanup(drm_enc);
-	mutex_destroy(&dpu_enc->enc_lock);
-}
-
 void dpu_encoder_helper_split_config(
 		struct dpu_encoder_phys *phys_enc,
 		enum dpu_intf interface)
@@ -624,6 +589,7 @@ static int dpu_encoder_virt_atomic_check(
 	struct drm_display_mode *adj_mode;
 	struct msm_display_topology topology;
 	struct dpu_global_state *global_state;
+	struct drm_framebuffer *fb;
 	struct drm_dsc_config *dsc;
 	int i = 0;
 	int ret = 0;
@@ -664,6 +630,22 @@ static int dpu_encoder_virt_atomic_check(
 
 	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
 
+	/*
+	 * Use CDM only for writeback at the moment as other interfaces cannot handle it.
+	 * if writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
+	 * earlier.
+	 */
+	if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+		fb = conn_state->writeback_job->fb;
+
+		if (fb && DPU_FORMAT_IS_YUV(to_dpu_format(msm_framebuffer_format(fb))))
+			topology.needs_cdm = true;
+		if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
+			crtc_state->mode_changed = true;
+		else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
+			crtc_state->mode_changed = true;
+	}
+
 	/*
 	 * Release and Allocate resources on every modeset
 	 * Dont allocate when active is false.
@@ -1104,6 +1086,15 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 
 	dpu_enc->dsc_mask = dsc_mask;
 
+	if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+		struct dpu_hw_blk *hw_cdm = NULL;
+
+		dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+					      drm_enc->base.id, DPU_HW_BLK_CDM,
+					      &hw_cdm, 1);
+		dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
+	}
+
 	cstate = to_dpu_crtc_state(crtc_state);
 
 	for (i = 0; i < num_lm; i++) {
@@ -1214,6 +1205,8 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
 
 	dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
 
+	atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
+
 	if (disp_info->intf_type == INTF_DP)
 		dpu_enc->wide_bus_en = msm_dp_wide_bus_available(priv->dp[index]);
 	else if (disp_info->intf_type == INTF_DSI)
@@ -2092,6 +2085,15 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
 				phys_enc->hw_pp->merge_3d->idx);
 	}
 
+	if (phys_enc->hw_cdm) {
+		if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp)
+			phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
+								PINGPONG_NONE);
+		if (phys_enc->hw_ctl->ops.update_pending_flush_cdm)
+			phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl,
+								       phys_enc->hw_cdm->idx);
+	}
+
 	if (dpu_enc->dsc) {
 		dpu_encoder_unprep_dsc(dpu_enc);
 		dpu_enc->dsc = NULL;
@@ -2120,18 +2122,20 @@
 #ifdef CONFIG_DEBUG_FS
 static int _dpu_encoder_status_show(struct seq_file *s, void *data)
 {
-	struct dpu_encoder_virt *dpu_enc = s->private;
+	struct drm_encoder *drm_enc = s->private;
+	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
 	int i;
 
 	mutex_lock(&dpu_enc->enc_lock);
 
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
-		seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
+		seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d frame_done_cnt:%d",
 				phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
 				phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
 				atomic_read(&phys->vsync_cnt),
-				atomic_read(&phys->underrun_cnt));
+				atomic_read(&phys->underrun_cnt),
+				atomic_read(&dpu_enc->frame_done_timeout_cnt));
 
 		seq_printf(s, "mode: %s\n",
 				dpu_encoder_helper_get_intf_type(phys->intf_mode));
 	}
@@ -2142,49 +2146,18 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
 
 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
 
-static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+static void dpu_encoder_debugfs_init(struct drm_encoder *drm_enc, struct dentry *root)
 {
-	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
-
-	char name[12];
-
-	if (!drm_enc->dev) {
-		DPU_ERROR("invalid encoder or kms\n");
-		return -EINVAL;
-	}
-
-	snprintf(name, sizeof(name), "encoder%u", drm_enc->base.id);
-
-	/* create overall sub-directory for the encoder */
-	dpu_enc->debugfs_root = debugfs_create_dir(name,
-			drm_enc->dev->primary->debugfs_root);
-
 	/* don't error check these */
 	debugfs_create_file("status", 0600,
-			dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
-
-	return 0;
+			root, drm_enc, &_dpu_encoder_status_fops);
 }
 #else
-static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
-{
-	return 0;
-}
+#define dpu_encoder_debugfs_init NULL
 #endif
 
-static int dpu_encoder_late_register(struct drm_encoder *encoder)
-{
-	return _dpu_encoder_init_debugfs(encoder);
-}
-
-static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
-{
-	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
-
-	debugfs_remove_recursive(dpu_enc->debugfs_root);
-}
-
 static int dpu_encoder_virt_add_phys_encs(
+		struct drm_device *dev,
 		struct msm_display_info *disp_info,
 		struct dpu_encoder_virt *dpu_enc,
 		struct dpu_enc_phys_init_params *params)
@@ -2206,7 +2179,7 @@
 
 	if (disp_info->intf_type == INTF_WB) {
-		enc = dpu_encoder_phys_wb_init(params);
+		enc = dpu_encoder_phys_wb_init(dev, params);
 
 		if (IS_ERR(enc)) {
 			DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
@@ -2217,7 +2190,7 @@
 
 		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
 		++dpu_enc->num_phys_encs;
 	} else if (disp_info->is_cmd_mode) {
-		enc = dpu_encoder_phys_cmd_init(params);
+		enc = dpu_encoder_phys_cmd_init(dev, params);
 
 		if (IS_ERR(enc)) {
 			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
@@ -2228,7 +2201,7 @@
 
 		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
 		++dpu_enc->num_phys_encs;
 	} else {
-		enc = dpu_encoder_phys_vid_init(params);
+		enc = dpu_encoder_phys_vid_init(dev, params);
 
 		if (IS_ERR(enc)) {
 			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
@@ -2317,7 +2290,7 @@
 		break;
 	}
 
-	ret = dpu_encoder_virt_add_phys_encs(disp_info,
+	ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info,
 			dpu_enc, &phys_params);
 	if (ret) {
 		DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
@@ -2353,6 +2326,9 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
 
 	DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");
 
+	if (atomic_inc_return(&dpu_enc->frame_done_timeout_cnt) == 1)
+		msm_disp_snapshot_state(drm_enc->dev);
+
 	event = DPU_ENCODER_FRAME_EVENT_ERROR;
 	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
 	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
@@ -2366,9 +2342,7 @@ static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
 };
 
 static const struct drm_encoder_funcs dpu_encoder_funcs = {
-	.destroy = dpu_encoder_destroy,
-	.late_register = dpu_encoder_late_register,
-	.early_unregister = dpu_encoder_early_unregister,
+	.debugfs_init = dpu_encoder_debugfs_init,
 };
 
 struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
@@ -2377,20 +2351,13 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
-	struct drm_encoder *drm_enc = NULL;
-	struct dpu_encoder_virt *dpu_enc = NULL;
-	int ret = 0;
-
-	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
-	if (!dpu_enc)
-		return ERR_PTR(-ENOMEM);
+	struct dpu_encoder_virt *dpu_enc;
+	int ret;
 
-	ret = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
-			drm_enc_mode, NULL);
-	if (ret) {
-		devm_kfree(dev->dev, dpu_enc);
-		return ERR_PTR(ret);
-	}
+	dpu_enc = drmm_encoder_alloc(dev, struct dpu_encoder_virt, base,
+				     &dpu_encoder_funcs, drm_enc_mode, NULL);
+	if (IS_ERR(dpu_enc))
+		return ERR_CAST(dpu_enc);
 
 	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
 
@@ -2400,10 +2367,13 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
 	mutex_init(&dpu_enc->rc_lock);
 
 	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
-	if (ret)
-		goto fail;
+	if (ret) {
+		DPU_ERROR("failed to setup encoder\n");
+		return ERR_PTR(-ENOMEM);
+	}
 
 	atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
+	atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
 	timer_setup(&dpu_enc->frame_done_timer,
 			dpu_encoder_frame_done_timeout, 0);
 
@@ -2416,13 +2386,6 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
 	DPU_DEBUG_ENC(dpu_enc, "created\n");
 
 	return &dpu_enc->base;
-
-fail:
-	DPU_ERROR("failed to create encoder\n");
-	if (drm_enc)
-		dpu_encoder_destroy(drm_enc);
-
-	return ERR_PTR(ret);
 }
 
 int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
@@ -2449,9 +2412,6 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
 	case MSM_ENC_TX_COMPLETE:
 		fn_wait = phys->ops.wait_for_tx_complete;
 		break;
-	case MSM_ENC_VBLANK:
-		fn_wait = phys->ops.wait_for_vblank;
-		break;
 	default:
 		DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n", event);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 96bda57b69..993f263433 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -14,8 +14,10 @@
 #include "dpu_hw_intf.h"
 #include "dpu_hw_wb.h"
 #include "dpu_hw_pingpong.h"
+#include "dpu_hw_cdm.h"
 #include "dpu_hw_ctl.h"
 #include "dpu_hw_top.h"
+#include "dpu_hw_util.h"
 #include "dpu_encoder.h"
 #include "dpu_crtc.h"
 
@@ -72,7 +74,6 @@ struct dpu_encoder_phys;
 * @enable:			DRM Call. Enable a DRM mode.
 * @disable:			DRM Call. Disable mode.
 * @atomic_check:		DRM Call. Atomic check new DRM state.
- * @destroy:			DRM Call. Destroy and release resources.
 * @control_vblank_irq		Register/Deregister for VBLANK IRQ
 * @wait_for_commit_done:	Wait for hardware to have flushed the
 *				current pending frames to hardware
@@ -102,11 +103,9 @@ struct dpu_encoder_phys_ops {
 	int (*atomic_check)(struct dpu_encoder_phys *encoder,
 			    struct drm_crtc_state *crtc_state,
 			    struct drm_connector_state *conn_state);
-	void (*destroy)(struct dpu_encoder_phys *encoder);
 	int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
 	int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
 	int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
-	int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
 	void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc);
 	void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
 	void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
@@ -153,6 +152,7 @@ enum dpu_intr_idx {
 * @hw_pp:			Hardware interface to the ping pong registers
 * @hw_intf:			Hardware interface to the intf registers
 * @hw_wb:			Hardware interface to the wb registers
+ * @hw_cdm:			Hardware interface to the CDM registers
 * @dpu_kms:			Pointer to the dpu_kms top level
 * @cached_mode:		DRM mode cached at mode_set time, acted on in enable
 * @vblank_ctl_lock:		Vblank ctl mutex lock to protect vblank_refcount
@@ -182,6 +182,7 @@ struct dpu_encoder_phys {
 	struct dpu_hw_pingpong *hw_pp;
 	struct dpu_hw_intf *hw_intf;
 	struct dpu_hw_wb *hw_wb;
+	struct dpu_hw_cdm *hw_cdm;
 	struct dpu_kms *dpu_kms;
 	struct drm_display_mode cached_mode;
 	struct mutex vblank_ctl_lock;
@@ -212,6 +213,7 @@ static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
 * @wbirq_refcount:		Reference count of writeback interrupt
 * @wb_done_timeout_cnt:	number of wb done irq timeout errors
 * @wb_cfg:			writeback block config to store fb related details
+ * @cdm_cfg:			cdm block config needed to store writeback block's CDM configuration
 * @wb_conn:			backpointer to writeback connector
 * @wb_job:			backpointer to current writeback job
 * @dest:			dpu buffer layout for current writeback output buffer
@@ -221,6 +223,7 @@ struct dpu_encoder_phys_wb {
 	atomic_t wbirq_refcount;
 	int wb_done_timeout_cnt;
 	struct dpu_hw_wb_cfg wb_cfg;
+	struct dpu_hw_cdm_cfg cdm_cfg;
 	struct drm_writeback_connector *wb_conn;
 	struct drm_writeback_job *wb_job;
 	struct dpu_hw_fmt_layout dest;
@@ -283,22 +286,24 @@ struct dpu_encoder_wait_info {
 * @p:	Pointer to init params structure
 * Return: Error code or newly allocated encoder
 */
-struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
 		struct dpu_enc_phys_init_params *p);
 
 /**
 * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @dev:  Corresponding device for devres management
 * @p:	Pointer to init params structure
 * Return: Error code or newly allocated encoder
 */
-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
 		struct dpu_enc_phys_init_params *p);
 
 /**
 * dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @dev:  Corresponding device for devres management
 * @init:	Pointer to init info structure with initialization params
 */
-struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
 		struct dpu_enc_phys_init_params *p);
 
 /**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 2d788c5e26..a301e28331 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -13,6 +13,8 @@
 #include "dpu_trace.h"
 #include "disp/msm_disp_snapshot.h"
 
+#include
+
 #define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
 		(e) && (e)->base.parent ? \
 		(e)->base.parent->base.id : -1, \
@@ -567,14 +569,6 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
 	phys_enc->enable_state = DPU_ENC_DISABLED;
 }
 
-static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
-{
-	struct dpu_encoder_phys_cmd *cmd_enc =
-		to_dpu_encoder_phys_cmd(phys_enc);
-
-	kfree(cmd_enc);
-}
-
 static void dpu_encoder_phys_cmd_prepare_for_kickoff(
 		struct dpu_encoder_phys *phys_enc)
 {
@@ -690,33 +684,6 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
 	return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
 }
 
-static int dpu_encoder_phys_cmd_wait_for_vblank(
-		struct dpu_encoder_phys *phys_enc)
-{
-	int rc = 0;
-	struct dpu_encoder_phys_cmd *cmd_enc;
-	struct dpu_encoder_wait_info wait_info;
-
-	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
-
-	/* only required for master controller */
-	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
-		return rc;
-
-	wait_info.wq = &cmd_enc->pending_vblank_wq;
-	wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
-	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
-
-	atomic_inc(&cmd_enc->pending_vblank_cnt);
-
-	rc = dpu_encoder_helper_wait_for_irq(phys_enc,
-			phys_enc->irq[INTR_IDX_RDPTR],
-			dpu_encoder_phys_cmd_te_rd_ptr_irq,
-			&wait_info);
-
-	return rc;
-}
-
 static void dpu_encoder_phys_cmd_handle_post_kickoff(
 		struct dpu_encoder_phys *phys_enc)
 {
@@ -740,12 +707,10 @@ static void dpu_encoder_phys_cmd_init_ops(
 	ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
 	ops->enable = dpu_encoder_phys_cmd_enable;
 	ops->disable = dpu_encoder_phys_cmd_disable;
-	ops->destroy = dpu_encoder_phys_cmd_destroy;
 	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
 	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
 	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
 	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
-	ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
 	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
 	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
 	ops->irq_control = dpu_encoder_phys_cmd_irq_control;
@@ -755,7 +720,7 @@ static void dpu_encoder_phys_cmd_init_ops(
 	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
 }
 
-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
 		struct dpu_enc_phys_init_params *p)
 {
 	struct dpu_encoder_phys *phys_enc = NULL;
@@ -763,7 +728,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
 
 	DPU_DEBUG("intf\n");
 
-	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+	cmd_enc = drmm_kzalloc(dev, sizeof(*cmd_enc), GFP_KERNEL);
 	if (!cmd_enc) {
 		DPU_ERROR("failed to allocate\n");
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index f66e0f125a..f02411b062 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -11,6 +11,8 @@
 #include "dpu_trace.h"
 #include "disp/msm_disp_snapshot.h"
 
+#include
+
 #define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
 		(e) && (e)->parent ? \
 		(e)->parent->base.id : -1, \
@@ -450,13 +452,7 @@ skip_flush:
 	phys_enc->enable_state = DPU_ENC_ENABLING;
 }
 
-static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
-{
-	DPU_DEBUG_VIDENC(phys_enc, "\n");
-	kfree(phys_enc);
-}
-
-static int dpu_encoder_phys_vid_wait_for_vblank(
+static int dpu_encoder_phys_vid_wait_for_tx_complete(
 		struct dpu_encoder_phys *phys_enc)
 {
 	struct dpu_encoder_wait_info wait_info;
@@ -570,7 +566,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
 	 * scanout buffer) don't latch properly..
 	 */
 	if (dpu_encoder_phys_vid_is_master(phys_enc)) {
-		ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+		ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
 		if (ret) {
 			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
 			DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
@@ -590,7 +586,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
 		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
 		dpu_encoder_phys_inc_pending(phys_enc);
 		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-		ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+		ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
 		if (ret) {
 			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
 			DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
@@ -693,11 +689,9 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
 	ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
 	ops->enable = dpu_encoder_phys_vid_enable;
 	ops->disable = dpu_encoder_phys_vid_disable;
-	ops->destroy = dpu_encoder_phys_vid_destroy;
 	ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
 	ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
-	ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
-	ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+	ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_tx_complete;
 	ops->irq_control = dpu_encoder_phys_vid_irq_control;
 	ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
 	ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
@@ -706,7 +700,7 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
 	ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
 }
 
-struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
 		struct dpu_enc_phys_init_params *p)
 {
 	struct dpu_encoder_phys *phys_enc = NULL;
@@ -716,7 +710,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
 		return ERR_PTR(-EINVAL);
 	}
 
-	phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
+	phys_enc = drmm_kzalloc(dev, sizeof(*phys_enc), GFP_KERNEL);
 	if (!phys_enc) {
 		DPU_ERROR("failed to create encoder due to memory allocation error\n");
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 0b6a761d68..4cd2d9e313 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -8,6 +8,7 @@
 #include
 #include
+#include
 
 #include "dpu_encoder_phys.h"
 #include "dpu_formats.h"
@@ -206,13 +207,14 @@ static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
 }
 
 /**
- * dpu_encoder_phys_wb_setup_cdp - setup chroma down prefetch block
+ * dpu_encoder_phys_wb_setup_ctl - setup wb pipeline for ctl path
 * @phys_enc:Pointer to physical encoder
 */
dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc) +static void dpu_encoder_phys_wb_setup_ctl(struct dpu_encoder_phys *phys_enc) { struct dpu_hw_wb *hw_wb; struct dpu_hw_ctl *ctl; + struct dpu_hw_cdm *hw_cdm; if (!phys_enc) { DPU_ERROR("invalid encoder\n"); @@ -221,6 +223,7 @@ static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc) hw_wb = phys_enc->hw_wb; ctl = phys_enc->hw_ctl; + hw_cdm = phys_enc->hw_cdm; if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && (phys_enc->hw_ctl && @@ -237,6 +240,9 @@ static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc) if (mode_3d && hw_pp && hw_pp->merge_3d) intf_cfg.merge_3d = hw_pp->merge_3d->idx; + if (hw_cdm) + intf_cfg.cdm = hw_cdm->idx; + if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode) phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, mode_3d); @@ -258,6 +264,96 @@ static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc) } } +/** + * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block + * This API does not handle DPU_CHROMA_H1V2. + * @phys_enc:Pointer to physical encoder + */ +static void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_cdm *hw_cdm; + struct dpu_hw_cdm_cfg *cdm_cfg; + struct dpu_hw_pingpong *hw_pp; + struct dpu_encoder_phys_wb *wb_enc; + const struct msm_format *format; + const struct dpu_format *dpu_fmt; + struct drm_writeback_job *wb_job; + int ret; + + if (!phys_enc) + return; + + wb_enc = to_dpu_encoder_phys_wb(phys_enc); + cdm_cfg = &wb_enc->cdm_cfg; + hw_pp = phys_enc->hw_pp; + hw_cdm = phys_enc->hw_cdm; + wb_job = wb_enc->wb_job; + + format = msm_framebuffer_format(wb_enc->wb_job->fb); + dpu_fmt = dpu_get_dpu_format_ext(format->pixel_format, wb_job->fb->modifier); + + if (!hw_cdm) + return; + + if (!DPU_FORMAT_IS_YUV(dpu_fmt)) { + DPU_DEBUG("[enc:%d] cdm_disable fmt:%x\n", DRMID(phys_enc->parent), + dpu_fmt->base.pixel_format); + if (hw_cdm->ops.bind_pingpong_blk) + hw_cdm->ops.bind_pingpong_blk(hw_cdm, PINGPONG_NONE); + + return; + } + + memset(cdm_cfg, 0, sizeof(struct dpu_hw_cdm_cfg)); + + cdm_cfg->output_width = wb_job->fb->width; + cdm_cfg->output_height = wb_job->fb->height; + cdm_cfg->output_fmt = dpu_fmt; + cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB; + cdm_cfg->output_bit_depth = DPU_FORMAT_IS_DX(dpu_fmt) ? 
+ CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT; + cdm_cfg->csc_cfg = &dpu_csc10_rgb2yuv_601l; + + /* enable 10 bit logic */ + switch (cdm_cfg->output_fmt->chroma_sample) { + case DPU_CHROMA_RGB: + cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; + cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; + break; + case DPU_CHROMA_H2V1: + cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; + cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; + break; + case DPU_CHROMA_420: + cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; + cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE; + break; + case DPU_CHROMA_H1V2: + default: + DPU_ERROR("[enc:%d] unsupported chroma sampling type\n", + DRMID(phys_enc->parent)); + cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; + cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; + break; + } + + DPU_DEBUG("[enc:%d] cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n", + DRMID(phys_enc->parent), cdm_cfg->output_width, + cdm_cfg->output_height, cdm_cfg->output_fmt->base.pixel_format, + cdm_cfg->output_type, cdm_cfg->output_bit_depth, + cdm_cfg->h_cdwn_type, cdm_cfg->v_cdwn_type); + + if (hw_cdm->ops.enable) { + cdm_cfg->pp_id = hw_pp->idx; + ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg); + if (ret < 0) { + DPU_ERROR("[enc:%d] failed to enable CDM; ret:%d\n", + DRMID(phys_enc->parent), ret); + return; + } + } +} + /** * dpu_encoder_phys_wb_atomic_check - verify and fixup given atomic states * @phys_enc: Pointer to physical encoder @@ -307,7 +403,7 @@ static int dpu_encoder_phys_wb_atomic_check( return -EINVAL; } - return 0; + return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state); } @@ -320,6 +416,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) struct dpu_hw_wb *hw_wb; struct dpu_hw_ctl *hw_ctl; struct dpu_hw_pingpong *hw_pp; + struct dpu_hw_cdm *hw_cdm; u32 pending_flush = 0; if (!phys_enc) @@ -328,6 +425,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) hw_wb = phys_enc->hw_wb; hw_pp = phys_enc->hw_pp; hw_ctl = phys_enc->hw_ctl; + hw_cdm = phys_enc->hw_cdm; DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0); @@ -343,6 +441,9 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl, hw_pp->merge_3d->idx); + if (hw_cdm && hw_ctl->ops.update_pending_flush_cdm) + hw_ctl->ops.update_pending_flush_cdm(hw_ctl, hw_cdm->idx); + if (hw_ctl->ops.get_pending_flush) pending_flush = hw_ctl->ops.get_pending_flush(hw_ctl); @@ -374,8 +475,9 @@ static void dpu_encoder_phys_wb_setup( dpu_encoder_phys_wb_setup_fb(phys_enc, fb); - dpu_encoder_phys_wb_setup_cdp(phys_enc); + dpu_encoder_helper_phys_setup_cdm(phys_enc); + dpu_encoder_phys_wb_setup_ctl(phys_enc); } /** @@ -580,20 +682,6 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc) phys_enc->enable_state = DPU_ENC_DISABLED; } -/** - * dpu_encoder_phys_wb_destroy - destroy writeback encoder - * @phys_enc: Pointer to physical encoder - */ -static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc) -{ - if (!phys_enc) - return; - - DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0); - - kfree(phys_enc); -} - static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc, struct drm_writeback_job *job) { @@ -689,7 +777,6 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops) ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set; ops->enable = dpu_encoder_phys_wb_enable; ops->disable = dpu_encoder_phys_wb_disable; - ops->destroy = dpu_encoder_phys_wb_destroy; ops->atomic_check = 
dpu_encoder_phys_wb_atomic_check; ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done; ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff; @@ -705,9 +792,10 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops) /** * dpu_encoder_phys_wb_init - initialize writeback encoder + * @dev: Corresponding device for devres management * @p: Pointer to init info structure with initialization params */ -struct dpu_encoder_phys *dpu_encoder_phys_wb_init( +struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev, struct dpu_enc_phys_init_params *p) { struct dpu_encoder_phys *phys_enc = NULL; @@ -720,7 +808,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init( return ERR_PTR(-EINVAL); } - wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL); + wb_enc = drmm_kzalloc(dev, sizeof(*wb_enc), GFP_KERNEL); if (!wb_enc) { DPU_ERROR("failed to allocate wb phys_enc enc\n"); return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 7056c08b99..54e8717403 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -22,23 +22,14 @@ BIT(DPU_SSPP_CSC_10BIT)) #define VIG_MSM8998_MASK \ - (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3)) + (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE)) #define VIG_SDM845_MASK \ - (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3)) + (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE)) #define VIG_SDM845_MASK_SDMA \ (VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) -#define VIG_SC7180_MASK \ - (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4)) - -#define VIG_SM6125_MASK \ - (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE)) - -#define VIG_SC7180_MASK_SDMA \ - (VIG_SC7180_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) - #define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL)) #define DMA_MSM8998_MASK \ @@ -47,7 +38,7 @@ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT)) #define VIG_SC7280_MASK \ - (VIG_SC7180_MASK | BIT(DPU_SSPP_INLINE_ROTATION)) + (VIG_SDM845_MASK | BIT(DPU_SSPP_INLINE_ROTATION)) #define VIG_SC7280_MASK_SDMA \ (VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2)) @@ -211,7 +202,41 @@ static const u32 rotation_v2_formats[] = { /* TODO add formats after validation */ }; -static const uint32_t wb2_formats[] = { +static const u32 wb2_formats_rgb[] = { + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_RGB888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_BGR565, + DRM_FORMAT_BGR888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_XBGR4444, +}; + +static const u32 wb2_formats_rgb_yuv[] = { DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888, @@ -243,6 +268,7 @@ static const uint32_t wb2_formats[] = { DRM_FORMAT_BGRA4444, DRM_FORMAT_BGRX4444, DRM_FORMAT_XBGR4444, + DRM_FORMAT_NV12, }; /************************************************************* @@ -252,17 +278,14 @@ static const uint32_t wb2_formats[] = { #define 
SSPP_SCALER_VER(maj, min) (((maj) << 16) | (min)) /* SSPP common configuration */ -#define _VIG_SBLK(sdma_pri, qseed_ver, scaler_ver) \ +#define _VIG_SBLK(scaler_ver) \ { \ .maxdwnscale = MAX_DOWNSCALE_RATIO, \ .maxupscale = MAX_UPSCALE_RATIO, \ - .smart_dma_priority = sdma_pri, \ .scaler_blk = {.name = "scaler", \ - .id = qseed_ver, \ .version = scaler_ver, \ .base = 0xa00, .len = 0xa0,}, \ .csc_blk = {.name = "csc", \ - .id = DPU_SSPP_CSC_10BIT, \ .base = 0x1a00, .len = 0x100,}, \ .format_list = plane_formats_yuv, \ .num_formats = ARRAY_SIZE(plane_formats_yuv), \ @@ -271,17 +294,14 @@ static const uint32_t wb2_formats[] = { .rotation_cfg = NULL, \ } -#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, scaler_ver, rot_cfg) \ +#define _VIG_SBLK_ROT(scaler_ver, rot_cfg) \ { \ .maxdwnscale = MAX_DOWNSCALE_RATIO, \ .maxupscale = MAX_UPSCALE_RATIO, \ - .smart_dma_priority = sdma_pri, \ .scaler_blk = {.name = "scaler", \ - .id = qseed_ver, \ .version = scaler_ver, \ .base = 0xa00, .len = 0xa0,}, \ .csc_blk = {.name = "csc", \ - .id = DPU_SSPP_CSC_10BIT, \ .base = 0x1a00, .len = 0x100,}, \ .format_list = plane_formats_yuv, \ .num_formats = ARRAY_SIZE(plane_formats_yuv), \ @@ -290,138 +310,64 @@ static const uint32_t wb2_formats[] = { .rotation_cfg = rot_cfg, \ } -#define _DMA_SBLK(sdma_pri) \ +#define _VIG_SBLK_NOSCALE() \ + { \ + .maxdwnscale = SSPP_UNITY_SCALE, \ + .maxupscale = SSPP_UNITY_SCALE, \ + .format_list = plane_formats_yuv, \ + .num_formats = ARRAY_SIZE(plane_formats_yuv), \ + .virt_format_list = plane_formats, \ + .virt_num_formats = ARRAY_SIZE(plane_formats), \ + } + +#define _DMA_SBLK() \ { \ .maxdwnscale = SSPP_UNITY_SCALE, \ .maxupscale = SSPP_UNITY_SCALE, \ - .smart_dma_priority = sdma_pri, \ .format_list = plane_formats, \ .num_formats = ARRAY_SIZE(plane_formats), \ .virt_format_list = plane_formats, \ .virt_num_formats = ARRAY_SIZE(plane_formats), \ } -static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 = - _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3, - SSPP_SCALER_VER(1, 2)); -static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 = - _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3, - SSPP_SCALER_VER(1, 2)); -static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 = - _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3, - SSPP_SCALER_VER(1, 2)); -static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 = - _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3, - SSPP_SCALER_VER(1, 2)); - static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = { .rot_maxheight = 1088, .rot_num_formats = ARRAY_SIZE(rotation_v2_formats), .rot_format_list = rotation_v2_formats, }; -static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = - _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3, - SSPP_SCALER_VER(1, 3)); -static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = - _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3, - SSPP_SCALER_VER(1, 3)); -static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = - _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3, - SSPP_SCALER_VER(1, 3)); -static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = - _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3, - SSPP_SCALER_VER(1, 3)); - -static const struct dpu_sspp_sub_blks sm8150_vig_sblk_0 = - _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3, - SSPP_SCALER_VER(1, 4)); -static const struct dpu_sspp_sub_blks sm8150_vig_sblk_1 = - _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3, - SSPP_SCALER_VER(1, 4)); -static const struct dpu_sspp_sub_blks sm8150_vig_sblk_2 = - _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3, - SSPP_SCALER_VER(1, 4)); -static const struct dpu_sspp_sub_blks sm8150_vig_sblk_3 = - _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3, - 
SSPP_SCALER_VER(1, 4)); - -static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK(1); -static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK(2); -static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK(3); -static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK(4); - -static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 = - _VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 0)); - -static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 = - _VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 0), +static const struct dpu_sspp_sub_blks dpu_vig_sblk_noscale = + _VIG_SBLK_NOSCALE(); + +static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_2 = + _VIG_SBLK(SSPP_SCALER_VER(1, 2)); + +static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_3 = + _VIG_SBLK(SSPP_SCALER_VER(1, 3)); + +static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_4 = + _VIG_SBLK(SSPP_SCALER_VER(1, 4)); + +static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_2_4 = + _VIG_SBLK(SSPP_SCALER_VER(2, 4)); + +static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_0 = + _VIG_SBLK(SSPP_SCALER_VER(3, 0)); + +static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_0_rot_v2 = + _VIG_SBLK_ROT(SSPP_SCALER_VER(3, 0), &dpu_rot_sc7280_cfg_v2); -static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 = - _VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 0)); - -static const struct dpu_sspp_sub_blks sm6125_vig_sblk_0 = - _VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE, - SSPP_SCALER_VER(2, 4)); - -static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 = - _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 0)); -static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 = - _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 0)); -static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 = - _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 0)); -static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 = - _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 0)); - -static const struct dpu_sspp_sub_blks sm8450_vig_sblk_0 = - _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 1)); -static const struct dpu_sspp_sub_blks sm8450_vig_sblk_1 = - _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 1)); -static const struct dpu_sspp_sub_blks sm8450_vig_sblk_2 = - _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 1)); -static const struct dpu_sspp_sub_blks sm8450_vig_sblk_3 = - _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 1)); - -static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 = - _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 2)); -static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 = - _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 2)); -static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 = - _VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 2)); -static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 = - _VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4, - SSPP_SCALER_VER(3, 2)); -static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK(5); -static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK(6); - -#define _VIG_SBLK_NOSCALE(sdma_pri) \ - { \ - .maxdwnscale = SSPP_UNITY_SCALE, \ - .maxupscale = SSPP_UNITY_SCALE, \ - .smart_dma_priority = sdma_pri, \ - .format_list = plane_formats_yuv, \ - .num_formats = ARRAY_SIZE(plane_formats_yuv), \ - .virt_format_list = plane_formats, \ - .virt_num_formats = ARRAY_SIZE(plane_formats), \ - } +static 
const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_1 = + _VIG_SBLK(SSPP_SCALER_VER(3, 1)); + +static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 = + _VIG_SBLK(SSPP_SCALER_VER(3, 2)); + +static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 = + _VIG_SBLK(SSPP_SCALER_VER(3, 3)); -static const struct dpu_sspp_sub_blks qcm2290_vig_sblk_0 = _VIG_SBLK_NOSCALE(2); -static const struct dpu_sspp_sub_blks qcm2290_dma_sblk_0 = _DMA_SBLK(1); +static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK(); /************************************************************* * MIXER sub blocks config @@ -473,12 +419,12 @@ static const struct dpu_lm_sub_blks qcm2290_lm_sblk = { * DSPP sub blocks config *************************************************************/ static const struct dpu_dspp_sub_blks msm8998_dspp_sblk = { - .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700, + .pcc = {.name = "pcc", .base = 0x1700, .len = 0x90, .version = 0x10007}, }; static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = { - .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700, + .pcc = {.name = "pcc", .base = 0x1700, .len = 0x90, .version = 0x40000}, }; @@ -486,19 +432,19 @@ static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = { * PINGPONG sub blocks config *************************************************************/ static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = { - .te2 = {.name = "te2", .id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0, + .te2 = {.name = "te2", .base = 0x2000, .len = 0x0, .version = 0x1}, - .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0, + .dither = {.name = "dither", .base = 0x30e0, .len = 0x20, .version = 0x10000}, }; static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = { - .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0, + .dither = {.name = "dither", .base = 0x30e0, .len = 0x20, .version = 0x10000}, }; static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = { - .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0xe0, + .dither = {.name = "dither", .base = 0xe0, .len = 0x20, .version = 0x20000}, }; @@ -515,6 +461,16 @@ static const struct dpu_dsc_sub_blks dsc_sblk_1 = { .ctl = {.name = "ctl", .base = 0xF80, .len = 0x10}, }; +/************************************************************* + * CDM block config + *************************************************************/ +static const struct dpu_cdm_cfg sc7280_cdm = { + .name = "cdm_0", + .id = CDM_0, + .len = 0x228, + .base = 0x79200, +}; + /************************************************************* * VBIF sub blocks config *************************************************************/ @@ -523,6 +479,7 @@ static const u32 msm8998_rt_pri_lvl[] = {1, 2, 2, 2}; static const u32 msm8998_nrt_pri_lvl[] = {1, 1, 1, 1}; static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6}; static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3}; +static const u32 sm8650_rt_pri_lvl[] = {4, 4, 5, 5, 5, 5, 5, 6}; static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = { { @@ -609,6 +566,26 @@ static const struct dpu_vbif_cfg sm8550_vbif[] = { }, }; +static const struct dpu_vbif_cfg sm8650_vbif[] = { + { + .name = "vbif_rt", .id = VBIF_RT, + .base = 0, .len = 0x1074, + .features = BIT(DPU_VBIF_QOS_REMAP), + .xin_halt_timeout = 0x4000, + .qos_rp_remap_size = 0x40, + .qos_rt_tbl = { + .npriority_lvl = ARRAY_SIZE(sm8650_rt_pri_lvl), + .priority_lvl = sm8650_rt_pri_lvl, + }, + .qos_nrt_tbl = { + .npriority_lvl = 
ARRAY_SIZE(sdm845_nrt_pri_lvl), + .priority_lvl = sdm845_nrt_pri_lvl, + }, + .memtype_count = 16, + .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}, + }, +}; + /************************************************************* * PERF data config *************************************************************/ @@ -705,6 +682,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { #include "catalog/dpu_3_0_msm8998.h" #include "catalog/dpu_4_0_sdm845.h" +#include "catalog/dpu_4_1_sdm670.h" #include "catalog/dpu_5_0_sm8150.h" #include "catalog/dpu_5_1_sc8180x.h" @@ -724,3 +702,5 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { #include "catalog/dpu_8_1_sm8450.h" #include "catalog/dpu_9_0_sm8550.h" + +#include "catalog/dpu_10_0_sm8650.h" diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 6244507530..ba82ef4560 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -51,9 +51,7 @@ enum { /** * SSPP sub-blocks/features * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support - * @DPU_SSPP_SCALER_QSEED3, QSEED3 alogorithm support - * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite alogorithm support - * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support + * @DPU_SSPP_SCALER_QSEED3_COMPATIBLE, QSEED3-compatible algorithm support (includes QSEED3, QSEED3LITE and QSEED4) * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes * @DPU_SSPP_CSC, Support of Color space converion * @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion @@ -71,9 +69,7 @@ enum { */ enum { DPU_SSPP_SCALER_QSEED2 = 0x1, - DPU_SSPP_SCALER_QSEED3, - DPU_SSPP_SCALER_QSEED3LITE, - DPU_SSPP_SCALER_QSEED4, + DPU_SSPP_SCALER_QSEED3_COMPATIBLE, DPU_SSPP_SCALER_RGB, DPU_SSPP_CSC, DPU_SSPP_CSC_10BIT, @@ -248,51 +244,51 @@ enum { u32 len; \ unsigned long features -/** - * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU - * @name: string name for debug purposes - * @id: enum identifying this sub-block - * @base: offset of this sub-block relative to the block - * offset - * @len register block length of this sub-block - */ -#define DPU_HW_SUBBLK_INFO \ - char name[DPU_HW_BLK_NAME_LEN]; \ - u32 id; \ - u32 base; \ - u32 len - /** * struct dpu_scaler_blk: Scaler information - * @info: HW register and features supported by this sub-blk + * @name: string name for debug purposes + * @base: offset of this sub-block relative to the block offset + * @len: register block length of this sub-block * @version: qseed block revision, on QSEED3+ platforms this is the value of * scaler_blk.base + QSEED3_HW_VERSION registers.
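With the three per-generation QSEED feature bits collapsed into DPU_SSPP_SCALER_QSEED3_COMPATIBLE, the precise scaler revision now travels in scaler_blk.version, encoded by the catalog's SSPP_SCALER_VER(major, minor) as (major << 16) | minor. A rough sketch (the helper is hypothetical, not from the patch) of how such a revision check can look, comparing versions instead of testing separate capability bits:

#define SSPP_SCALER_VER(maj, min)	(((maj) << 16) | (min))

/* QSEED4 hardware reports itself as the QSEED3-compatible revision 3.0+ */
static inline bool example_scaler_is_qseed4(u32 version)
{
	return version >= SSPP_SCALER_VER(3, 0);
}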
*/ struct dpu_scaler_blk { - DPU_HW_SUBBLK_INFO; + char name[DPU_HW_BLK_NAME_LEN]; + u32 base; + u32 len; u32 version; }; struct dpu_csc_blk { - DPU_HW_SUBBLK_INFO; + char name[DPU_HW_BLK_NAME_LEN]; + u32 base; + u32 len; }; /** * struct dpu_pp_blk : Pixel processing sub-blk information - * @info: HW register and features supported by this sub-blk + * @name: string name for debug purposes + * @base: offset of this sub-block relative to the block offset + * @len: register block length of this sub-block * @version: HW Algorithm version */ struct dpu_pp_blk { - DPU_HW_SUBBLK_INFO; + char name[DPU_HW_BLK_NAME_LEN]; + u32 base; + u32 len; u32 version; }; /** * struct dpu_dsc_blk - DSC Encoder sub-blk information - * @info: HW register and features supported by this sub-blk + * @name: string name for debug purposes + * @base: offset of this sub-block relative to the block offset + * @len: register block length of this sub-block */ struct dpu_dsc_blk { - DPU_HW_SUBBLK_INFO; + char name[DPU_HW_BLK_NAME_LEN]; + u32 base; + u32 len; }; /** @@ -342,7 +338,6 @@ struct dpu_rotation_cfg { * @max_mixer_width max layer mixer line width support. * @max_mixer_blendstages max layer mixer blend stages or * supported z order - * @qseed_type qseed2 or qseed3 support. * @has_src_split source split feature status * @has_dim_layer dim layer feature status * @has_idle_pc indicate if idle power collapse feature is supported @@ -355,7 +350,6 @@ struct dpu_rotation_cfg { struct dpu_caps { u32 max_mixer_width; u32 max_mixer_blendstages; - u32 qseed_type; bool has_src_split; bool has_dim_layer; bool has_idle_pc; @@ -372,7 +366,6 @@ struct dpu_caps { * common: Pointer to common configurations shared by sub blocks * @maxdwnscale: max downscale ratio supported(without DECIMATION) * @maxupscale: maxupscale ratio supported - * @smart_dma_priority: hw priority of rect1 of multirect pipe * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps * @qseed_ver: qseed version * @scaler_blk: @@ -386,7 +379,6 @@ struct dpu_caps { struct dpu_sspp_sub_blks { u32 maxdwnscale; u32 maxupscale; - u32 smart_dma_priority; u32 max_per_pipe_bw; u32 qseed_ver; struct dpu_scaler_blk scaler_blk; @@ -690,6 +682,17 @@ struct dpu_vbif_cfg { u32 memtype[MAX_XIN_COUNT]; }; +/** + * struct dpu_cdm_cfg - information of chroma down blocks + * @name string name for debug purposes + * @id enum identifying this block + * @base register offset of this block + * @features bit mask identifying sub-blocks/features + */ +struct dpu_cdm_cfg { + DPU_HW_BLK_INFO; +}; + /** * Define CDP use cases * @DPU_PERF_CDP_UDAGE_RT: real-time use cases @@ -813,6 +816,8 @@ struct dpu_mdss_cfg { u32 wb_count; const struct dpu_wb_cfg *wb; + const struct dpu_cdm_cfg *cdm; + u32 ad_count; u32 dspp_count; @@ -828,6 +833,7 @@ struct dpu_mdss_cfg { extern const struct dpu_mdss_cfg dpu_msm8998_cfg; extern const struct dpu_mdss_cfg dpu_sdm845_cfg; +extern const struct dpu_mdss_cfg dpu_sdm670_cfg; extern const struct dpu_mdss_cfg dpu_sm8150_cfg; extern const struct dpu_mdss_cfg dpu_sc8180x_cfg; extern const struct dpu_mdss_cfg dpu_sm8250_cfg; @@ -842,5 +848,6 @@ extern const struct dpu_mdss_cfg dpu_sc7280_cfg; extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg; extern const struct dpu_mdss_cfg dpu_sm8450_cfg; extern const struct dpu_mdss_cfg dpu_sm8550_cfg; +extern const struct dpu_mdss_cfg dpu_sm8650_cfg; #endif /* _DPU_HW_CATALOG_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c new file mode 100644 index 
0000000000..9016b3ade6 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023, The Linux Foundation. All rights reserved. + */ + +#include <linux/bitfield.h> + +#include <drm/drm_managed.h> + +#include "dpu_hw_mdss.h" +#include "dpu_hw_util.h" +#include "dpu_hw_catalog.h" +#include "dpu_hw_cdm.h" +#include "dpu_kms.h" + +#define CDM_CSC_10_OPMODE 0x000 +#define CDM_CSC_10_BASE 0x004 + +#define CDM_CDWN2_OP_MODE 0x100 +#define CDM_CDWN2_CLAMP_OUT 0x104 +#define CDM_CDWN2_PARAMS_3D_0 0x108 +#define CDM_CDWN2_PARAMS_3D_1 0x10C +#define CDM_CDWN2_COEFF_COSITE_H_0 0x110 +#define CDM_CDWN2_COEFF_COSITE_H_1 0x114 +#define CDM_CDWN2_COEFF_COSITE_H_2 0x118 +#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C +#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120 +#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124 +#define CDM_CDWN2_COEFF_COSITE_V 0x128 +#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C +#define CDM_CDWN2_OUT_SIZE 0x130 + +#define CDM_HDMI_PACK_OP_MODE 0x200 +#define CDM_CSC_10_MATRIX_COEFF_0 0x004 + +#define CDM_MUX 0x224 + +/* CDM CDWN2 sub-block bit definitions */ +#define CDM_CDWN2_OP_MODE_EN BIT(0) +#define CDM_CDWN2_OP_MODE_ENABLE_H BIT(1) +#define CDM_CDWN2_OP_MODE_ENABLE_V BIT(2) +#define CDM_CDWN2_OP_MODE_BITS_OUT_8BIT BIT(7) +#define CDM_CDWN2_V_PIXEL_METHOD_MASK GENMASK(6, 5) +#define CDM_CDWN2_H_PIXEL_METHOD_MASK GENMASK(4, 3) + +/* CDM CSC10 sub-block bit definitions */ +#define CDM_CSC10_OP_MODE_EN BIT(0) +#define CDM_CSC10_OP_MODE_SRC_FMT_YUV BIT(1) +#define CDM_CSC10_OP_MODE_DST_FMT_YUV BIT(2) + +/* CDM HDMI pack sub-block bit definitions */ +#define CDM_HDMI_PACK_OP_MODE_EN BIT(0) + +/* + * Horizontal coefficients for cosite chroma downscale + * s13 representation of coefficients + */ +static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e}; + +/* + * Horizontal coefficients for offsite chroma downscale + */ +static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046}; + +/* + * Vertical coefficients for cosite chroma downscale + */ +static u32 cosite_v_coeff[] = {0x00080004}; +/* + * Vertical coefficients for offsite chroma downscale + */ +static u32 offsite_v_coeff[] = {0x00060002}; + +static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cfg) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 opmode; + u32 out_size; + + switch (cfg->h_cdwn_type) { + case CDM_CDWN_DISABLE: + opmode = 0; + break; + case CDM_CDWN_PIXEL_DROP: + opmode = CDM_CDWN2_OP_MODE_ENABLE_H | + FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK, + CDM_CDWN2_METHOD_PIXEL_DROP); + break; + case CDM_CDWN_AVG: + opmode = CDM_CDWN2_OP_MODE_ENABLE_H | + FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK, + CDM_CDWN2_METHOD_AVG); + break; + case CDM_CDWN_COSITE: + opmode = CDM_CDWN2_OP_MODE_ENABLE_H | + FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK, + CDM_CDWN2_METHOD_COSITE); + DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0, + cosite_h_coeff[0]); + DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1, + cosite_h_coeff[1]); + DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2, + cosite_h_coeff[2]); + break; + case CDM_CDWN_OFFSITE: + opmode = CDM_CDWN2_OP_MODE_ENABLE_H | + FIELD_PREP(CDM_CDWN2_H_PIXEL_METHOD_MASK, CDM_CDWN2_METHOD_OFFSITE); + DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0, + offsite_h_coeff[0]); + DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1, + offsite_h_coeff[1]); + DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2, + offsite_h_coeff[2]); + break; + default: + DPU_ERROR("%s invalid horz down sampling type\n", __func__); + return -EINVAL; + } + + switch (cfg->v_cdwn_type) { + case CDM_CDWN_DISABLE: + /* if it's only horizontal downsample, we don't need to do anything here */ + break; + case CDM_CDWN_PIXEL_DROP: + opmode |= CDM_CDWN2_OP_MODE_ENABLE_V | + FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK, + CDM_CDWN2_METHOD_PIXEL_DROP); + break; + case CDM_CDWN_AVG: + opmode |= CDM_CDWN2_OP_MODE_ENABLE_V | + FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK, + CDM_CDWN2_METHOD_AVG); + break; + case CDM_CDWN_COSITE: + opmode |= CDM_CDWN2_OP_MODE_ENABLE_V | + FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK, + CDM_CDWN2_METHOD_COSITE); + DPU_REG_WRITE(c, + CDM_CDWN2_COEFF_COSITE_V, + cosite_v_coeff[0]); + break; + case CDM_CDWN_OFFSITE: + opmode |= CDM_CDWN2_OP_MODE_ENABLE_V | + FIELD_PREP(CDM_CDWN2_V_PIXEL_METHOD_MASK, + CDM_CDWN2_METHOD_OFFSITE); + DPU_REG_WRITE(c, + CDM_CDWN2_COEFF_OFFSITE_V, + offsite_v_coeff[0]); + break; + default: + return -EINVAL; + } + + if (cfg->output_bit_depth != CDM_CDWN_OUTPUT_10BIT) + opmode |= CDM_CDWN2_OP_MODE_BITS_OUT_8BIT; + + if (cfg->v_cdwn_type || cfg->h_cdwn_type) + opmode |= CDM_CDWN2_OP_MODE_EN; /* EN CDWN module */ + else + opmode &= ~CDM_CDWN2_OP_MODE_EN; + + out_size = (cfg->output_width & 0xFFFF) | ((cfg->output_height & 0xFFFF) << 16); + DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size); + DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode); + DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT, ((0x3FF << 16) | 0x0)); + + return 0; +} +
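For reference, a standalone worked example of how the CDWN2 opmode word composed above packs together, using the bit definitions from this file (the helper itself is illustrative, not part of the patch). Horizontal cosite downscale with 8-bit output combines EN (bit 0), ENABLE_H (bit 1), method COSITE (value 2) in the H field at bits 4:3, and BITS_OUT_8BIT (bit 7):

#include <linux/bitfield.h>
#include <linux/bits.h>

static u32 example_cdwn2_opmode(void)
{
	u32 opmode = 0;

	opmode |= BIT(0);			/* CDM_CDWN2_OP_MODE_EN */
	opmode |= BIT(1);			/* CDM_CDWN2_OP_MODE_ENABLE_H */
	opmode |= FIELD_PREP(GENMASK(4, 3), 2);	/* CDM_CDWN2_METHOD_COSITE */
	opmode |= BIT(7);			/* CDM_CDWN2_OP_MODE_BITS_OUT_8BIT */

	return opmode;	/* 0x01 | 0x02 | 0x10 | 0x80 == 0x93 */
}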
+static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx, struct dpu_hw_cdm_cfg *cdm) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + const struct dpu_format *fmt; + u32 opmode = 0; + u32 csc = 0; + + if (!ctx || !cdm) + return -EINVAL; + + fmt = cdm->output_fmt; + + if (!DPU_FORMAT_IS_YUV(fmt)) + return -EINVAL; + + dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, cdm->csc_cfg, true); + dpu_hw_cdm_setup_cdwn(ctx, cdm); + + if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) { + if (fmt->chroma_sample == DPU_CHROMA_H1V2) + return -EINVAL; /* unsupported format */ + opmode = CDM_HDMI_PACK_OP_MODE_EN; + opmode |= (fmt->chroma_sample << 1); + } + + csc |= CDM_CSC10_OP_MODE_DST_FMT_YUV; + csc &= ~CDM_CSC10_OP_MODE_SRC_FMT_YUV; + csc |= CDM_CSC10_OP_MODE_EN; + + if (ctx && ctx->ops.bind_pingpong_blk) + ctx->ops.bind_pingpong_blk(ctx, cdm->pp_id); + + DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc); + DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode); + return 0; +} + +static void dpu_hw_cdm_bind_pingpong_blk(struct dpu_hw_cdm *ctx, const enum dpu_pingpong pp) +{ + struct dpu_hw_blk_reg_map *c; + int mux_cfg; + + c = &ctx->hw; + + mux_cfg = DPU_REG_READ(c, CDM_MUX); + mux_cfg &= ~0xf; + + if (pp) + mux_cfg |= (pp - PINGPONG_0) & 0x7; + else + mux_cfg |= 0xf; + + DPU_REG_WRITE(c, CDM_MUX, mux_cfg); +} + +struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev, + const struct dpu_cdm_cfg *cfg, void __iomem *addr, + const struct dpu_mdss_version *mdss_rev) +{ + struct dpu_hw_cdm *c; + + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + c->hw.blk_addr = addr + cfg->base; + c->hw.log_mask = DPU_DBG_MASK_CDM; + + /* Assign ops */ + c->idx = cfg->id; + c->caps = cfg; + + c->ops.enable = dpu_hw_cdm_enable; + if (mdss_rev->core_major_ver >= 5) + c->ops.bind_pingpong_blk = dpu_hw_cdm_bind_pingpong_blk; + + return c; +}
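A short usage sketch for the driver object returned above (the calling helper is hypothetical): consumers drive the block only through its ops table, and since dpu_hw_cdm_init() populates bind_pingpong_blk solely on MDSS cores with major version 5 or newer, the op must be treated as optional.

#include "dpu_hw_cdm.h"

static int example_enable_cdm(struct dpu_hw_cdm *cdm,
			      struct dpu_hw_cdm_cfg *cfg)
{
	/* pre-5.x cores have no CDM/pingpong mux, so the op is NULL there */
	if (cdm->ops.bind_pingpong_blk)
		cdm->ops.bind_pingpong_blk(cdm, cfg->pp_id);

	return cdm->ops.enable(cdm, cfg);
}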
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h new file mode 100644 index 0000000000..348424df87 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, The Linux Foundation. All rights reserved. + */ + +#ifndef _DPU_HW_CDM_H +#define _DPU_HW_CDM_H + +#include "dpu_hw_mdss.h" +#include "dpu_hw_top.h" + +struct dpu_hw_cdm; + +/** + * struct dpu_hw_cdm_cfg : current configuration of CDM block + * + * @output_width: output ROI width of CDM block + * @output_height: output ROI height of CDM block + * @output_bit_depth: output bit-depth of CDM block + * @h_cdwn_type: downsample type used for horizontal pixels + * @v_cdwn_type: downsample type used for vertical pixels + * @output_fmt: handle to dpu_format of CDM block + * @csc_cfg: handle to CSC matrix programmed for CDM block + * @output_type: interface to which CDM is paired (HDMI/WB) + * @pp_id: ping-pong block to which CDM is bound to + */ +struct dpu_hw_cdm_cfg { + u32 output_width; + u32 output_height; + u32 output_bit_depth; + u32 h_cdwn_type; + u32 v_cdwn_type; + const struct dpu_format *output_fmt; + const struct dpu_csc_cfg *csc_cfg; + u32 output_type; + int pp_id; +}; + +/* + * These values are used to indicate which type of downsample is used + * in the horizontal/vertical direction for the CDM block. + */ +enum dpu_hw_cdwn_type { + CDM_CDWN_DISABLE, + CDM_CDWN_PIXEL_DROP, + CDM_CDWN_AVG, + CDM_CDWN_COSITE, + CDM_CDWN_OFFSITE, +}; + +/* + * CDM block can be paired with WB or HDMI block. These values match + * the input with which the CDM block is paired. + */ +enum dpu_hw_cdwn_output_type { + CDM_CDWN_OUTPUT_HDMI, + CDM_CDWN_OUTPUT_WB, +}; + +/* + * CDM block can give an 8-bit or 10-bit output. These values + * are used to indicate the output bit depth of CDM block + */ +enum dpu_hw_cdwn_output_bit_depth { + CDM_CDWN_OUTPUT_8BIT, + CDM_CDWN_OUTPUT_10BIT, +}; + +/* + * CDM block can downsample using different methods. These values + * are used to indicate the downsample method which can be used + * either in the horizontal or vertical direction. + */ +enum dpu_hw_cdwn_op_mode_method_h_v { + CDM_CDWN2_METHOD_PIXEL_DROP, + CDM_CDWN2_METHOD_AVG, + CDM_CDWN2_METHOD_COSITE, + CDM_CDWN2_METHOD_OFFSITE +}; + +/** + * struct dpu_hw_cdm_ops : Interface to the chroma down Hw driver functions + * Assumption is these functions will be called after + * clocks are enabled + * @enable: Enables the output to interface and programs the + * output packer + * @bind_pingpong_blk: enable/disable the connection with pingpong which + * will feed pixels to this cdm + */ +struct dpu_hw_cdm_ops { + /** + * Enable the CDM module + * @cdm Pointer to chroma down context + */ + int (*enable)(struct dpu_hw_cdm *cdm, struct dpu_hw_cdm_cfg *cfg); + + /** + * Enable/disable the connection with pingpong + * @cdm Pointer to chroma down context + * @pp pingpong block id. + */ + void (*bind_pingpong_blk)(struct dpu_hw_cdm *cdm, const enum dpu_pingpong pp); +}; + +/** + * struct dpu_hw_cdm - cdm description + * @base: Hardware block base structure + * @hw: Block hardware details + * @idx: CDM index + * @caps: Pointer to cdm_cfg + * @ops: handle to operations possible for this CDM + */ +struct dpu_hw_cdm { + struct dpu_hw_blk base; + struct dpu_hw_blk_reg_map hw; + + /* chroma down */ + const struct dpu_cdm_cfg *caps; + enum dpu_cdm idx; + + /* ops */ + struct dpu_hw_cdm_ops ops; +}; + +/** + * dpu_hw_cdm_init - initializes the cdm hw driver object. + * should be called once before accessing every cdm.
+ * @dev: DRM device handle + * @cdm: CDM catalog entry for which driver object is required + * @addr : mapped register io address of MDSS + * @mdss_rev: mdss hw core revision + */ +struct dpu_hw_cdm *dpu_hw_cdm_init(struct drm_device *dev, + const struct dpu_cdm_cfg *cdm, void __iomem *addr, + const struct dpu_mdss_version *mdss_rev); + +static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_cdm, base); +} + +#endif /*_DPU_HW_CDM_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index e7b680a151..e76565c3e6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -32,11 +32,13 @@ #define CTL_DSC_ACTIVE 0x0E8 #define CTL_WB_ACTIVE 0x0EC #define CTL_INTF_ACTIVE 0x0F4 +#define CTL_CDM_ACTIVE 0x0F8 #define CTL_FETCH_PIPE_ACTIVE 0x0FC #define CTL_MERGE_3D_FLUSH 0x100 #define CTL_DSC_FLUSH 0x104 #define CTL_WB_FLUSH 0x108 #define CTL_INTF_FLUSH 0x110 +#define CTL_CDM_FLUSH 0x114 #define CTL_INTF_MASTER 0x134 #define CTL_DSPP_n_FLUSH(n) ((0x13C) + ((n) * 4)) @@ -46,6 +48,7 @@ #define DPU_REG_RESET_TIMEOUT_US 2000 #define MERGE_3D_IDX 23 #define DSC_IDX 22 +#define CDM_IDX 26 #define INTF_IDX 31 #define WB_IDX 16 #define DSPP_IDX 29 /* From DPU hw rev 7.x.x */ @@ -107,6 +110,7 @@ static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx) ctx->pending_wb_flush_mask = 0; ctx->pending_merge_3d_flush_mask = 0; ctx->pending_dsc_flush_mask = 0; + ctx->pending_cdm_flush_mask = 0; memset(ctx->pending_dspp_flush_mask, 0, sizeof(ctx->pending_dspp_flush_mask)); @@ -151,6 +155,10 @@ static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx) DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH, ctx->pending_dsc_flush_mask); + if (ctx->pending_flush_mask & BIT(CDM_IDX)) + DPU_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH, + ctx->pending_cdm_flush_mask); + DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask); } @@ -282,6 +290,13 @@ static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx, } } +static void dpu_hw_ctl_update_pending_flush_cdm(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num) +{ + /* update pending flush only if CDM_0 is flushed */ + if (cdm_num == CDM_0) + ctx->pending_flush_mask |= BIT(CDM_IDX); +} + static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx, enum dpu_wb wb) { @@ -310,6 +325,12 @@ static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx, ctx->pending_flush_mask |= BIT(DSC_IDX); } +static void dpu_hw_ctl_update_pending_flush_cdm_v1(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num) +{ + ctx->pending_cdm_flush_mask |= BIT(cdm_num - CDM_0); + ctx->pending_flush_mask |= BIT(CDM_IDX); +} + static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx, enum dpu_dspp dspp, u32 dspp_sub_blk) { @@ -543,6 +564,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, if (cfg->dsc) DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc); + + if (cfg->cdm) + DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm); } static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx, @@ -586,6 +610,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx, u32 wb_active = 0; u32 merge3d_active = 0; u32 dsc_active; + u32 cdm_active; /* * This API resets each portion of the CTL path namely, @@ -621,6 +646,12 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx, dsc_active &= ~cfg->dsc; DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active); } + + if (cfg->cdm) { + cdm_active = DPU_REG_READ(c, 
CTL_CDM_ACTIVE); + cdm_active &= ~cfg->cdm; + DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active); + } } static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx, @@ -654,12 +685,14 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1; ops->update_pending_flush_dsc = dpu_hw_ctl_update_pending_flush_dsc_v1; + ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1; } else { ops->trigger_flush = dpu_hw_ctl_trigger_flush; ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg; ops->update_pending_flush_intf = dpu_hw_ctl_update_pending_flush_intf; ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb; + ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm; } ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush; ops->update_pending_flush = dpu_hw_ctl_update_pending_flush; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h index 279ebd8dfb..ff85b5ee0a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h @@ -39,6 +39,7 @@ struct dpu_hw_stage_cfg { * @mode_3d: 3d mux configuration * @merge_3d: 3d merge block used * @intf_mode_sel: Interface mode, cmd / vid + * @cdm: CDM block used * @stream_sel: Stream selection for multi-stream interfaces * @dsc: DSC BIT masks used */ @@ -48,6 +49,7 @@ struct dpu_hw_intf_cfg { enum dpu_3d_blend_mode mode_3d; enum dpu_merge_3d merge_3d; enum dpu_ctl_mode_sel intf_mode_sel; + enum dpu_cdm cdm; int stream_sel; unsigned int dsc; }; @@ -166,6 +168,14 @@ struct dpu_hw_ctl_ops { void (*update_pending_flush_dsc)(struct dpu_hw_ctl *ctx, enum dpu_dsc blk); + /** + * OR in the given flushbits to the cached pending_(cdm_)flush_mask + * No effect on hardware + * @ctx: ctl path ctx pointer + * @cdm_num: idx of cdm to be flushed + */ + void (*update_pending_flush_cdm)(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num); + /** * Write the value of the pending_flush_mask to hardware * @ctx : ctl path ctx pointer @@ -239,6 +249,7 @@ struct dpu_hw_ctl_ops { * @pending_intf_flush_mask: pending INTF flush * @pending_wb_flush_mask: pending WB flush * @pending_dsc_flush_mask: pending DSC flush + * @pending_cdm_flush_mask: pending CDM flush * @ops: operation list */ struct dpu_hw_ctl { @@ -256,6 +267,7 @@ struct dpu_hw_ctl { u32 pending_merge_3d_flush_mask; u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0]; u32 pending_dsc_flush_mask; + u32 pending_cdm_flush_mask; /* ops */ struct dpu_hw_ctl_ops ops; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index 088807db2c..6a0a74832f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -6,6 +6,8 @@ #include <linux/bitops.h> #include <linux/slab.h> +#include <drm/drm_managed.h> + #include "dpu_core_irq.h" #include "dpu_kms.h" #include "dpu_hw_interrupts.h" @@ -472,8 +474,9 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, return intr_status; } -struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr, - const struct dpu_mdss_cfg *m) +struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, + void __iomem *addr, + const struct dpu_mdss_cfg *m) { struct dpu_hw_intr *intr; unsigned int i; @@ -481,7 +484,7 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr, if (!addr || !m) return ERR_PTR(-EINVAL); - intr = kzalloc(sizeof(*intr), GFP_KERNEL); + intr = drmm_kzalloc(dev, sizeof(*intr), GFP_KERNEL); if (!intr) return ERR_PTR(-ENOMEM); @@ -512,11 +515,6 @@
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr, return intr; } -void dpu_hw_intr_destroy(struct dpu_hw_intr *intr) -{ - kfree(intr); -} - int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, unsigned int irq_idx, void (*irq_cb)(void *arg), @@ -527,14 +525,14 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int ret; if (!irq_cb) { - DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n", - DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb); + DPU_ERROR("IRQ=[%d, %d] NULL callback\n", + DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx)); return -EINVAL; } if (!dpu_core_irq_is_valid(irq_idx)) { - DPU_ERROR("invalid IRQ=[%d, %d]\n", - DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx)); + DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n", + DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb); return -EINVAL; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h index 53a21ebc57..564b750a28 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h @@ -70,15 +70,12 @@ struct dpu_hw_intr { /** * dpu_hw_intr_init(): Initializes the interrupts hw object + * @dev: Corresponding device for devres management * @addr: mapped register io address of MDP * @m: pointer to MDSS catalog data */ -struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr, - const struct dpu_mdss_cfg *m); +struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev, + void __iomem *addr, + const struct dpu_mdss_cfg *m); -/** - * dpu_hw_intr_destroy(): Cleanup interrutps hw object - * @intr: pointer to interrupts hw object - */ -void dpu_hw_intr_destroy(struct dpu_hw_intr *intr); #endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index d85157acfb..5df5459040 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -98,6 +98,7 @@ enum dpu_hw_blk_type { DPU_HW_BLK_DSPP, DPU_HW_BLK_MERGE_3D, DPU_HW_BLK_DSC, + DPU_HW_BLK_CDM, DPU_HW_BLK_MAX, }; @@ -185,6 +186,11 @@ enum dpu_dsc { DSC_MAX }; +enum dpu_cdm { + CDM_0 = 1, + CDM_MAX +}; + enum dpu_pingpong { PINGPONG_NONE, PINGPONG_0, @@ -195,6 +201,8 @@ enum dpu_pingpong { PINGPONG_5, PINGPONG_6, PINGPONG_7, + PINGPONG_8, + PINGPONG_9, PINGPONG_S0, PINGPONG_MAX }; @@ -204,6 +212,7 @@ enum dpu_merge_3d { MERGE_3D_1, MERGE_3D_2, MERGE_3D_3, + MERGE_3D_4, MERGE_3D_MAX }; @@ -458,6 +467,7 @@ struct dpu_mdss_color { #define DPU_DBG_MASK_ROT (1 << 9) #define DPU_DBG_MASK_DSPP (1 << 10) #define DPU_DBG_MASK_DSC (1 << 11) +#define DPU_DBG_MASK_CDM (1 << 12) /** * struct dpu_hw_tear_check - Struct contains parameters to configure diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index 069bf429e5..0bf8a83e8d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -396,15 +396,6 @@ static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_sspp *ctx, format); } -static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_sspp *ctx) -{ - if (!ctx) - return 0; - - return dpu_hw_get_scaler3_ver(&ctx->hw, - ctx->cap->sblk->scaler_blk.base); -} - /* * dpu_hw_sspp_setup_rects() */ @@ -615,12 +606,8 @@ static void _setup_layer_ops(struct dpu_hw_sspp *c, test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features)) c->ops.setup_multirect = dpu_hw_sspp_setup_multirect; - if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) || - test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) || - 
test_bit(DPU_SSPP_SCALER_QSEED4, &features)) { + if (test_bit(DPU_SSPP_SCALER_QSEED3_COMPATIBLE, &features)) c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3; - c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver; - } if (test_bit(DPU_SSPP_CDP, &features)) c->ops.setup_cdp = dpu_hw_sspp_setup_cdp; @@ -655,10 +642,7 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms, cfg->len, kms); - if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) || - cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) || - cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) || - cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) + if (sblk->scaler_blk.len) dpu_debugfs_create_regset32("scaler_blk", 0400, debugfs_root, sblk->scaler_blk.base + cfg->base, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h index 3641ef6bef..b7dc52312c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h @@ -21,21 +21,6 @@ struct dpu_hw_sspp; #define DPU_SSPP_ROT_90 BIT(3) #define DPU_SSPP_SOLID_FILL BIT(4) -/** - * Define all scaler feature bits in catalog - */ -#define DPU_SSPP_SCALER (BIT(DPU_SSPP_SCALER_RGB) | \ - BIT(DPU_SSPP_SCALER_QSEED2) | \ - BIT(DPU_SSPP_SCALER_QSEED3) | \ - BIT(DPU_SSPP_SCALER_QSEED3LITE) | \ - BIT(DPU_SSPP_SCALER_QSEED4)) - -/* - * Define all CSC feature bits in catalog - */ -#define DPU_SSPP_CSC_ANY (BIT(DPU_SSPP_CSC) | \ - BIT(DPU_SSPP_CSC_10BIT)) - /** * Component indices */ @@ -296,12 +281,6 @@ struct dpu_hw_sspp_ops { struct dpu_hw_scaler3_cfg *scaler3_cfg, const struct dpu_format *format); - /** - * get_scaler_ver - get scaler h/w version - * @ctx: Pointer to pipe context - */ - u32 (*get_scaler_ver)(struct dpu_hw_sspp *ctx); - /** * setup_cdp - setup client driven prefetch * @pipe: Pointer to software pipe context diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c index 1d4f0b97c3..dd47582731 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c @@ -381,12 +381,6 @@ end: DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode); } -u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c, - u32 scaler_offset) -{ - return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset); -} - void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c, u32 csc_reg_off, const struct dpu_csc_cfg *data, bool csc10) @@ -563,3 +557,47 @@ bool dpu_hw_clk_force_ctrl(struct dpu_hw_blk_reg_map *c, return clk_forced_on; } + +#define TO_S15D16(_x_)((_x_) << 7) + +const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = { + { + /* S15.16 format */ + 0x00012A00, 0x00000000, 0x00019880, + 0x00012A00, 0xFFFF9B80, 0xFFFF3000, + 0x00012A00, 0x00020480, 0x00000000, + }, + /* signed bias */ + { 0xfff0, 0xff80, 0xff80,}, + { 0x0, 0x0, 0x0,}, + /* unsigned clamp */ + { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,}, + { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,}, +}; + +const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = { + { + /* S15.16 format */ + 0x00012A00, 0x00000000, 0x00019880, + 0x00012A00, 0xFFFF9B80, 0xFFFF3000, + 0x00012A00, 0x00020480, 0x00000000, + }, + /* signed bias */ + { 0xffc0, 0xfe00, 0xfe00,}, + { 0x0, 0x0, 0x0,}, + /* unsigned clamp */ + { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,}, + { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,}, +}; + +const struct dpu_csc_cfg dpu_csc10_rgb2yuv_601l = { + { + TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032), + TO_S15D16(0x1fb5), TO_S15D16(0x1f6c), TO_S15D16(0x00e1), + TO_S15D16(0x00e1), 
TO_S15D16(0x1f45), TO_S15D16(0x1fdc) + }, + { 0x00, 0x00, 0x00 }, + { 0x0040, 0x0200, 0x0200 }, + { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff }, + { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 }, +}; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h index ec09fc3865..64ded69fa9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h @@ -19,6 +19,12 @@ #define MISR_CTRL_STATUS_CLEAR BIT(10) #define MISR_CTRL_FREE_RUN_MASK BIT(31) +#define TO_S15D16(_x_)((_x_) << 7) + +extern const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L; +extern const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L; +extern const struct dpu_csc_cfg dpu_csc10_rgb2yuv_601l; + /* * This is the common struct maintained by each sub block * for mapping the register offsets in this block to the @@ -340,9 +346,6 @@ void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, u32 scaler_offset, u32 scaler_version, const struct dpu_format *format); -u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c, - u32 scaler_offset); - void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c, u32 csc_reg_off, const struct dpu_csc_cfg *data, bool csc10); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c index a5121a50b2..98e34afde2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c @@ -2,6 +2,8 @@ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. */ +#include <drm/drm_managed.h> + #include "dpu_hwio.h" #include "dpu_hw_catalog.h" #include "dpu_hw_vbif.h" @@ -211,12 +213,13 @@ static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops, ops->set_write_gather_en = dpu_hw_set_write_gather_en; } -struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg, - void __iomem *addr) +struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev, + const struct dpu_vbif_cfg *cfg, + void __iomem *addr) { struct dpu_hw_vbif *c; - c = kzalloc(sizeof(*c), GFP_KERNEL); + c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL); if (!c) return ERR_PTR(-ENOMEM); @@ -234,8 +237,3 @@ struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg, return c; } - -void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif) -{ - kfree(vbif); -} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h index 7e10d2a172..e2b4307500 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h @@ -108,12 +108,12 @@ struct dpu_hw_vbif { /** * dpu_hw_vbif_init() - Initializes the VBIF driver for the passed * VBIF catalog entry.
+ * @dev: Corresponding device for devres management * @cfg: VBIF catalog entry for which driver object is required * @addr: Mapped register io address of MDSS */ -struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg, - void __iomem *addr); - -void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif); +struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev, + const struct dpu_vbif_cfg *cfg, + void __iomem *addr); #endif /*_DPU_HW_VBIF_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 81acbebace..2330eb2a4e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -274,9 +274,6 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) struct dpu_kms *dpu_kms = to_dpu_kms(kms); void *p = dpu_hw_util_get_log_mask_ptr(); struct dentry *entry; - struct drm_device *dev; - struct msm_drm_private *priv; - int i; if (!p) return -EINVAL; @@ -285,9 +282,6 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) if (minor->type != DRM_MINOR_PRIMARY) return 0; - dev = dpu_kms->dev; - priv = dev->dev_private; - entry = debugfs_create_dir("debug", minor->debugfs_root); debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p); @@ -297,11 +291,6 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) dpu_debugfs_core_irq_init(dpu_kms, entry); dpu_debugfs_sspp_init(dpu_kms, entry); - for (i = 0; i < ARRAY_SIZE(priv->dp); i++) { - if (priv->dp[i]) - msm_dp_debugfs_init(priv->dp[i], minor); - } - return dpu_core_perf_debugfs_init(dpu_kms, entry); } #endif @@ -603,7 +592,6 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev, rc = msm_dp_modeset_init(priv->dp[i], dev, encoder); if (rc) { DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); - drm_encoder_cleanup(encoder); return rc; } } @@ -636,7 +624,6 @@ static int _dpu_kms_initialize_hdmi(struct drm_device *dev, rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder); if (rc) { DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); - drm_encoder_cleanup(encoder); return rc; } @@ -668,7 +655,6 @@ static int _dpu_kms_initialize_writeback(struct drm_device *dev, n_formats); if (rc) { DPU_ERROR("dpu_writeback_init, rc = %d\n", rc); - drm_encoder_cleanup(encoder); return rc; } @@ -812,20 +798,13 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) { int i; - if (dpu_kms->hw_intr) - dpu_hw_intr_destroy(dpu_kms->hw_intr); dpu_kms->hw_intr = NULL; /* safe to call these more than once during shutdown */ _dpu_kms_mmu_destroy(dpu_kms); - if (dpu_kms->catalog) { - for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { - if (dpu_kms->hw_vbif[i]) { - dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]); - dpu_kms->hw_vbif[i] = NULL; - } - } + for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { + dpu_kms->hw_vbif[i] = NULL; } dpu_kms_global_obj_fini(dpu_kms); @@ -858,7 +837,6 @@ static int dpu_irq_postinstall(struct msm_kms *kms) { struct msm_drm_private *priv; struct dpu_kms *dpu_kms = to_dpu_kms(kms); - int i; if (!dpu_kms || !dpu_kms->dev) return -EINVAL; @@ -867,9 +845,6 @@ static int dpu_irq_postinstall(struct msm_kms *kms) if (!priv) return -EINVAL; - for (i = 0; i < ARRAY_SIZE(priv->dp); i++) - msm_dp_irq_postinstall(priv->dp[i]); - return 0; } @@ -977,6 +952,10 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k } } + if (cat->cdm) + msm_disp_snapshot_add_block(disp_state, cat->cdm->len, + dpu_kms->mmio + cat->cdm->base, cat->cdm->name); + 
pm_runtime_put_sync(&dpu_kms->pdev->dev); } @@ -1080,7 +1059,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms) if (!dpu_kms->catalog) { DPU_ERROR("device config not known!\n"); rc = -EINVAL; - goto power_error; + goto err_pm_put; } /* @@ -1090,26 +1069,26 @@ static int dpu_kms_hw_init(struct msm_kms *kms) rc = _dpu_kms_mmu_init(dpu_kms); if (rc) { DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc); - goto power_error; + goto err_pm_put; } dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent); if (IS_ERR(dpu_kms->mdss)) { rc = PTR_ERR(dpu_kms->mdss); DPU_ERROR("failed to get MDSS data: %d\n", rc); - goto power_error; + goto err_pm_put; } if (!dpu_kms->mdss) { rc = -EINVAL; DPU_ERROR("NULL MDSS data\n"); - goto power_error; + goto err_pm_put; } rc = dpu_rm_init(dev, &dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio); if (rc) { DPU_ERROR("rm init failed: %d\n", rc); - goto power_error; + goto err_pm_put; } dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev, @@ -1120,18 +1099,18 @@ static int dpu_kms_hw_init(struct msm_kms *kms) rc = PTR_ERR(dpu_kms->hw_mdp); DPU_ERROR("failed to get hw_mdp: %d\n", rc); dpu_kms->hw_mdp = NULL; - goto power_error; + goto err_pm_put; } for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { struct dpu_hw_vbif *hw; const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i]; - hw = dpu_hw_vbif_init(vbif, dpu_kms->vbif[vbif->id]); + hw = dpu_hw_vbif_init(dev, vbif, dpu_kms->vbif[vbif->id]); if (IS_ERR(hw)) { rc = PTR_ERR(hw); DPU_ERROR("failed to init vbif %d: %d\n", vbif->id, rc); - goto power_error; + goto err_pm_put; } dpu_kms->hw_vbif[vbif->id] = hw; @@ -1147,15 +1126,15 @@ static int dpu_kms_hw_init(struct msm_kms *kms) rc = dpu_core_perf_init(&dpu_kms->perf, dpu_kms->catalog->perf, max_core_clk_rate); if (rc) { DPU_ERROR("failed to init perf %d\n", rc); - goto perf_err; + goto err_pm_put; } - dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog); - if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) { + dpu_kms->hw_intr = dpu_hw_intr_init(dev, dpu_kms->mmio, dpu_kms->catalog); + if (IS_ERR(dpu_kms->hw_intr)) { rc = PTR_ERR(dpu_kms->hw_intr); DPU_ERROR("hw_intr init failed: %d\n", rc); dpu_kms->hw_intr = NULL; - goto hw_intr_init_err; + goto err_pm_put; } dev->mode_config.min_width = 0; @@ -1180,7 +1159,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms) rc = _dpu_kms_drm_obj_init(dpu_kms); if (rc) { DPU_ERROR("modeset init failed: %d\n", rc); - goto drm_obj_init_err; + goto err_pm_put; } dpu_vbif_init_memtypes(dpu_kms); @@ -1189,10 +1168,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms) return 0; -drm_obj_init_err: -hw_intr_init_err: -perf_err: -power_error: +err_pm_put: pm_runtime_put_sync(&dpu_kms->pdev->dev); error: _dpu_kms_hw_destroy(dpu_kms); @@ -1350,6 +1326,7 @@ static const struct dev_pm_ops dpu_pm_ops = { static const struct of_device_id dpu_dt_match[] = { { .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, }, { .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, }, + { .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, }, { .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, }, { .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, }, { .compatible = "qcom,sc7280-dpu", .data = &dpu_sc7280_cfg, }, @@ -1364,6 +1341,7 @@ static const struct of_device_id dpu_dt_match[] = { { .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, }, { .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, }, { .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, }, + { .compatible = "qcom,sm8650-dpu", .data 
= &dpu_sm8650_cfg, }, {} }; MODULE_DEVICE_TABLE(of, dpu_dt_match); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 9112a702a0..d1207f4ec3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -136,6 +136,7 @@ struct dpu_global_state { uint32_t ctl_to_enc_id[CTL_MAX - CTL_0]; uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0]; uint32_t dsc_to_enc_id[DSC_MAX - DSC_0]; + uint32_t cdm_to_enc_id; }; struct dpu_global_state diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 3eef5e025e..ff975ad511 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -21,6 +21,7 @@ #include "dpu_kms.h" #include "dpu_formats.h" #include "dpu_hw_sspp.h" +#include "dpu_hw_util.h" #include "dpu_trace.h" #include "dpu_crtc.h" #include "dpu_vbif.h" @@ -78,8 +79,6 @@ static const uint32_t qcom_compressed_supported_formats[] = { struct dpu_plane { struct drm_plane base; - struct mutex lock; - enum dpu_sspp pipe; uint32_t color_fill; @@ -470,8 +469,7 @@ static void _dpu_plane_setup_scaler3(struct dpu_hw_sspp *pipe_hw, scale_cfg->src_height[i] /= chroma_subsmpl_v; } - if (pipe_hw->cap->features & - BIT(DPU_SSPP_SCALER_QSEED4)) { + if (pipe_hw->cap->sblk->scaler_blk.version >= 0x3000) { scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H; scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V; } else { @@ -511,36 +509,6 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg, } } -static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = { - { - /* S15.16 format */ - 0x00012A00, 0x00000000, 0x00019880, - 0x00012A00, 0xFFFF9B80, 0xFFFF3000, - 0x00012A00, 0x00020480, 0x00000000, - }, - /* signed bias */ - { 0xfff0, 0xff80, 0xff80,}, - { 0x0, 0x0, 0x0,}, - /* unsigned clamp */ - { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,}, - { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,}, -}; - -static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = { - { - /* S15.16 format */ - 0x00012A00, 0x00000000, 0x00019880, - 0x00012A00, 0xFFFF9B80, 0xFFFF3000, - 0x00012A00, 0x00020480, 0x00000000, - }, - /* signed bias */ - { 0xffc0, 0xfe00, 0xfe00,}, - { 0x0, 0x0, 0x0,}, - /* unsigned clamp */ - { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,}, - { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,}, -}; - static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_sw_pipe *pipe, const struct dpu_format *fmt) { @@ -774,8 +742,8 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu, min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 
2 : 1; if (DPU_FORMAT_IS_YUV(fmt) && - (!(pipe->sspp->cap->features & DPU_SSPP_SCALER) || - !(pipe->sspp->cap->features & DPU_SSPP_CSC_ANY))) { + (!pipe->sspp->cap->sblk->scaler_blk.len || + !pipe->sspp->cap->sblk->csc_blk.len)) { DPU_DEBUG_PLANE(pdpu, "plane doesn't have scaler/csc for yuv\n"); return -EINVAL; @@ -824,6 +792,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, plane); int ret = 0, min_scale; struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); + u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate; struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); struct dpu_sw_pipe *pipe = &pstate->pipe; struct dpu_sw_pipe *r_pipe = &pstate->r_pipe; @@ -892,14 +862,16 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, max_linewidth = pdpu->catalog->caps->max_linewidth; - if (drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) { + if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) || + _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) { /* * In parallel multirect case only the half of the usual width * is supported for tiled formats. If we are here, we know that * full width is more than max_linewidth, thus each rect is * wider than allowed. */ - if (DPU_FORMAT_IS_UBWC(fmt)) { + if (DPU_FORMAT_IS_UBWC(fmt) && + drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) { DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n", DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth); return -E2BIG; @@ -1213,29 +1185,6 @@ static void dpu_plane_atomic_update(struct drm_plane *plane, } } -static void dpu_plane_destroy(struct drm_plane *plane) -{ - struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL; - struct dpu_plane_state *pstate; - - DPU_DEBUG_PLANE(pdpu, "\n"); - - if (pdpu) { - pstate = to_dpu_plane_state(plane->state); - _dpu_plane_set_qos_ctrl(plane, &pstate->pipe, false); - - if (pstate->r_pipe.sspp) - _dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, false); - - mutex_destroy(&pdpu->lock); - - /* this will destroy the states as well */ - drm_plane_cleanup(plane); - - kfree(pdpu); - } -} - static void dpu_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { @@ -1405,7 +1354,6 @@ static bool dpu_plane_format_mod_supported(struct drm_plane *plane, static const struct drm_plane_funcs dpu_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = dpu_plane_destroy, .reset = dpu_plane_reset, .atomic_duplicate_state = dpu_plane_duplicate_state, .atomic_destroy_state = dpu_plane_destroy_state, @@ -1433,35 +1381,28 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, struct dpu_hw_sspp *pipe_hw; uint32_t num_formats; uint32_t supported_rotations; - int ret = -EINVAL; - - /* create and zero local structure */ - pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL); - if (!pdpu) { - DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe); - ret = -ENOMEM; - return ERR_PTR(ret); - } - - /* cache local stuff for later */ - plane = &pdpu->base; - pdpu->pipe = pipe; + int ret; /* initialize underlying h/w driver */ pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe); if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) { DPU_ERROR("[%u]SSPP is invalid\n", pipe); - goto clean_plane; + return ERR_PTR(-EINVAL); } format_list = pipe_hw->cap->sblk->format_list; num_formats = pipe_hw->cap->sblk->num_formats; - ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs, + 
pdpu = drmm_universal_plane_alloc(dev, struct dpu_plane, base, + 0xff, &dpu_plane_funcs, format_list, num_formats, supported_format_modifiers, type, NULL); - if (ret) - goto clean_plane; + if (IS_ERR(pdpu)) + return ERR_CAST(pdpu); + + /* cache local stuff for later */ + plane = &pdpu->base; + pdpu->pipe = pipe; pdpu->catalog = kms->catalog; @@ -1488,13 +1429,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, /* success! finalize initialization */ drm_plane_helper_add(plane, &dpu_plane_helper_funcs); - mutex_init(&pdpu->lock); - DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name, pipe, plane->base.id); return plane; - -clean_plane: - kfree(pdpu); - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 0bb28cf4a6..724537ab77 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -8,6 +8,7 @@ #include "dpu_kms.h" #include "dpu_hw_lm.h" #include "dpu_hw_ctl.h" +#include "dpu_hw_cdm.h" #include "dpu_hw_pingpong.h" #include "dpu_hw_sspp.h" #include "dpu_hw_intf.h" @@ -28,7 +29,6 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx, /** * struct dpu_rm_requirements - Reservation requirements parameter bundle * @topology: selected topology for the display - * @hw_res: Hardware resources required as reported by the encoders */ struct dpu_rm_requirements { struct msm_display_topology topology; @@ -176,6 +176,18 @@ int dpu_rm_init(struct drm_device *dev, rm->hw_sspp[sspp->id - SSPP_NONE] = hw; } + if (cat->cdm) { + struct dpu_hw_cdm *hw; + + hw = dpu_hw_cdm_init(dev, cat->cdm, mmio, cat->mdss_ver); + if (IS_ERR(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed cdm object creation: err %d\n", rc); + goto fail; + } + rm->cdm_blk = &hw->base; + } + return 0; fail: @@ -191,6 +203,8 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary * @rm: dpu resource manager handle * @primary_idx: index of primary mixer in rm->mixer_blks[] + * + * Returns: lm peer mixer id on success or %-EINVAL on error */ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx) { @@ -422,6 +436,26 @@ static int _dpu_rm_reserve_dsc(struct dpu_rm *rm, return 0; } +static int _dpu_rm_reserve_cdm(struct dpu_rm *rm, + struct dpu_global_state *global_state, + struct drm_encoder *enc) +{ + /* try allocating only one CDM block */ + if (!rm->cdm_blk) { + DPU_ERROR("CDM block does not exist\n"); + return -EIO; + } + + if (global_state->cdm_to_enc_id) { + DPU_ERROR("CDM_0 is already allocated\n"); + return -EIO; + } + + global_state->cdm_to_enc_id = enc->base.id; + + return 0; +} + static int _dpu_rm_make_reservation( struct dpu_rm *rm, struct dpu_global_state *global_state, @@ -447,6 +481,14 @@ static int _dpu_rm_make_reservation( if (ret) return ret; + if (reqs->topology.needs_cdm) { + ret = _dpu_rm_reserve_cdm(rm, global_state, enc); + if (ret) { + DPU_ERROR("unable to find CDM blk\n"); + return ret; + } + } + return ret; } @@ -457,9 +499,9 @@ static int _dpu_rm_populate_requirements( { reqs->topology = req_topology; - DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n", + DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d cdm: %d\n", reqs->topology.num_lm, reqs->topology.num_dsc, - reqs->topology.num_intf); + reqs->topology.num_intf, reqs->topology.needs_cdm); return 0; } @@ -488,6 +530,7 @@ void dpu_rm_release(struct dpu_global_state *global_state, 
ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id); _dpu_rm_clear_mapping(global_state->dspp_to_enc_id, ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id); + _dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id); } int dpu_rm_reserve( @@ -561,6 +604,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, hw_to_enc_id = global_state->dsc_to_enc_id; max_blks = ARRAY_SIZE(rm->dsc_blks); break; + case DPU_HW_BLK_CDM: + hw_blks = &rm->cdm_blk; + hw_to_enc_id = &global_state->cdm_to_enc_id; + max_blks = 1; + break; default: DPU_ERROR("blk type %d not managed by rm\n", type); return 0; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index 36752d837b..e3f83ebc65 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h @@ -22,6 +22,7 @@ struct dpu_global_state; * @hw_wb: array of wb hardware resources * @dspp_blks: array of dspp hardware resources * @hw_sspp: array of sspp hardware resources + * @cdm_blk: cdm hardware resource */ struct dpu_rm { struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0]; @@ -33,6 +34,7 @@ struct dpu_rm { struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0]; struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0]; struct dpu_hw_sspp *hw_sspp[SSPP_MAX - SSPP_NONE]; + struct dpu_hw_blk *cdm_blk; }; /** diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c index 3100957225..75f93e3462 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -123,16 +124,6 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val) drm_gem_object_put(val); } -static void mdp4_crtc_destroy(struct drm_crtc *crtc) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - - drm_crtc_cleanup(crtc); - drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work); - - kfree(mdp4_crtc); -} - /* statically (for now) map planes to mixer stage (z-order): */ static const int idxs[] = { [VG1] = 1, @@ -484,7 +475,6 @@ static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) static const struct drm_crtc_funcs mdp4_crtc_funcs = { .set_config = drm_atomic_helper_set_config, - .destroy = mdp4_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, .cursor_set = mdp4_crtc_cursor_set, .cursor_move = mdp4_crtc_cursor_move, @@ -625,6 +615,13 @@ static const char *dma_names[] = { "DMA_P", "DMA_S", "DMA_E", }; +static void mdp4_crtc_flip_cleanup(struct drm_device *dev, void *ptr) +{ + struct mdp4_crtc *mdp4_crtc = ptr; + + drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work); +} + /* initialize crtc */ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, struct drm_plane *plane, int id, int ovlp_id, @@ -632,10 +629,13 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, { struct drm_crtc *crtc = NULL; struct mdp4_crtc *mdp4_crtc; + int ret; - mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL); - if (!mdp4_crtc) - return ERR_PTR(-ENOMEM); + mdp4_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp4_crtc, base, + plane, NULL, + &mdp4_crtc_funcs, NULL); + if (IS_ERR(mdp4_crtc)) + return ERR_CAST(mdp4_crtc); crtc = &mdp4_crtc->base; @@ -657,9 +657,10 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, drm_flip_work_init(&mdp4_crtc->unref_cursor_work, "unref cursor", unref_cursor_worker); + ret = drmm_add_action_or_reset(dev, mdp4_crtc_flip_cleanup, mdp4_crtc); + if (ret) + return ERR_PTR(ret); - 
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs, - NULL); drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); return crtc; diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c index 39b8fe53c2..74dafe7106 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c @@ -26,18 +26,6 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder) return to_mdp4_kms(to_mdp_kms(priv->kms)); } -static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder) -{ - struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder); - - drm_encoder_cleanup(encoder); - kfree(mdp4_dsi_encoder); -} - -static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = { - .destroy = mdp4_dsi_encoder_destroy, -}; - static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -148,28 +136,18 @@ static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = { /* initialize encoder */ struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev) { - struct drm_encoder *encoder = NULL; + struct drm_encoder *encoder; struct mdp4_dsi_encoder *mdp4_dsi_encoder; - int ret; - mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL); - if (!mdp4_dsi_encoder) { - ret = -ENOMEM; - goto fail; - } + mdp4_dsi_encoder = drmm_encoder_alloc(dev, struct mdp4_dsi_encoder, base, + NULL, DRM_MODE_ENCODER_DSI, NULL); + if (IS_ERR(mdp4_dsi_encoder)) + return ERR_CAST(mdp4_dsi_encoder); encoder = &mdp4_dsi_encoder->base; - drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs, - DRM_MODE_ENCODER_DSI, NULL); drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs); return encoder; - -fail: - if (encoder) - mdp4_dsi_encoder_destroy(encoder); - - return ERR_PTR(ret); } #endif /* CONFIG_DRM_MSM_DSI */ diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c index 88645dbc37..3b70764b48 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c @@ -25,17 +25,6 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder) return to_mdp4_kms(to_mdp_kms(priv->kms)); } -static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder) -{ - struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); - drm_encoder_cleanup(encoder); - kfree(mdp4_dtv_encoder); -} - -static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = { - .destroy = mdp4_dtv_encoder_destroy, -}; - static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -173,41 +162,29 @@ long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate) /* initialize encoder */ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) { - struct drm_encoder *encoder = NULL; + struct drm_encoder *encoder; struct mdp4_dtv_encoder *mdp4_dtv_encoder; - int ret; - mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL); - if (!mdp4_dtv_encoder) { - ret = -ENOMEM; - goto fail; - } + mdp4_dtv_encoder = drmm_encoder_alloc(dev, struct mdp4_dtv_encoder, base, + NULL, DRM_MODE_ENCODER_TMDS, NULL); + if (IS_ERR(mdp4_dtv_encoder)) + return ERR_CAST(mdp4_dtv_encoder); encoder = &mdp4_dtv_encoder->base; - drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); 
drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs); mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk"); if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) { DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n"); - ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk); - goto fail; + return ERR_CAST(mdp4_dtv_encoder->hdmi_clk); } mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk"); if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) { DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n"); - ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk); - goto fail; + return ERR_CAST(mdp4_dtv_encoder->mdp_clk); } return encoder; - -fail: - if (encoder) - mdp4_dtv_encoder_destroy(encoder); - - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c index 10eb3e5b21..576995ddce 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c @@ -18,7 +18,7 @@ struct mdp4_lcdc_encoder { struct drm_panel *panel; struct clk *lcdc_clk; unsigned long int pixclock; - struct regulator *regs[3]; + struct regulator_bulk_data regs[3]; bool enabled; uint32_t bsc; }; @@ -30,18 +30,6 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder) return to_mdp4_kms(to_mdp_kms(priv->kms)); } -static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder) -{ - struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = - to_mdp4_lcdc_encoder(encoder); - drm_encoder_cleanup(encoder); - kfree(mdp4_lcdc_encoder); -} - -static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = { - .destroy = mdp4_lcdc_encoder_destroy, -}; - /* this should probably be a helper: */ static struct drm_connector *get_connector(struct drm_encoder *encoder) { @@ -271,12 +259,10 @@ static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder, static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) { - struct drm_device *dev = encoder->dev; struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = to_mdp4_lcdc_encoder(encoder); struct mdp4_kms *mdp4_kms = get_kms(encoder); struct drm_panel *panel; - int i, ret; if (WARN_ON(!mdp4_lcdc_encoder->enabled)) return; @@ -301,11 +287,8 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk); - for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { - ret = regulator_disable(mdp4_lcdc_encoder->regs[i]); - if (ret) - DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret); - } + regulator_bulk_disable(ARRAY_SIZE(mdp4_lcdc_encoder->regs), + mdp4_lcdc_encoder->regs); mdp4_lcdc_encoder->enabled = false; } @@ -319,7 +302,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) struct mdp4_kms *mdp4_kms = get_kms(encoder); struct drm_panel *panel; uint32_t config; - int i, ret; + int ret; if (WARN_ON(mdp4_lcdc_encoder->enabled)) return; @@ -339,11 +322,10 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) mdp4_crtc_set_config(encoder->crtc, config); mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0); - for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { - ret = regulator_enable(mdp4_lcdc_encoder->regs[i]); - if (ret) - DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret); - } + ret = regulator_bulk_enable(ARRAY_SIZE(mdp4_lcdc_encoder->regs), + mdp4_lcdc_encoder->regs); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to enable regulators: %d\n", ret); DBG("setting lcdc_clk=%lu", pc); ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc); @@ -383,63 +365,38 @@ long 
mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate) struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, struct device_node *panel_node) { - struct drm_encoder *encoder = NULL; + struct drm_encoder *encoder; struct mdp4_lcdc_encoder *mdp4_lcdc_encoder; - struct regulator *reg; int ret; - mdp4_lcdc_encoder = kzalloc(sizeof(*mdp4_lcdc_encoder), GFP_KERNEL); - if (!mdp4_lcdc_encoder) { - ret = -ENOMEM; - goto fail; - } + mdp4_lcdc_encoder = drmm_encoder_alloc(dev, struct mdp4_lcdc_encoder, base, + NULL, DRM_MODE_ENCODER_LVDS, NULL); + if (IS_ERR(mdp4_lcdc_encoder)) + return ERR_CAST(mdp4_lcdc_encoder); mdp4_lcdc_encoder->panel_node = panel_node; encoder = &mdp4_lcdc_encoder->base; - drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs, - DRM_MODE_ENCODER_LVDS, NULL); drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs); /* TODO: do we need different pll in other cases? */ mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev); if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) { DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n"); - ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk); - goto fail; + return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk); } /* TODO: different regulators in other cases? */ - reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v"); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret); - goto fail; - } - mdp4_lcdc_encoder->regs[0] = reg; - - reg = devm_regulator_get(dev->dev, "lvds-pll-vdda"); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret); - goto fail; - } - mdp4_lcdc_encoder->regs[1] = reg; + mdp4_lcdc_encoder->regs[0].supply = "lvds-vccs-3p3v"; + mdp4_lcdc_encoder->regs[1].supply = "lvds-pll-vdda"; + mdp4_lcdc_encoder->regs[2].supply = "lvds-vdda"; - reg = devm_regulator_get(dev->dev, "lvds-vdda"); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret); - goto fail; - } - mdp4_lcdc_encoder->regs[2] = reg; + ret = devm_regulator_bulk_get(dev->dev, + ARRAY_SIZE(mdp4_lcdc_encoder->regs), + mdp4_lcdc_encoder->regs); + if (ret) + return ERR_PTR(ret); return encoder; - -fail: - if (encoder) - mdp4_lcdc_encoder_destroy(encoder); - - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c index 694d543413..c5179e4c39 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c @@ -1350,23 +1350,17 @@ int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler) return cfg_handler->revision; } -void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler) -{ - kfree(cfg_handler); -} - struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, uint32_t major, uint32_t minor) { struct drm_device *dev = mdp5_kms->dev; struct mdp5_cfg_handler *cfg_handler; const struct mdp5_cfg_handler *cfg_handlers; - int i, ret = 0, num_handlers; + int i, num_handlers; - cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL); + cfg_handler = devm_kzalloc(dev->dev, sizeof(*cfg_handler), GFP_KERNEL); if (unlikely(!cfg_handler)) { - ret = -ENOMEM; - goto fail; + return ERR_PTR(-ENOMEM); } switch (major) { @@ -1381,8 +1375,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, default: DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n", major, minor); - ret = -ENXIO; - goto fail; + return ERR_PTR(-ENXIO); } /* only after mdp5_cfg global pointer's init can we access 
the hw */ @@ -1396,8 +1389,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, if (unlikely(!mdp5_cfg)) { DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n", major, minor); - ret = -ENXIO; - goto fail; + return ERR_PTR(-ENXIO); } cfg_handler->revision = minor; @@ -1406,10 +1398,4 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, DBG("MDP5: %s hw config selected", mdp5_cfg->name); return cfg_handler; - -fail: - if (cfg_handler) - mdp5_cfg_destroy(cfg_handler); - - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h index c2502cc338..26c5d8b4ab 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h @@ -121,6 +121,5 @@ int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd); struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, uint32_t major, uint32_t minor); -void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd); #endif /* __MDP5_CFG_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 86036dd4e1..4a3db2ea16 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -172,14 +173,11 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val) drm_gem_object_put(val); } -static void mdp5_crtc_destroy(struct drm_crtc *crtc) +static void mdp5_crtc_flip_cleanup(struct drm_device *dev, void *ptr) { - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc *mdp5_crtc = ptr; - drm_crtc_cleanup(crtc); drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work); - - kfree(mdp5_crtc); } static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage) @@ -1147,7 +1145,6 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc) static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { .set_config = drm_atomic_helper_set_config, - .destroy = mdp5_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, .reset = mdp5_crtc_reset, .atomic_duplicate_state = mdp5_crtc_duplicate_state, @@ -1161,7 +1158,6 @@ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { static const struct drm_crtc_funcs mdp5_crtc_funcs = { .set_config = drm_atomic_helper_set_config, - .destroy = mdp5_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, .reset = mdp5_crtc_reset, .atomic_duplicate_state = mdp5_crtc_duplicate_state, @@ -1327,10 +1323,16 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, { struct drm_crtc *crtc = NULL; struct mdp5_crtc *mdp5_crtc; + int ret; - mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL); - if (!mdp5_crtc) - return ERR_PTR(-ENOMEM); + mdp5_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp5_crtc, base, + plane, cursor_plane, + cursor_plane ? + &mdp5_crtc_no_lm_cursor_funcs : + &mdp5_crtc_funcs, + NULL); + if (IS_ERR(mdp5_crtc)) + return ERR_CAST(mdp5_crtc); crtc = &mdp5_crtc->base; @@ -1346,13 +1348,11 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true; - drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane, - cursor_plane ? 
- &mdp5_crtc_no_lm_cursor_funcs : - &mdp5_crtc_funcs, NULL); - drm_flip_work_init(&mdp5_crtc->unref_cursor_work, "unref cursor", unref_cursor_worker); + ret = drmm_add_action_or_reset(dev, mdp5_crtc_flip_cleanup, mdp5_crtc); + if (ret) + return ERR_PTR(ret); drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c index 1220f2b20e..666de99a46 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c @@ -681,11 +681,6 @@ void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr) } } -void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr) -{ - kfree(ctl_mgr); -} - struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd) { @@ -697,18 +692,16 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, unsigned long flags; int c, ret; - ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL); + ctl_mgr = devm_kzalloc(dev->dev, sizeof(*ctl_mgr), GFP_KERNEL); if (!ctl_mgr) { DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n"); - ret = -ENOMEM; - goto fail; + return ERR_PTR(-ENOMEM); } if (WARN_ON(ctl_cfg->count > MAX_CTL)) { DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n", ctl_cfg->count); - ret = -ENOSPC; - goto fail; + return ERR_PTR(-ENOSPC); } /* initialize the CTL manager: */ @@ -727,7 +720,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c); ret = -EINVAL; spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); - goto fail; + return ERR_PTR(ret); } ctl->ctlm = ctl_mgr; ctl->id = c; @@ -755,10 +748,4 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, DBG("Pool of %d CTLs created.", ctl_mgr->nctl); return ctl_mgr; - -fail: - if (ctl_mgr) - mdp5_ctlm_destroy(ctl_mgr); - - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h index c2af68aa77..9020e8efc4 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h @@ -17,7 +17,6 @@ struct mdp5_ctl_manager; struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd); void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm); -void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); /* * CTL prototypes: diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c index 79d67c4957..8db97083e1 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c @@ -16,17 +16,6 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder) return to_mdp5_kms(to_mdp_kms(priv->kms)); } -static void mdp5_encoder_destroy(struct drm_encoder *encoder) -{ - struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - drm_encoder_cleanup(encoder); - kfree(mdp5_encoder); -} - -static const struct drm_encoder_funcs mdp5_encoder_funcs = { - .destroy = mdp5_encoder_destroy, -}; - static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -342,13 +331,11 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, struct mdp5_encoder *mdp5_encoder; int enc_type = (intf->type == INTF_DSI) ? 
DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS; - int ret; - mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL); - if (!mdp5_encoder) { - ret = -ENOMEM; - goto fail; - } + mdp5_encoder = drmm_encoder_alloc(dev, struct mdp5_encoder, base, + NULL, enc_type, NULL); + if (IS_ERR(mdp5_encoder)) + return ERR_CAST(mdp5_encoder); encoder = &mdp5_encoder->base; mdp5_encoder->ctl = ctl; @@ -356,15 +343,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, spin_lock_init(&mdp5_encoder->intf_lock); - drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL); - drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); return encoder; - -fail: - if (encoder) - mdp5_encoder_destroy(encoder); - - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index ec933d597e..0827634664 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -209,13 +209,6 @@ static void mdp5_kms_destroy(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); struct msm_gem_address_space *aspace = kms->aspace; - int i; - - for (i = 0; i < mdp5_kms->num_hwmixers; i++) - mdp5_mixer_destroy(mdp5_kms->hwmixers[i]); - - for (i = 0; i < mdp5_kms->num_hwpipes; i++) - mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); if (aspace) { aspace->mmu->funcs->detach(aspace->mmu); @@ -623,18 +616,6 @@ fail: static void mdp5_destroy(struct mdp5_kms *mdp5_kms) { - int i; - - if (mdp5_kms->ctlm) - mdp5_ctlm_destroy(mdp5_kms->ctlm); - if (mdp5_kms->smp) - mdp5_smp_destroy(mdp5_kms->smp); - if (mdp5_kms->cfg) - mdp5_cfg_destroy(mdp5_kms->cfg); - - for (i = 0; i < mdp5_kms->num_intfs; i++) - kfree(mdp5_kms->intfs[i]); - if (mdp5_kms->rpm_enabled) pm_runtime_disable(&mdp5_kms->pdev->dev); @@ -652,7 +633,7 @@ static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt, for (i = 0; i < cnt; i++) { struct mdp5_hw_pipe *hwpipe; - hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps); + hwpipe = mdp5_pipe_init(dev, pipes[i], offsets[i], caps); if (IS_ERR(hwpipe)) { ret = PTR_ERR(hwpipe); DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n", @@ -724,7 +705,7 @@ static int hwmixer_init(struct mdp5_kms *mdp5_kms) for (i = 0; i < hw_cfg->lm.count; i++) { struct mdp5_hw_mixer *mixer; - mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]); + mixer = mdp5_mixer_init(dev, &hw_cfg->lm.instances[i]); if (IS_ERR(mixer)) { ret = PTR_ERR(mixer); DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n", @@ -755,7 +736,7 @@ static int interface_init(struct mdp5_kms *mdp5_kms) if (intf_types[i] == INTF_DISABLED) continue; - intf = kzalloc(sizeof(*intf), GFP_KERNEL); + intf = devm_kzalloc(dev->dev, sizeof(*intf), GFP_KERNEL); if (!intf) { DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i); return -ENOMEM; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c index 2536def2a0..2822b533f8 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c @@ -140,20 +140,16 @@ int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer) return 0; } -void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer) -{ - kfree(mixer); -} - static const char * const mixer_names[] = { "LM0", "LM1", "LM2", "LM3", "LM4", "LM5", }; -struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm) +struct mdp5_hw_mixer *mdp5_mixer_init(struct drm_device *dev, + const struct mdp5_lm_instance *lm) { struct 
mdp5_hw_mixer *mixer; - mixer = kzalloc(sizeof(*mixer), GFP_KERNEL); + mixer = devm_kzalloc(dev->dev, sizeof(*mixer), GFP_KERNEL); if (!mixer) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h index 545ee223b9..2bedd75835 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h @@ -25,8 +25,8 @@ struct mdp5_hw_mixer_state { struct drm_crtc *hwmixer_to_crtc[8]; }; -struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm); -void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm); +struct mdp5_hw_mixer *mdp5_mixer_init(struct drm_device *dev, + const struct mdp5_lm_instance *lm); int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc, uint32_t caps, struct mdp5_hw_mixer **mixer, struct mdp5_hw_mixer **r_mixer); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c index e4b8a78983..99b2c30b1d 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c @@ -151,17 +151,13 @@ int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe) return 0; } -void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe) -{ - kfree(hwpipe); -} - -struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, +struct mdp5_hw_pipe *mdp5_pipe_init(struct drm_device *dev, + enum mdp5_pipe pipe, uint32_t reg_offset, uint32_t caps) { struct mdp5_hw_pipe *hwpipe; - hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL); + hwpipe = devm_kzalloc(dev->dev, sizeof(*hwpipe), GFP_KERNEL); if (!hwpipe) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h index cca67938ca..452138821f 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h @@ -39,8 +39,8 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane, struct mdp5_hw_pipe **r_hwpipe); int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe); -struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, +struct mdp5_hw_pipe *mdp5_pipe_init(struct drm_device *dev, + enum mdp5_pipe pipe, uint32_t reg_offset, uint32_t caps); -void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe); #endif /* __MDP5_PIPE_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c index b68682c1b5..8b59562e29 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c @@ -370,23 +370,17 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p) drm_modeset_unlock(&mdp5_kms->glob_state_lock); } -void mdp5_smp_destroy(struct mdp5_smp *smp) -{ - kfree(smp); -} struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg) { + struct drm_device *dev = mdp5_kms->dev; struct mdp5_smp_state *state; struct mdp5_global_state *global_state; struct mdp5_smp *smp; - int ret; - smp = kzalloc(sizeof(*smp), GFP_KERNEL); - if (unlikely(!smp)) { - ret = -ENOMEM; - goto fail; - } + smp = devm_kzalloc(dev->dev, sizeof(*smp), GFP_KERNEL); + if (unlikely(!smp)) + return ERR_PTR(-ENOMEM); smp->dev = mdp5_kms->dev; smp->blk_cnt = cfg->mmb_count; @@ -400,9 +394,4 @@ struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_ memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved)); return smp; -fail: - if (smp) - mdp5_smp_destroy(smp); - - return ERR_PTR(ret); } diff --git 
a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h index ba5618e136..d8b6a11413 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h @@ -68,7 +68,6 @@ struct mdp5_smp; struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg); -void mdp5_smp_destroy(struct mdp5_smp *smp); void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p); diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index 8e3b677f35..03f4951c49 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -291,6 +291,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, return -EINVAL; } + ret = pm_runtime_resume_and_get(dp_aux->dev); + if (ret) + return ret; + mutex_lock(&aux->mutex); if (!aux->initted) { ret = -EIO; @@ -364,6 +368,7 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, exit: mutex_unlock(&aux->mutex); + pm_runtime_put_sync(dp_aux->dev); return ret; } @@ -474,7 +479,6 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux) int dp_aux_register(struct drm_dp_aux *dp_aux) { - struct dp_aux_private *aux; int ret; if (!dp_aux) { @@ -482,12 +486,7 @@ int dp_aux_register(struct drm_dp_aux *dp_aux) return -EINVAL; } - aux = container_of(dp_aux, struct dp_aux_private, dp_aux); - - aux->dp_aux.name = "dpu_dp_aux"; - aux->dp_aux.dev = aux->dev; - aux->dp_aux.transfer = dp_aux_transfer; - ret = drm_dp_aux_register(&aux->dp_aux); + ret = drm_dp_aux_register(dp_aux); if (ret) { DRM_ERROR("%s: failed to register drm aux: %d\n", __func__, ret); @@ -502,6 +501,21 @@ void dp_aux_unregister(struct drm_dp_aux *dp_aux) drm_dp_aux_unregister(dp_aux); } +static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux, + unsigned long wait_us) +{ + int ret; + struct dp_aux_private *aux; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + pm_runtime_get_sync(aux->dev); + ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog); + pm_runtime_put_sync(aux->dev); + + return ret; +} + struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, bool is_edp) { @@ -525,6 +539,17 @@ struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, aux->catalog = catalog; aux->retry_cnt = 0; + /* + * Use drm_dp_aux_init() to set up the aux adapter + * before registering AUX with the DRM device so that + * the msm eDP panel can be detected by generic_edp_panel_probe(). 
+ */ + aux->dp_aux.name = "dpu_dp_aux"; + aux->dp_aux.dev = dev; + aux->dp_aux.transfer = dp_aux_transfer; + aux->dp_aux.wait_hpd_asserted = dp_wait_hpd_asserted; + drm_dp_aux_init(&aux->dp_aux); + return &aux->dp_aux; } diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c index 3bba901afe..6c281dc095 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.c +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -19,13 +19,9 @@ #define DEBUG_NAME "msm_dp" struct dp_debug_private { - struct dentry *root; - struct dp_link *link; struct dp_panel *panel; struct drm_connector *connector; - struct device *dev; - struct drm_device *drm_dev; struct dp_debug dp_debug; }; @@ -204,35 +200,33 @@ static const struct file_operations test_active_fops = { .write = dp_test_active_write }; -static void dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor) +static void dp_debug_init(struct dp_debug *dp_debug, struct dentry *root, bool is_edp) { - char path[64]; struct dp_debug_private *debug = container_of(dp_debug, struct dp_debug_private, dp_debug); - snprintf(path, sizeof(path), "msm_dp-%s", debug->connector->name); - - debug->root = debugfs_create_dir(path, minor->debugfs_root); - - debugfs_create_file("dp_debug", 0444, debug->root, + debugfs_create_file("dp_debug", 0444, root, debug, &dp_debug_fops); - debugfs_create_file("msm_dp_test_active", 0444, - debug->root, - debug, &test_active_fops); + if (!is_edp) { + debugfs_create_file("msm_dp_test_active", 0444, + root, + debug, &test_active_fops); - debugfs_create_file("msm_dp_test_data", 0444, - debug->root, - debug, &dp_test_data_fops); + debugfs_create_file("msm_dp_test_data", 0444, + root, + debug, &dp_test_data_fops); - debugfs_create_file("msm_dp_test_type", 0444, - debug->root, - debug, &dp_test_type_fops); + debugfs_create_file("msm_dp_test_type", 0444, + root, + debug, &dp_test_type_fops); + } } struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, struct dp_link *link, - struct drm_connector *connector, struct drm_minor *minor) + struct drm_connector *connector, + struct dentry *root, bool is_edp) { struct dp_debug_private *debug; struct dp_debug *dp_debug; @@ -253,46 +247,15 @@ struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, debug->dp_debug.debug_en = false; debug->link = link; debug->panel = panel; - debug->dev = dev; - debug->drm_dev = minor->dev; - debug->connector = connector; dp_debug = &debug->dp_debug; dp_debug->vdisplay = 0; dp_debug->hdisplay = 0; dp_debug->vrefresh = 0; - dp_debug_init(dp_debug, minor); + dp_debug_init(dp_debug, root, is_edp); return dp_debug; error: return ERR_PTR(rc); } - -static int dp_debug_deinit(struct dp_debug *dp_debug) -{ - struct dp_debug_private *debug; - - if (!dp_debug) - return -EINVAL; - - debug = container_of(dp_debug, struct dp_debug_private, dp_debug); - - debugfs_remove_recursive(debug->root); - - return 0; -} - -void dp_debug_put(struct dp_debug *dp_debug) -{ - struct dp_debug_private *debug; - - if (!dp_debug) - return; - - debug = container_of(dp_debug, struct dp_debug_private, dp_debug); - - dp_debug_deinit(dp_debug); - - devm_kfree(debug->dev, debug); -} diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h index 124227873d..9b3b2e702f 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.h +++ b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -34,7 +34,8 @@ struct dp_debug { * @panel: instance of panel module * @link: instance of link module * @connector: double pointer to display connector - * @minor: pointer to drm 
minor number after device registration + * @root: connector's debugfs root + * @is_edp: set for eDP connectors / panels * return: pointer to allocated debug module data * * This function sets up the debug module and provides a way @@ -43,31 +44,21 @@ struct dp_debug { struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, struct dp_link *link, struct drm_connector *connector, - struct drm_minor *minor); - -/** - * dp_debug_put() - * - * Cleans up dp_debug instance - * - * @dp_debug: instance of dp_debug - */ -void dp_debug_put(struct dp_debug *dp_debug); + struct dentry *root, + bool is_edp); #else static inline struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, struct dp_link *link, - struct drm_connector *connector, struct drm_minor *minor) + struct drm_connector *connector, + struct dentry *root, + bool is_edp) { return ERR_PTR(-EINVAL); } -static inline void dp_debug_put(struct dp_debug *dp_debug) -{ -} - #endif /* defined(CONFIG_DEBUG_FS) */ #endif /* _DP_DEBUG_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 4f89c99395..78464c395c 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -49,13 +49,11 @@ enum { ST_CONNECTED, ST_DISCONNECT_PENDING, ST_DISPLAY_OFF, - ST_SUSPENDED, }; enum { EV_NO_EVENT, /* hpd events */ - EV_HPD_INIT_SETUP, EV_HPD_PLUG_INT, EV_IRQ_HPD_INT, EV_HPD_UNPLUG_INT, @@ -281,11 +279,6 @@ static int dp_display_bind(struct device *dev, struct device *master, dp->dp_display.drm_dev = drm; priv->dp[dp->id] = &dp->dp_display; - rc = dp->parser->parse(dp->parser); - if (rc) { - DRM_ERROR("device tree parsing failed\n"); - goto end; - } dp->drm_dev = drm; @@ -296,11 +289,6 @@ static int dp_display_bind(struct device *dev, struct device *master, goto end; } - rc = dp_power_client_init(dp->power); - if (rc) { - DRM_ERROR("Power client create failed\n"); - goto end; - } rc = dp_register_audio_driver(dev, dp->audio); if (rc) { @@ -325,15 +313,10 @@ static void dp_display_unbind(struct device *dev, struct device *master, struct dp_display_private *dp = dev_get_dp_display_private(dev); struct msm_drm_private *priv = dev_get_drvdata(master); - /* disable all HPD interrupts */ - if (dp->core_initialized) - dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); - kthread_stop(dp->ev_tsk); of_dp_aux_depopulate_bus(dp->aux); - dp_power_client_deinit(dp->power); dp_unregister_audio_driver(dev, dp->audio); dp_aux_unregister(dp->aux); dp->drm_dev = NULL; @@ -357,12 +340,11 @@ static void dp_display_send_hpd_event(struct msm_dp *dp_display) drm_helper_hpd_irq_event(connector->dev); } - static int dp_display_send_hpd_notification(struct dp_display_private *dp, bool hpd) { - if ((hpd && dp->dp_display.is_connected) || - (!hpd && !dp->dp_display.is_connected)) { + if ((hpd && dp->dp_display.link_ready) || + (!hpd && !dp->dp_display.link_ready)) { drm_dbg_dp(dp->drm_dev, "HPD already %s\n", (hpd ? 
"on" : "off")); return 0; @@ -378,7 +360,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp, dp->panel->downstream_ports); } - dp->dp_display.is_connected = hpd; + dp->dp_display.link_ready = hpd; drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n", dp->dp_display.connector_type, hpd); @@ -581,6 +563,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) { u32 state; int ret; + struct platform_device *pdev = dp->dp_display.pdev; mutex_lock(&dp->event_mutex); @@ -588,7 +571,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", dp->dp_display.connector_type, state); - if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) { + if (state == ST_DISPLAY_OFF) { mutex_unlock(&dp->event_mutex); return 0; } @@ -605,9 +588,17 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) return 0; } - ret = dp_display_usbpd_configure_cb(&dp->dp_display.pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret) { + DRM_ERROR("failed to pm_runtime_resume\n"); + mutex_unlock(&dp->event_mutex); + return ret; + } + + ret = dp_display_usbpd_configure_cb(&pdev->dev); if (ret) { /* link train failed */ dp->hpd_state = ST_DISCONNECTED; + pm_runtime_put_sync(&pdev->dev); } else { dp->hpd_state = ST_MAINLINK_READY; } @@ -637,6 +628,7 @@ static void dp_display_handle_plugged_change(struct msm_dp *dp_display, static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) { u32 state; + struct platform_device *pdev = dp->dp_display.pdev; mutex_lock(&dp->event_mutex); @@ -664,6 +656,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) dp_display_host_phy_exit(dp); dp->hpd_state = ST_DISCONNECTED; dp_display_notify_disconnect(&dp->dp_display.pdev->dev); + pm_runtime_put_sync(&pdev->dev); mutex_unlock(&dp->event_mutex); return 0; } @@ -687,6 +680,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) dp->dp_display.connector_type, state); /* uevent will complete disconnection part */ + pm_runtime_put_sync(&pdev->dev); mutex_unlock(&dp->event_mutex); return 0; } @@ -702,7 +696,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", dp->dp_display.connector_type, state); - if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) { + if (state == ST_DISPLAY_OFF) { mutex_unlock(&dp->event_mutex); return 0; } @@ -726,7 +720,6 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) static void dp_display_deinit_sub_modules(struct dp_display_private *dp) { - dp_debug_put(dp->debug); dp_audio_put(dp->audio); dp_panel_put(dp->panel); dp_aux_put(dp->aux); @@ -924,7 +917,7 @@ int dp_display_set_plugged_cb(struct msm_dp *dp_display, dp_display->plugged_cb = fn; dp_display->codec_dev = codec_dev; - plugged = dp_display->is_connected; + plugged = dp_display->link_ready; dp_display_handle_plugged_change(dp_display, plugged); return 0; @@ -1114,9 +1107,6 @@ static int hpd_event_thread(void *data) spin_unlock_irqrestore(&dp_priv->event_lock, flag); switch (todo->event_id) { - case EV_HPD_INIT_SETUP: - dp_display_host_init(dp_priv); - break; case EV_HPD_PLUG_INT: dp_hpd_plug_handle(dp_priv, todo->data); break; @@ -1195,27 +1185,21 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) return ret; } -int dp_display_request_irq(struct msm_dp *dp_display) +static int dp_display_request_irq(struct dp_display_private *dp) { int rc = 0; - 
struct dp_display_private *dp; + struct platform_device *pdev = dp->dp_display.pdev; - if (!dp_display) { - DRM_ERROR("invalid input\n"); - return -EINVAL; - } - - dp = container_of(dp_display, struct dp_display_private, dp_display); - - dp->irq = irq_of_parse_and_map(dp->dp_display.pdev->dev.of_node, 0); - if (!dp->irq) { + dp->irq = platform_get_irq(pdev, 0); + if (dp->irq < 0) { DRM_ERROR("failed to get irq\n"); - return -EINVAL; + return dp->irq; } - rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq, - dp_display_irq_handler, - IRQF_TRIGGER_HIGH, "dp_display_isr", dp); + rc = devm_request_irq(&pdev->dev, dp->irq, dp_display_irq_handler, + IRQF_TRIGGER_HIGH|IRQF_NO_AUTOEN, + "dp_display_isr", dp); + if (rc < 0) { DRM_ERROR("failed to request IRQ%u: %d\n", dp->irq, rc); @@ -1244,6 +1228,29 @@ static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pde return NULL; } +static int dp_display_get_next_bridge(struct msm_dp *dp); + +static int dp_display_probe_tail(struct device *dev) +{ + struct msm_dp *dp = dev_get_drvdata(dev); + int ret; + + ret = dp_display_get_next_bridge(dp); + if (ret) + return ret; + + ret = component_add(dev, &dp_display_comp_ops); + if (ret) + DRM_ERROR("component add failed, rc=%d\n", ret); + + return ret; +} + +static int dp_auxbus_done_probe(struct drm_dp_aux *aux) +{ + return dp_display_probe_tail(aux->dev); +} + static int dp_display_probe(struct platform_device *pdev) { int rc = 0; @@ -1277,6 +1284,18 @@ static int dp_display_probe(struct platform_device *pdev) return -EPROBE_DEFER; } + rc = dp->parser->parse(dp->parser); + if (rc) { + DRM_ERROR("device tree parsing failed\n"); + goto err; + } + + rc = dp_power_client_init(dp->power); + if (rc) { + DRM_ERROR("Power client create failed\n"); + goto err; + } + /* setup event q */ mutex_init(&dp->event_mutex); init_waitqueue_head(&dp->event_q); @@ -1289,13 +1308,31 @@ static int dp_display_probe(struct platform_device *pdev) platform_set_drvdata(pdev, &dp->dp_display); - rc = component_add(&pdev->dev, &dp_display_comp_ops); - if (rc) { - DRM_ERROR("component add failed, rc=%d\n", rc); - dp_display_deinit_sub_modules(dp); + rc = devm_pm_runtime_enable(&pdev->dev); + if (rc) + goto err; + + rc = dp_display_request_irq(dp); + if (rc) + goto err; + + if (dp->dp_display.is_edp) { + rc = devm_of_dp_aux_populate_bus(dp->aux, dp_auxbus_done_probe); + if (rc) { + DRM_ERROR("eDP auxbus population failed, rc=%d\n", rc); + goto err; + } + } else { + rc = dp_display_probe_tail(&pdev->dev); + if (rc) + goto err; } return rc; + +err: + dp_display_deinit_sub_modules(dp); + return rc; } static void dp_display_remove(struct platform_device *pdev) @@ -1304,113 +1341,50 @@ static void dp_display_remove(struct platform_device *pdev) component_del(&pdev->dev, &dp_display_comp_ops); dp_display_deinit_sub_modules(dp); - platform_set_drvdata(pdev, NULL); } -static int dp_pm_resume(struct device *dev) +static int dp_pm_runtime_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct msm_dp *dp_display = platform_get_drvdata(pdev); - struct dp_display_private *dp; - int sink_count = 0; - - dp = container_of(dp_display, struct dp_display_private, dp_display); - - mutex_lock(&dp->event_mutex); - - drm_dbg_dp(dp->drm_dev, - "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n", - dp->dp_display.connector_type, dp->core_initialized, - dp->phy_initialized, dp_display->power_on); - - /* start from disconnected state */ - dp->hpd_state = ST_DISCONNECTED; - - /* turn on dp 
ctrl/phy */ - dp_display_host_init(dp); - - if (dp_display->is_edp) - dp_catalog_ctrl_hpd_enable(dp->catalog); + struct dp_display_private *dp = dev_get_dp_display_private(dev); - if (dp_catalog_link_is_connected(dp->catalog)) { - /* - * set sink to normal operation mode -- D0 - * before dpcd read - */ - dp_display_host_phy_init(dp); - dp_link_psm_config(dp->link, &dp->panel->link_info, false); - sink_count = drm_dp_read_sink_count(dp->aux); - if (sink_count < 0) - sink_count = 0; + disable_irq(dp->irq); + if (dp->dp_display.is_edp) { dp_display_host_phy_exit(dp); + dp_catalog_ctrl_hpd_disable(dp->catalog); } - - dp->link->sink_count = sink_count; - /* - * can not declared display is connected unless - * HDMI cable is plugged in and sink_count of - * dongle become 1 - * also only signal audio when disconnected - */ - if (dp->link->sink_count) { - dp->dp_display.is_connected = true; - } else { - dp->dp_display.is_connected = false; - dp_display_handle_plugged_change(dp_display, false); - } - - drm_dbg_dp(dp->drm_dev, - "After, type=%d sink=%d conn=%d core_init=%d phy_init=%d power=%d\n", - dp->dp_display.connector_type, dp->link->sink_count, - dp->dp_display.is_connected, dp->core_initialized, - dp->phy_initialized, dp_display->power_on); - - mutex_unlock(&dp->event_mutex); + dp_display_host_deinit(dp); return 0; } -static int dp_pm_suspend(struct device *dev) +static int dp_pm_runtime_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct msm_dp *dp_display = platform_get_drvdata(pdev); - struct dp_display_private *dp; - - dp = container_of(dp_display, struct dp_display_private, dp_display); - - mutex_lock(&dp->event_mutex); - - drm_dbg_dp(dp->drm_dev, - "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n", - dp->dp_display.connector_type, dp->core_initialized, - dp->phy_initialized, dp_display->power_on); - - /* mainlink enabled */ - if (dp_power_clk_status(dp->power, DP_CTRL_PM)) - dp_ctrl_off_link_stream(dp->ctrl); - - dp_display_host_phy_exit(dp); - - /* host_init will be called at pm_resume */ - dp_display_host_deinit(dp); - - dp->hpd_state = ST_SUSPENDED; - - drm_dbg_dp(dp->drm_dev, - "After, type=%d core_inited=%d phy_inited=%d power_on=%d\n", - dp->dp_display.connector_type, dp->core_initialized, - dp->phy_initialized, dp_display->power_on); + struct dp_display_private *dp = dev_get_dp_display_private(dev); - mutex_unlock(&dp->event_mutex); + /* + * for eDP, host controller, HPD block and PHY are enabled here + * but with HPD irq disabled + * + * for DP, only host controller is enabled here.
+ * HPD block is enabled at dp_bridge_hpd_enable() + * PHY will be enabled at plugin handler later + */ + dp_display_host_init(dp); + if (dp->dp_display.is_edp) { + dp_catalog_ctrl_hpd_enable(dp->catalog); + dp_display_host_phy_init(dp); + } + enable_irq(dp->irq); return 0; } static const struct dev_pm_ops dp_pm_ops = { - .suspend = dp_pm_suspend, - .resume = dp_pm_resume, + SET_RUNTIME_PM_OPS(dp_pm_runtime_suspend, dp_pm_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static struct platform_driver dp_display_driver = { @@ -1440,19 +1414,6 @@ void __exit msm_dp_unregister(void) platform_driver_unregister(&dp_display_driver); } -void msm_dp_irq_postinstall(struct msm_dp *dp_display) -{ - struct dp_display_private *dp; - - if (!dp_display) - return; - - dp = container_of(dp_display, struct dp_display_private, dp_display); - - if (!dp_display->is_edp) - dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 0); -} - bool msm_dp_wide_bus_available(const struct msm_dp *dp_display) { struct dp_display_private *dp; @@ -1462,7 +1423,7 @@ bool msm_dp_wide_bus_available(const struct msm_dp *dp_display) return dp->wide_bus_en; } -void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor) +void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, bool is_edp) { struct dp_display_private *dp; struct device *dev; @@ -1473,7 +1434,7 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor) dp->debug = dp_debug_get(dev, dp->panel, dp->link, dp->dp_display.connector, - minor); + root, is_edp); if (IS_ERR(dp->debug)) { rc = PTR_ERR(dp->debug); DRM_ERROR("failed to initialize debug, rc = %d\n", rc); @@ -1485,33 +1446,8 @@ static int dp_display_get_next_bridge(struct msm_dp *dp) { int rc; struct dp_display_private *dp_priv; - struct device_node *aux_bus; - struct device *dev; dp_priv = container_of(dp, struct dp_display_private, dp_display); - dev = &dp_priv->dp_display.pdev->dev; - aux_bus = of_get_child_by_name(dev->of_node, "aux-bus"); - - if (aux_bus && dp->is_edp) { - dp_display_host_init(dp_priv); - dp_catalog_ctrl_hpd_enable(dp_priv->catalog); - dp_display_host_phy_init(dp_priv); - - /* - * The code below assumes that the panel will finish probing - * by the time devm_of_dp_aux_populate_ep_devices() returns. - * This isn't a great assumption since it will fail if the - * panel driver is probed asynchronously but is the best we - * can do without a bigger driver reorganization. - */ - rc = of_dp_aux_populate_bus(dp_priv->aux, NULL); - of_node_put(aux_bus); - if (rc) - goto error; - } else if (dp->is_edp) { - DRM_ERROR("eDP aux_bus not found\n"); - return -ENODEV; - } /* * External bridges are mandatory for eDP interfaces: one has to @@ -1520,21 +1456,13 @@ static int dp_display_get_next_bridge(struct msm_dp *dp) * For DisplayPort interfaces external bridges are optional, so * silently ignore an error if one is not present (-ENODEV). 
*/ - rc = devm_dp_parser_find_next_bridge(dp->drm_dev->dev, dp_priv->parser); + rc = devm_dp_parser_find_next_bridge(&dp->pdev->dev, dp_priv->parser); if (!dp->is_edp && rc == -ENODEV) return 0; - if (!rc) { + if (!rc) dp->next_bridge = dp_priv->parser->next_bridge; - return 0; - } -error: - if (dp->is_edp) { - of_dp_aux_depopulate_bus(dp_priv->aux); - dp_display_host_phy_exit(dp_priv); - dp_display_host_deinit(dp_priv); - } return rc; } @@ -1548,16 +1476,6 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, dp_priv = container_of(dp_display, struct dp_display_private, dp_display); - ret = dp_display_request_irq(dp_display); - if (ret) { - DRM_ERROR("request_irq failed, ret=%d\n", ret); - return ret; - } - - ret = dp_display_get_next_bridge(dp_display); - if (ret) - return ret; - ret = dp_bridge_init(dp_display, dev, encoder); if (ret) { DRM_DEV_ERROR(dev->dev, @@ -1599,6 +1517,11 @@ void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge, dp_hpd_plug_handle(dp_display, 0); mutex_lock(&dp_display->event_mutex); + if (pm_runtime_resume_and_get(&dp->pdev->dev)) { + DRM_ERROR("failed to pm_runtime_resume\n"); + mutex_unlock(&dp_display->event_mutex); + return; + } state = dp_display->hpd_state; if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) { @@ -1663,10 +1586,9 @@ void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, mutex_lock(&dp_display->event_mutex); state = dp_display->hpd_state; - if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) { - mutex_unlock(&dp_display->event_mutex); - return; - } + if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) + drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n", + dp->connector_type, state); dp_display_disable(dp_display); @@ -1679,6 +1601,8 @@ void dp_bridge_atomic_post_disable(struct drm_bridge *drm_bridge, } drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type); + + pm_runtime_put_sync(&dp->pdev->dev); mutex_unlock(&dp_display->event_mutex); } @@ -1717,7 +1641,21 @@ void dp_bridge_hpd_enable(struct drm_bridge *bridge) struct msm_dp *dp_display = dp_bridge->dp_display; struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display); + /* + * this is for the external DP case with HPD IRQ enabled: + * step-1: dp_pm_runtime_resume() enables the DP host only + * step-2: the HPD block is enabled and the HPD IRQ armed here + * step-3: wait for the plugin IRQ while the PHY is still uninitialized + * step-4: the DP PHY is initialized in the plugin handler before link training + * + */ mutex_lock(&dp->event_mutex); + if (pm_runtime_resume_and_get(&dp_display->pdev->dev)) { + DRM_ERROR("failed to resume power\n"); + mutex_unlock(&dp->event_mutex); + return; + } + dp_catalog_ctrl_hpd_enable(dp->catalog); /* enable HPD interrupts */ @@ -1739,6 +1677,8 @@ void dp_bridge_hpd_disable(struct drm_bridge *bridge) dp_catalog_ctrl_hpd_disable(dp->catalog); dp_display->internal_hpd = false; + + pm_runtime_put_sync(&dp_display->pdev->dev); mutex_unlock(&dp->event_mutex); } @@ -1753,13 +1693,8 @@ void dp_bridge_hpd_notify(struct drm_bridge *bridge, if (dp_display->internal_hpd) return; - if (!dp->core_initialized) { - drm_dbg_dp(dp->drm_dev, "not initialized\n"); - return; - } - - if (!dp_display->is_connected && status == connector_status_connected) + if (!dp_display->link_ready && status == connector_status_connected) dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); - else if (dp_display->is_connected && status == connector_status_disconnected) + else if (dp_display->link_ready && status ==
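/*
 * For reference: the hpd_enable()/hpd_disable() pair above has to keep the
 * runtime PM reference count balanced -- every successful
 * pm_runtime_resume_and_get() is paired with exactly one
 * pm_runtime_put_sync(), including on the failure path (where
 * _resume_and_get() has already dropped its own reference). A minimal
 * sketch of the pattern, with hypothetical helpers standing in for the
 * dp_catalog_*() calls:
 *
 *	static void example_hpd_enable(struct example_dp *dp)
 *	{
 *		mutex_lock(&dp->event_mutex);
 *		if (pm_runtime_resume_and_get(dp->dev)) {
 *			// nothing to undo: the ref is dropped on failure
 *			mutex_unlock(&dp->event_mutex);
 *			return;
 *		}
 *		example_enable_hpd_block(dp);	// stand-in for catalog call
 *		mutex_unlock(&dp->event_mutex);
 *	}
 *
 *	static void example_hpd_disable(struct example_dp *dp)
 *	{
 *		mutex_lock(&dp->event_mutex);
 *		example_disable_hpd_block(dp);
 *		pm_runtime_put_sync(dp->dev);	// drops the ref from enable
 *		mutex_unlock(&dp->event_mutex);
 *	}
 */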
connector_status_disconnected) dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); } diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index f66cdbc357..102f3507d8 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -17,7 +17,7 @@ struct msm_dp { struct drm_bridge *bridge; struct drm_connector *connector; struct drm_bridge *next_bridge; - bool is_connected; + bool link_ready; bool audio_enabled; bool power_on; unsigned int connector_type; @@ -36,11 +36,11 @@ struct msm_dp { int dp_display_set_plugged_cb(struct msm_dp *dp_display, hdmi_codec_plugged_cb fn, struct device *codec_dev); int dp_display_get_modes(struct msm_dp *dp_display); -int dp_display_request_irq(struct msm_dp *dp_display); bool dp_display_check_video_test(struct msm_dp *dp_display); int dp_display_get_test_bpp(struct msm_dp *dp_display); void dp_display_signal_audio_start(struct msm_dp *dp_display); void dp_display_signal_audio_complete(struct msm_dp *dp_display); void dp_display_set_psr(struct msm_dp *dp, bool enter); +void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *dentry, bool is_edp); #endif /* _DP_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index e3bdd7dd4c..46e6889037 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -24,10 +24,10 @@ static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge) dp = to_dp_bridge(bridge)->dp_display; - drm_dbg_dp(dp->drm_dev, "is_connected = %s\n", - (dp->is_connected) ? "true" : "false"); + drm_dbg_dp(dp->drm_dev, "link_ready = %s\n", + (dp->link_ready) ? "true" : "false"); - return (dp->is_connected) ? connector_status_connected : + return (dp->link_ready) ? connector_status_connected : connector_status_disconnected; } @@ -40,8 +40,8 @@ static int dp_bridge_atomic_check(struct drm_bridge *bridge, dp = to_dp_bridge(bridge)->dp_display; - drm_dbg_dp(dp->drm_dev, "is_connected = %s\n", - (dp->is_connected) ? "true" : "false"); + drm_dbg_dp(dp->drm_dev, "link_ready = %s\n", + (dp->link_ready) ? "true" : "false"); /* * There is no protection in the DRM framework to check if the display @@ -55,7 +55,7 @@ static int dp_bridge_atomic_check(struct drm_bridge *bridge, * After that this piece of code can be removed. */ if (bridge->ops & DRM_BRIDGE_OP_HPD) - return (dp->is_connected) ? 0 : -ENOTCONN; + return (dp->link_ready) ? 
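/*
 * Note on the rename: judging by the notification path above, link_ready is
 * set only once the plug handler has trained the link successfully, so it
 * is a stronger condition than the raw HPD level -- "cable present" does
 * not imply "link usable". The detect hook therefore reduces to (sketch,
 * not the driver's exact code):
 *
 *	return dp->link_ready ? connector_status_connected
 *			      : connector_status_disconnected;
 */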
0 : -ENOTCONN; return 0; } @@ -78,7 +78,7 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector * dp = to_dp_bridge(bridge)->dp_display; /* pluggable case assumes EDID is read when HPD */ - if (dp->is_connected) { + if (dp->link_ready) { rc = dp_display_get_modes(dp); if (rc <= 0) { DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc); @@ -90,6 +90,13 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector * return rc; } +static void dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +{ + struct msm_dp *dp = to_dp_bridge(bridge)->dp_display; + + dp_display_debugfs_init(dp, root, false); +} + static const struct drm_bridge_funcs dp_bridge_ops = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, @@ -105,6 +112,7 @@ static const struct drm_bridge_funcs dp_bridge_ops = { .hpd_enable = dp_bridge_hpd_enable, .hpd_disable = dp_bridge_hpd_disable, .hpd_notify = dp_bridge_hpd_notify, + .debugfs_init = dp_bridge_debugfs_init, }; static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge, @@ -260,6 +268,13 @@ static enum drm_mode_status edp_bridge_mode_valid(struct drm_bridge *bridge, return MODE_OK; } +static void edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +{ + struct msm_dp *dp = to_dp_bridge(bridge)->dp_display; + + dp_display_debugfs_init(dp, root, true); +} + static const struct drm_bridge_funcs edp_bridge_ops = { .atomic_enable = edp_bridge_atomic_enable, .atomic_disable = edp_bridge_atomic_disable, @@ -270,6 +285,7 @@ static const struct drm_bridge_funcs edp_bridge_ops = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_check = edp_bridge_atomic_check, + .debugfs_init = edp_bridge_debugfs_init, }; int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c index 5cb84ca40e..c4843dd69f 100644 --- a/drivers/gpu/drm/msm/dp/dp_power.c +++ b/drivers/gpu/drm/msm/dp/dp_power.c @@ -152,45 +152,17 @@ int dp_power_client_init(struct dp_power *dp_power) power = container_of(dp_power, struct dp_power_private, dp_power); - pm_runtime_enable(power->dev); - return dp_power_clk_init(power); } -void dp_power_client_deinit(struct dp_power *dp_power) -{ - struct dp_power_private *power; - - power = container_of(dp_power, struct dp_power_private, dp_power); - - pm_runtime_disable(power->dev); -} - int dp_power_init(struct dp_power *dp_power) { - int rc = 0; - struct dp_power_private *power = NULL; - - power = container_of(dp_power, struct dp_power_private, dp_power); - - pm_runtime_get_sync(power->dev); - - rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true); - if (rc) - pm_runtime_put_sync(power->dev); - - return rc; + return dp_power_clk_enable(dp_power, DP_CORE_PM, true); } int dp_power_deinit(struct dp_power *dp_power) { - struct dp_power_private *power; - - power = container_of(dp_power, struct dp_power_private, dp_power); - - dp_power_clk_enable(dp_power, DP_CORE_PM, false); - pm_runtime_put_sync(power->dev); - return 0; + return dp_power_clk_enable(dp_power, DP_CORE_PM, false); } struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser) diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h index a3dec20078..55ada51edb 100644 --- a/drivers/gpu/drm/msm/dp/dp_power.h +++ 
b/drivers/gpu/drm/msm/dp/dp_power.h @@ -80,17 +80,6 @@ int dp_power_clk_enable(struct dp_power *power, enum dp_pm_type pm_type, */ int dp_power_client_init(struct dp_power *power); -/** - * dp_power_clinet_deinit() - de-initialize clock and regulator modules - * - * @power: instance of power module - * return: 0 for success, error for failure. - * - * This API will de-initialize the DisplayPort's clocks and regulator - * modules. - */ -void dp_power_client_deinit(struct dp_power *power); - /** * dp_power_get() - configure and get the DisplayPort power module data * diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index 1f98ff74ce..10ba7d153d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -190,6 +190,21 @@ static const struct msm_dsi_config sm8550_dsi_cfg = { }, }; +static const struct regulator_bulk_data sm8650_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 16600 }, /* 1.2 V */ +}; + +static const struct msm_dsi_config sm8650_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = sm8650_dsi_regulators, + .num_regulators = ARRAY_SIZE(sm8650_dsi_regulators), + .bus_clk_names = dsi_v2_4_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names), + .io_start = { + { 0xae94000, 0xae96000 }, + }, +}; + static const struct regulator_bulk_data sc7280_dsi_regulators[] = { { .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */ { .supply = "refgen" }, @@ -281,6 +296,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0, &sm8550_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_8_0, + &sm8650_dsi_cfg, &msm_dsi_6g_v2_host_ops}, }; const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index 43f0dd74ed..4c9b4b3768 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -28,6 +28,7 @@ #define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000 #define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000 #define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000 +#define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000 #define MSM_DSI_V2_VER_MINOR_8064 0x0 diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index e49ebd9f63..24a347fe29 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -587,6 +587,8 @@ static const struct of_device_id dsi_phy_dt_match[] = { .data = &dsi_phy_5nm_8450_cfgs }, { .compatible = "qcom,sm8550-dsi-phy-4nm", .data = &dsi_phy_4nm_8550_cfgs }, + { .compatible = "qcom,sm8650-dsi-phy-4nm", + .data = &dsi_phy_4nm_8650_cfgs }, #endif {} }; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index 8b640d1747..e4275d3ad5 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -62,6 +62,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs; struct msm_dsi_dphy_timing { u32 clk_zero; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 89a6344bc8..82d015aa2d 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c 
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -1121,6 +1121,10 @@ static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = { { .supply = "vdds", .init_load_uA = 37550 }, }; +static const struct regulator_bulk_data dsi_phy_7nm_98000uA_regulators[] = { + { .supply = "vdds", .init_load_uA = 98000 }, +}; + static const struct regulator_bulk_data dsi_phy_7nm_97800uA_regulators[] = { { .supply = "vdds", .init_load_uA = 97800 }, }; @@ -1281,3 +1285,26 @@ const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs = { .num_dsi_phy = 2, .quirks = DSI_PHY_7NM_QUIRK_V5_2, }; + +const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs = { + .has_phy_lane = true, + .regulator_data = dsi_phy_7nm_98000uA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98000uA_regulators), + .ops = { + .enable = dsi_7nm_phy_enable, + .disable = dsi_7nm_phy_disable, + .pll_init = dsi_pll_7nm_init, + .save_pll_state = dsi_7nm_pll_save_state, + .restore_pll_state = dsi_7nm_pll_restore_state, + .set_continuous_clock = dsi_7nm_set_continuous_clock, + }, + .min_pll_rate = 600000000UL, +#ifdef CONFIG_64BIT + .max_pll_rate = 5000000000UL, +#else + .max_pll_rate = ULONG_MAX, +#endif + .io_start = { 0xae95000, 0xae97000 }, + .num_dsi_phy = 2, + .quirks = DSI_PHY_7NM_QUIRK_V5_2, +}; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c index de182c0048..7aa500d242 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c @@ -249,7 +249,6 @@ struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi) i2c->owner = THIS_MODULE; - i2c->class = I2C_CLASS_DDC; snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c"); i2c->dev.parent = &hdmi->pdev->dev; i2c->algo = &msm_hdmi_i2c_algorithm; diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 04d304eed2..4494f6d1c7 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -304,36 +304,21 @@ int msm_debugfs_late_init(struct drm_device *dev) return ret; } -void msm_debugfs_init(struct drm_minor *minor) +static void msm_debugfs_gpu_init(struct drm_minor *minor) { struct drm_device *dev = minor->dev; struct msm_drm_private *priv = dev->dev_private; struct dentry *gpu_devfreq; - drm_debugfs_create_files(msm_debugfs_list, - ARRAY_SIZE(msm_debugfs_list), - minor->debugfs_root, minor); - debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root, dev, &msm_gpu_fops); - if (priv->kms) { - drm_debugfs_create_files(msm_kms_debugfs_list, - ARRAY_SIZE(msm_kms_debugfs_list), - minor->debugfs_root, minor); - debugfs_create_file("kms", S_IRUSR, minor->debugfs_root, - dev, &msm_kms_fops); - } - debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root, &priv->hangcheck_period); debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root, &priv->disable_err_irq); - debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root, - dev, &shrink_fops); - gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root); debugfs_create_bool("idle_clamp",0600, gpu_devfreq, @@ -344,6 +329,30 @@ void msm_debugfs_init(struct drm_minor *minor) debugfs_create_u32("downdifferential",0600, gpu_devfreq, &priv->gpu_devfreq_config.downdifferential); +} + +void msm_debugfs_init(struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + struct msm_drm_private *priv = dev->dev_private; + + drm_debugfs_create_files(msm_debugfs_list, + ARRAY_SIZE(msm_debugfs_list), + minor->debugfs_root, minor); + + if (priv->gpu_pdev) + msm_debugfs_gpu_init(minor); + + if (priv->kms) { 
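/*
 * The msm_debugfs_init() refactor here moves all GPU-only entries into
 * msm_debugfs_gpu_init(), called only when a GPU platform device was
 * actually bound, so display-only configurations no longer expose dangling
 * gpu/hangcheck/devfreq knobs. Roughly (example_debugfs_kms_init() is a
 * hypothetical stand-in for the KMS block that follows):
 *
 *	void example_debugfs_init(struct drm_minor *minor)
 *	{
 *		struct msm_drm_private *priv = minor->dev->dev_private;
 *
 *		if (priv->gpu_pdev)		// GPU bound
 *			msm_debugfs_gpu_init(minor);
 *		if (priv->kms)			// display bound
 *			example_debugfs_kms_init(minor);
 *	}
 */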
+ drm_debugfs_create_files(msm_kms_debugfs_list, + ARRAY_SIZE(msm_kms_debugfs_list), + minor->debugfs_root, minor); + debugfs_create_file("kms", S_IRUSR, minor->debugfs_root, + dev, &msm_kms_fops); + } + + debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root, + dev, &shrink_fops); if (priv->kms && priv->kms->funcs->debugfs_init) priv->kms->funcs->debugfs_init(priv->kms, minor); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 3f217b5782..50b65ffc24 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -37,9 +37,10 @@ * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT * - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST) + * - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA */ #define MSM_VERSION_MAJOR 1 -#define MSM_VERSION_MINOR 10 +#define MSM_VERSION_MINOR 12 #define MSM_VERSION_PATCHLEVEL 0 static void msm_deinit_vram(struct drm_device *ddev); @@ -544,6 +545,85 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev, return msm_gem_set_iova(obj, ctx->aspace, iova); } +static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj, + __user void *metadata, + u32 metadata_size) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + void *buf; + int ret; + + /* Impose a moderate upper bound on metadata size: */ + if (metadata_size > 128) { + return -EOVERFLOW; + } + + /* Use a temporary buf to keep copy_from_user() outside of gem obj lock: */ + buf = memdup_user(metadata, metadata_size); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + ret = msm_gem_lock_interruptible(obj); + if (ret) + goto out; + + msm_obj->metadata = + krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL); + msm_obj->metadata_size = metadata_size; + memcpy(msm_obj->metadata, buf, metadata_size); + + msm_gem_unlock(obj); + +out: + kfree(buf); + + return ret; +} + +static int msm_ioctl_gem_info_get_metadata(struct drm_gem_object *obj, + __user void *metadata, + u32 *metadata_size) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + void *buf; + int ret, len; + + if (!metadata) { + /* + * Querying the size is inherently racey, but + * EXT_external_objects expects the app to confirm + * via device and driver UUIDs that the exporter and + * importer versions match. 
All we can do from the + * kernel side is check the length under obj lock + * when userspace tries to retrieve the metadata. + */ + *metadata_size = msm_obj->metadata_size; + return 0; + } + + ret = msm_gem_lock_interruptible(obj); + if (ret) + return ret; + + /* Avoid copy_to_user() under gem obj lock: */ + len = msm_obj->metadata_size; + buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL); + + msm_gem_unlock(obj); + + if (*metadata_size < len) { + ret = -ETOOSMALL; + } else if (copy_to_user(metadata, buf, len)) { + ret = -EFAULT; + } else { + *metadata_size = len; + } + + kfree(buf); + + return ret; + } + static int msm_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file) { @@ -566,6 +646,8 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, break; case MSM_INFO_SET_NAME: case MSM_INFO_GET_NAME: + case MSM_INFO_SET_METADATA: + case MSM_INFO_GET_METADATA: break; default: return -EINVAL; @@ -618,7 +700,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, break; case MSM_INFO_GET_NAME: if (args->value && (args->len < strlen(msm_obj->name))) { - ret = -EINVAL; + ret = -ETOOSMALL; break; } args->len = strlen(msm_obj->name); @@ -628,6 +710,14 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, ret = -EFAULT; } break; + case MSM_INFO_SET_METADATA: + ret = msm_ioctl_gem_info_set_metadata( + obj, u64_to_user_ptr(args->value), args->len); + break; + case MSM_INFO_GET_METADATA: + ret = msm_ioctl_gem_info_get_metadata( + obj, u64_to_user_ptr(args->value), &args->len); + break; } drm_gem_object_put(obj); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index cd5bf658df..16a7cbc0b7 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -78,12 +78,10 @@ enum msm_dsi_controller { * enum msm_event_wait - type of HW events to wait for * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel - * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters) */ enum msm_event_wait { MSM_ENC_COMMIT_DONE = 0, MSM_ENC_TX_COMPLETE, - MSM_ENC_VBLANK, }; /** @@ -92,12 +90,14 @@ enum msm_event_wait { * @num_intf: number of interfaces the panel is mounted on * @num_dspp: number of dspp blocks used * @num_dsc: number of Display Stream Compression (DSC) blocks used + * @needs_cdm: indicates whether cdm block is needed for this display topology */ struct msm_display_topology { u32 num_lm; u32 num_intf; u32 num_dspp; u32 num_dsc; + bool needs_cdm; }; /* Commit/Event thread specific structure */ @@ -386,10 +386,8 @@ int __init msm_dp_register(void); void __exit msm_dp_unregister(void); int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, struct drm_encoder *encoder); -void msm_dp_irq_postinstall(struct msm_dp *dp_display); void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display); -void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor); bool msm_dp_wide_bus_available(const struct msm_dp *dp_display); #else @@ -407,19 +405,10 @@ static inline int msm_dp_modeset_init(struct msm_dp *dp_display, return -EINVAL; } -static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display) -{ -} - static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display) { } -static inline void msm_dp_debugfs_init(struct msm_dp *dp_display, - struct drm_minor *minor) -{ -} - static inline bool
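/*
 * The GET_METADATA path above follows the usual two-call size query: call
 * once with a NULL pointer to learn the size, allocate, then call again;
 * -ETOOSMALL is returned when the supplied buffer is shorter than the
 * stored blob. A userspace sketch (error handling trimmed; assumes the
 * msm_drm.h uAPI as merged here):
 *
 *	struct drm_msm_gem_info req = {
 *		.handle = bo_handle,
 *		.info   = MSM_INFO_GET_METADATA,
 *		.value  = 0,			// NULL: size query only
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req); // req.len = stored size
 *	void *buf = malloc(req.len);
 *	req.value = (uintptr_t)buf;
 *	ioctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req); // copies the metadata out
 */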
msm_dp_wide_bus_available(const struct msm_dp *dp_display) { return false; diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index e3f61c39df..80166f702a 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -89,7 +89,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, for (i = 0; i < n; i++) { ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]); - drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)", + drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n", fb->base.id, i, msm_fb->iova[i], ret); if (ret) return ret; @@ -176,7 +176,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, const struct msm_format *format; int ret, i, n; - drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)", + drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)\n", mode_cmd, mode_cmd->width, mode_cmd->height, (char *)&mode_cmd->pixel_format); @@ -232,7 +232,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, refcount_set(&msm_fb->dirtyfb, 1); - drm_dbg_state(dev, "create: FB ID: %d (%p)", fb->base.id, fb); + drm_dbg_state(dev, "create: FB ID: %d (%p)\n", fb->base.id, fb); return fb; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index db1e748daa..175ee4ab8a 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -226,9 +226,9 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj, msm_gem_assert_locked(obj); - if (GEM_WARN_ON(msm_obj->madv > madv)) { - DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n", - msm_obj->madv, madv); + if (msm_obj->madv > madv) { + DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n", + msm_obj->madv, madv); return ERR_PTR(-EBUSY); } @@ -1058,6 +1058,7 @@ static void msm_gem_free_object(struct drm_gem_object *obj) drm_gem_object_release(obj); + kfree(msm_obj->metadata); kfree(msm_obj); } diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 8ddef54431..8d414b072c 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -9,6 +9,7 @@ #include #include +#include "drm/drm_exec.h" #include "drm/gpu_scheduler.h" #include "msm_drv.h" @@ -108,6 +109,10 @@ struct msm_gem_object { char name[32]; /* Identifier to print for the debugfs files */ + /* userspace metadata backchannel */ + void *metadata; + u32 metadata_size; + /** * pin_count: Number of times the pages are pinned * @@ -254,7 +259,7 @@ struct msm_gem_submit { struct msm_gpu *gpu; struct msm_gem_address_space *aspace; struct list_head node; /* node in ring submit list */ - struct ww_acquire_ctx ticket; + struct drm_exec exec; uint32_t seqno; /* Sequence number of the submit on the ring */ /* Hw fence, which is created when the scheduler executes the job, and @@ -270,9 +275,9 @@ struct msm_gem_submit { int fence_id; /* key into queue->fence_idr */ struct msm_gpu_submitqueue *queue; struct pid *pid; /* submitting process */ - bool fault_dumped; /* Limit devcoredump dumping to one per submit */ - bool valid; /* true if no cmdstream patching needed */ - bool in_rb; /* "sudo" mode, copy cmds into RB */ + bool bos_pinned : 1; + bool fault_dumped:1;/* Limit devcoredump dumping to one per submit */ + bool in_rb : 1; /* "sudo" mode, copy cmds into RB */ struct msm_ringbuffer *ring; unsigned int nr_cmds; unsigned int nr_bos; @@ -287,10 +292,6 @@ struct msm_gem_submit { struct drm_msm_gem_submit_reloc *relocs; } *cmd; /* array of size 
nr_cmds */ struct { -/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */ -#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */ -#define BO_LOCKED 0x4000 /* obj lock is held */ -#define BO_PINNED 0x2000 /* obj (pages) is pinned and on active list */ uint32_t flags; union { struct drm_gem_object *obj; diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 5a7d48c02c..07ca4ddfe4 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -75,7 +75,7 @@ static bool wait_for_idle(struct drm_gem_object *obj) { enum dma_resv_usage usage = dma_resv_usage_rw(true); - return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0; + return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0; } static bool diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 99744de6c0..fba7819312 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -17,6 +17,12 @@ #include "msm_gem.h" #include "msm_gpu_trace.h" +/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable + * error msgs for debugging, but we don't spam dmesg by default + */ +#define SUBMIT_ERROR(submit, fmt, ...) \ + DRM_DEV_DEBUG_DRIVER((submit)->dev->dev, fmt, ##__VA_ARGS__) + /* * Cmdstream submission: */ @@ -37,7 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, if (sz > SIZE_MAX) return ERR_PTR(-ENOMEM); - submit = kzalloc(sz, GFP_KERNEL); + submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); if (!submit) return ERR_PTR(-ENOMEM); @@ -48,7 +54,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, return ERR_PTR(ret); } - ret = drm_sched_job_init(&submit->base, queue->entity, queue); + ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue); if (ret) { kfree(submit->hw_fence); kfree(submit); @@ -136,7 +142,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) || !(submit_bo.flags & MANDATORY_FLAGS)) { - DRM_ERROR("invalid flags: %x\n", submit_bo.flags); + SUBMIT_ERROR(submit, "invalid flags: %x\n", submit_bo.flags); ret = -EINVAL; i = 0; goto out; @@ -144,8 +150,6 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, submit->bos[i].handle = submit_bo.handle; submit->bos[i].flags = submit_bo.flags; - /* in validate_objects() we figure out if this is true: */ - submit->bos[i].iova = submit_bo.presumed; } spin_lock(&file->table_lock); @@ -158,7 +162,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, */ obj = idr_find(&file->object_idr, submit->bos[i].handle); if (!obj) { - DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i); + SUBMIT_ERROR(submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i); ret = -EINVAL; goto out_unlock; } @@ -202,13 +206,13 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: break; default: - DRM_ERROR("invalid type: %08x\n", submit_cmd.type); + SUBMIT_ERROR(submit, "invalid type: %08x\n", submit_cmd.type); return -EINVAL; } if (submit_cmd.size % 4) { - DRM_ERROR("non-aligned cmdstream buffer size: %u\n", - submit_cmd.size); + SUBMIT_ERROR(submit, "non-aligned cmdstream buffer size: %u\n", + submit_cmd.size); ret = -EINVAL; goto out; } @@ -228,7 +232,7 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit, ret = -ENOMEM; goto out; } - submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL); + 
submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN); if (!submit->cmd[i].relocs) { ret = -ENOMEM; goto out; @@ -244,101 +248,30 @@ out: return ret; } -/* Unwind bo state, according to cleanup_flags. In the success case, only - * the lock is dropped at the end of the submit (and active/pin ref is dropped - * later when the submit is retired). - */ -static void submit_cleanup_bo(struct msm_gem_submit *submit, int i, - unsigned cleanup_flags) -{ - struct drm_gem_object *obj = submit->bos[i].obj; - unsigned flags = submit->bos[i].flags & cleanup_flags; - - /* - * Clear flags bit before dropping lock, so that the msm_job_run() - * path isn't racing with submit_cleanup() (ie. the read/modify/ - * write is protected by the obj lock in all paths) - */ - submit->bos[i].flags &= ~cleanup_flags; - - if (flags & BO_PINNED) - msm_gem_unpin_locked(obj); - - if (flags & BO_LOCKED) - dma_resv_unlock(obj->resv); -} - -static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) -{ - unsigned cleanup_flags = BO_PINNED | BO_LOCKED; - submit_cleanup_bo(submit, i, cleanup_flags); - - if (!(submit->bos[i].flags & BO_VALID)) - submit->bos[i].iova = 0; -} - /* This is where we make sure all the bo's are reserved and pin'd: */ static int submit_lock_objects(struct msm_gem_submit *submit) { - int contended, slow_locked = -1, i, ret = 0; - -retry: - for (i = 0; i < submit->nr_bos; i++) { - struct drm_gem_object *obj = submit->bos[i].obj; - - if (slow_locked == i) - slow_locked = -1; + int ret; - contended = i; + drm_exec_init(&submit->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, submit->nr_bos); - if (!(submit->bos[i].flags & BO_LOCKED)) { - ret = dma_resv_lock_interruptible(obj->resv, - &submit->ticket); + drm_exec_until_all_locked (&submit->exec) { + for (unsigned i = 0; i < submit->nr_bos; i++) { + struct drm_gem_object *obj = submit->bos[i].obj; + ret = drm_exec_prepare_obj(&submit->exec, obj, 1); + drm_exec_retry_on_contention(&submit->exec); if (ret) - goto fail; - submit->bos[i].flags |= BO_LOCKED; + goto error; } } - ww_acquire_done(&submit->ticket); - return 0; -fail: - if (ret == -EALREADY) { - DRM_ERROR("handle %u at index %u already on submit list\n", - submit->bos[i].handle, i); - ret = -EINVAL; - } - - for (; i >= 0; i--) - submit_unlock_unpin_bo(submit, i); - - if (slow_locked > 0) - submit_unlock_unpin_bo(submit, slow_locked); - - if (ret == -EDEADLK) { - struct drm_gem_object *obj = submit->bos[contended].obj; - /* we lost out in a seqno race, lock and retry.. */ - ret = dma_resv_lock_slow_interruptible(obj->resv, - &submit->ticket); - if (!ret) { - submit->bos[contended].flags |= BO_LOCKED; - slow_locked = contended; - goto retry; - } - - /* Not expecting -EALREADY here, if the bo was already - * locked, we should have gotten -EALREADY already from - * the dma_resv_lock_interruptable() call. - */ - WARN_ON_ONCE(ret == -EALREADY); - } - +error: return ret; } -static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) +static int submit_fence_sync(struct msm_gem_submit *submit) { int i, ret = 0; @@ -346,22 +279,6 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) struct drm_gem_object *obj = submit->bos[i].obj; bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; - /* NOTE: _reserve_shared() must happen before - * _add_shared_fence(), which makes this a slightly - * strange place to call it. OTOH this is a - * convenient can-fail point to hook it in. 
- */ - ret = dma_resv_reserve_fences(obj->resv, 1); - if (ret) - return ret; - - /* If userspace has determined that explicit fencing is - * used, it can disable implicit sync on the entire - * submit: - */ - if (no_implicit) - continue; - /* Otherwise userspace can ask for implicit sync to be * disabled on specific buffers. This is useful for internal * usermode driver managed buffers, suballocation, etc. @@ -384,8 +301,6 @@ static int submit_pin_objects(struct msm_gem_submit *submit) struct msm_drm_private *priv = submit->dev->dev_private; int i, ret = 0; - submit->valid = true; - for (i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = submit->bos[i].obj; struct msm_gem_vma *vma; @@ -401,14 +316,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit) if (ret) break; - if (vma->iova == submit->bos[i].iova) { - submit->bos[i].flags |= BO_VALID; - } else { - submit->bos[i].iova = vma->iova; - /* iova changed, so address in cmdstream is not valid: */ - submit->bos[i].flags &= ~BO_VALID; - submit->valid = false; - } + submit->bos[i].iova = vma->iova; } /* @@ -421,13 +329,28 @@ static int submit_pin_objects(struct msm_gem_submit *submit) mutex_lock(&priv->lru.lock); for (i = 0; i < submit->nr_bos; i++) { msm_gem_pin_obj_locked(submit->bos[i].obj); - submit->bos[i].flags |= BO_PINNED; } mutex_unlock(&priv->lru.lock); + submit->bos_pinned = true; + return ret; } +static void submit_unpin_objects(struct msm_gem_submit *submit) +{ + if (!submit->bos_pinned) + return; + + for (int i = 0; i < submit->nr_bos; i++) { + struct drm_gem_object *obj = submit->bos[i].obj; + + msm_gem_unpin_locked(obj); + } + + submit->bos_pinned = false; +} + static void submit_attach_object_fences(struct msm_gem_submit *submit) { int i; @@ -445,11 +368,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit) } static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, - struct drm_gem_object **obj, uint64_t *iova, bool *valid) + struct drm_gem_object **obj, uint64_t *iova) { if (idx >= submit->nr_bos) { - DRM_ERROR("invalid buffer index: %u (out of %u)\n", - idx, submit->nr_bos); + SUBMIT_ERROR(submit, "invalid buffer index: %u (out of %u)\n", + idx, submit->nr_bos); return -EINVAL; } @@ -457,8 +380,6 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, *obj = submit->bos[idx].obj; if (iova) *iova = submit->bos[idx].iova; - if (valid) - *valid = !!(submit->bos[idx].flags & BO_VALID); return 0; } @@ -471,11 +392,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob uint32_t *ptr; int ret = 0; - if (!nr_relocs) - return 0; - if (offset % 4) { - DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset); + SUBMIT_ERROR(submit, "non-aligned cmdstream buffer: %u\n", offset); return -EINVAL; } @@ -494,11 +412,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob struct drm_msm_gem_submit_reloc submit_reloc = relocs[i]; uint32_t off; uint64_t iova; - bool valid; if (submit_reloc.submit_offset % 4) { - DRM_ERROR("non-aligned reloc offset: %u\n", - submit_reloc.submit_offset); + SUBMIT_ERROR(submit, "non-aligned reloc offset: %u\n", + submit_reloc.submit_offset); ret = -EINVAL; goto out; } @@ -508,18 +425,15 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob if ((off >= (obj->size / 4)) || (off < last_offset)) { - DRM_ERROR("invalid offset %u at reloc %u\n", off, i); + SUBMIT_ERROR(submit, "invalid offset %u at reloc %u\n", off, i); ret = -EINVAL; goto out; } - ret = 
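/*
 * Background for the rework above: drm_exec_until_all_locked() re-runs its
 * block whenever drm_exec_prepare_obj() hits ww-mutex contention (signalled
 * via drm_exec_retry_on_contention()), which replaces the hand-rolled
 * slow-lock/retry logic deleted from submit_lock_objects().
 * drm_exec_prepare_obj() also reserves the requested fence slots, which is
 * why the explicit dma_resv_reserve_fences() call could be dropped from
 * submit_fence_sync(). The idiom in isolation:
 *
 *	struct drm_exec exec;
 *	int ret = 0;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, nr_bos);
 *	drm_exec_until_all_locked(&exec) {
 *		for (unsigned int i = 0; i < nr_bos; i++) {
 *			ret = drm_exec_prepare_obj(&exec, bos[i], 1);
 *			drm_exec_retry_on_contention(&exec);
 *			if (ret)
 *				goto error;
 *		}
 *	}
 *	// ... all reservations held: pin, fence, submit ...
 * error:
 *	drm_exec_fini(&exec);	// drops every lock taken above
 */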
submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid); + ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova); if (ret) goto out; - if (valid) - continue; - iova += submit_reloc.reloc_offset; if (submit_reloc.shift < 0) @@ -544,18 +458,14 @@ out: */ static void submit_cleanup(struct msm_gem_submit *submit, bool error) { - unsigned cleanup_flags = BO_LOCKED; - unsigned i; - - if (error) - cleanup_flags |= BO_PINNED; - - for (i = 0; i < submit->nr_bos; i++) { - struct drm_gem_object *obj = submit->bos[i].obj; - submit_cleanup_bo(submit, i, cleanup_flags); - if (error) - drm_gem_object_put(obj); + if (error) { + submit_unpin_objects(submit); + /* job wasn't enqueued to scheduler, so early retirement: */ + msm_submit_retire(submit); } + + if (submit->exec.objects) + drm_exec_fini(&submit->exec); } void msm_submit_retire(struct msm_gem_submit *submit) @@ -749,7 +659,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_submit_post_dep *post_deps = NULL; struct drm_syncobj **syncobjs_to_reset = NULL; int out_fence_fd = -1; - bool has_ww_ticket = false; unsigned i; int ret; @@ -855,15 +764,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, goto out; /* copy_*_user while holding a ww ticket upsets lockdep */ - ww_acquire_init(&submit->ticket, &reservation_ww_class); - has_ww_ticket = true; ret = submit_lock_objects(submit); if (ret) goto out; - ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT)); - if (ret) - goto out; + if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { + ret = submit_fence_sync(submit); + if (ret) + goto out; + } ret = submit_pin_objects(submit); if (ret) @@ -873,32 +782,27 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_gem_object *obj; uint64_t iova; - ret = submit_bo(submit, submit->cmd[i].idx, - &obj, &iova, NULL); + ret = submit_bo(submit, submit->cmd[i].idx, &obj, &iova); if (ret) goto out; if (!submit->cmd[i].size || ((submit->cmd[i].size + submit->cmd[i].offset) > obj->size / 4)) { - DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4); + SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4); ret = -EINVAL; goto out; } submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4); - if (submit->valid) + if (likely(!submit->cmd[i].nr_relocs)) continue; if (!gpu->allow_relocs) { - if (submit->cmd[i].nr_relocs) { - DRM_ERROR("relocs not allowed\n"); - ret = -EINVAL; - goto out; - } - - continue; + SUBMIT_ERROR(submit, "relocs not allowed\n"); + ret = -EINVAL; + goto out; } ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4, @@ -974,6 +878,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } } + if (ret) + goto out; + submit_attach_object_fences(submit); /* The scheduler owns a ref now: */ @@ -993,8 +900,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, out: submit_cleanup(submit, !!ret); - if (has_ww_ticket) - ww_acquire_fini(&submit->ticket); out_unlock: mutex_unlock(&queue->lock); out_post_unlock: diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5c10b559a5..655002b21b 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -292,8 +292,7 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, /* Set the active crash state to be dumped on failure */ gpu->crashstate = state; - /* FIXME: Release the crashstate if this errors out? 
*/ - dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL, + dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL, msm_gpu_devcoredump_read, msm_gpu_devcoredump_free); } #else @@ -366,29 +365,31 @@ static void recover_worker(struct kthread_work *work) DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name); submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1); - if (submit) { - /* Increment the fault counts */ - submit->queue->faults++; - if (submit->aspace) - submit->aspace->faults++; - get_comm_cmdline(submit, &comm, &cmd); + /* + * If the submit retired while we were waiting for the worker to run, + * or waiting to acquire the gpu lock, then nothing more to do. + */ + if (!submit) + goto out_unlock; - if (comm && cmd) { - DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n", - gpu->name, comm, cmd); + /* Increment the fault counts */ + submit->queue->faults++; + if (submit->aspace) + submit->aspace->faults++; - msm_rd_dump_submit(priv->hangrd, submit, - "offending task: %s (%s)", comm, cmd); - } else { - msm_rd_dump_submit(priv->hangrd, submit, NULL); - } + get_comm_cmdline(submit, &comm, &cmd); + + if (comm && cmd) { + DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n", + gpu->name, comm, cmd); + + msm_rd_dump_submit(priv->hangrd, submit, + "offending task: %s (%s)", comm, cmd); } else { - /* - * We couldn't attribute this fault to any particular context, - * so increment the global fault count instead. - */ - gpu->global_faults++; + DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name); + + msm_rd_dump_submit(priv->hangrd, submit, NULL); } /* Record the crash state */ @@ -441,6 +442,7 @@ static void recover_worker(struct kthread_work *work) pm_runtime_put(&gpu->pdev->dev); +out_unlock: mutex_unlock(&gpu->lock); msm_gpu_retire(gpu); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 4252e3839f..2bfcb222e3 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -347,7 +347,7 @@ struct msm_gpu_perfcntr { * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some * cases, so we don't use it (no need for kernel generated jobs). 
*/ -#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN) +#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH) /** * struct msm_file_private - per-drm_file context diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c index 84c21ec2ce..af6a6fcb11 100644 --- a/drivers/gpu/drm/msm/msm_kms.c +++ b/drivers/gpu/drm/msm/msm_kms.c @@ -149,7 +149,7 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc) struct msm_kms *kms = priv->kms; if (!kms) return -ENXIO; - drm_dbg_vbl(dev, "crtc=%u", crtc->base.id); + drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id); return vblank_ctrl_queue_work(priv, crtc, true); } @@ -160,7 +160,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc) struct msm_kms *kms = priv->kms; if (!kms) return; - drm_dbg_vbl(dev, "crtc=%u", crtc->base.id); + drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id); vblank_ctrl_queue_work(priv, crtc, false); } diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c index 29bb38f0bb..35423d10aa 100644 --- a/drivers/gpu/drm/msm/msm_mdss.c +++ b/drivers/gpu/drm/msm/msm_mdss.c @@ -28,6 +28,8 @@ #define MIN_IB_BW 400000000UL /* Min ib vote 400MB */ +#define DEFAULT_REG_BW 153600 /* Used in mdss fbdev driver */ + struct msm_mdss { struct device *dev; @@ -40,8 +42,9 @@ struct msm_mdss { struct irq_domain *domain; } irq_controller; const struct msm_mdss_data *mdss_data; - struct icc_path *path[2]; - u32 num_paths; + struct icc_path *mdp_path[2]; + u32 num_mdp_paths; + struct icc_path *reg_bus_path; }; static int msm_mdss_parse_data_bus_icc_path(struct device *dev, @@ -49,38 +52,26 @@ static int msm_mdss_parse_data_bus_icc_path(struct device *dev, { struct icc_path *path0; struct icc_path *path1; + struct icc_path *reg_bus_path; - path0 = of_icc_get(dev, "mdp0-mem"); + path0 = devm_of_icc_get(dev, "mdp0-mem"); if (IS_ERR_OR_NULL(path0)) return PTR_ERR_OR_ZERO(path0); - msm_mdss->path[0] = path0; - msm_mdss->num_paths = 1; + msm_mdss->mdp_path[0] = path0; + msm_mdss->num_mdp_paths = 1; - path1 = of_icc_get(dev, "mdp1-mem"); + path1 = devm_of_icc_get(dev, "mdp1-mem"); if (!IS_ERR_OR_NULL(path1)) { - msm_mdss->path[1] = path1; - msm_mdss->num_paths++; + msm_mdss->mdp_path[1] = path1; + msm_mdss->num_mdp_paths++; } - return 0; -} - -static void msm_mdss_put_icc_path(void *data) -{ - struct msm_mdss *msm_mdss = data; - int i; - - for (i = 0; i < msm_mdss->num_paths; i++) - icc_put(msm_mdss->path[i]); -} + reg_bus_path = of_icc_get(dev, "cpu-cfg"); + if (!IS_ERR_OR_NULL(reg_bus_path)) + msm_mdss->reg_bus_path = reg_bus_path; -static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw) -{ - int i; - - for (i = 0; i < msm_mdss->num_paths; i++) - icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw)); + return 0; } static void msm_mdss_irq(struct irq_desc *desc) @@ -236,14 +227,22 @@ const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev) static int msm_mdss_enable(struct msm_mdss *msm_mdss) { - int ret; + int ret, i; /* * Several components have AXI clocks that can only be turned on if * the interconnect is enabled (non-zero bandwidth). Let's make sure * that the interconnects are at least at a minimum amount. 
*/ - msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW); + for (i = 0; i < msm_mdss->num_mdp_paths; i++) + icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW)); + + if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw) + icc_set_bw(msm_mdss->reg_bus_path, 0, + msm_mdss->mdss_data->reg_bus_bw); + else + icc_set_bw(msm_mdss->reg_bus_path, 0, + DEFAULT_REG_BW); ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks); if (ret) { @@ -295,8 +294,15 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss) static int msm_mdss_disable(struct msm_mdss *msm_mdss) { + int i; + clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks); - msm_mdss_icc_request_bw(msm_mdss, 0); + + for (i = 0; i < msm_mdss->num_mdp_paths; i++) + icc_set_bw(msm_mdss->mdp_path[i], 0, 0); + + if (msm_mdss->reg_bus_path) + icc_set_bw(msm_mdss->reg_bus_path, 0, 0); return 0; } @@ -384,6 +390,8 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5 if (!msm_mdss) return ERR_PTR(-ENOMEM); + msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev); + msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss"); if (IS_ERR(msm_mdss->mmio)) return ERR_CAST(msm_mdss->mmio); @@ -391,9 +399,6 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5 dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio); ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss); - if (ret) - return ERR_PTR(ret); - ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss); if (ret) return ERR_PTR(ret); @@ -477,8 +482,6 @@ static int mdss_probe(struct platform_device *pdev) if (IS_ERR(mdss)) return PTR_ERR(mdss); - mdss->mdss_data = of_device_get_match_data(&pdev->dev); - platform_set_drvdata(pdev, mdss); /* @@ -510,11 +513,13 @@ static const struct msm_mdss_data msm8998_data = { .ubwc_enc_version = UBWC_1_0, .ubwc_dec_version = UBWC_1_0, .highest_bank_bit = 2, + .reg_bus_bw = 76800, }; static const struct msm_mdss_data qcm2290_data = { /* no UBWC */ .highest_bank_bit = 0x2, + .reg_bus_bw = 76800, }; static const struct msm_mdss_data sc7180_data = { @@ -522,6 +527,7 @@ static const struct msm_mdss_data sc7180_data = { .ubwc_dec_version = UBWC_2_0, .ubwc_static = 0x1e, .highest_bank_bit = 0x3, + .reg_bus_bw = 76800, }; static const struct msm_mdss_data sc7280_data = { @@ -531,6 +537,7 @@ static const struct msm_mdss_data sc7280_data = { .ubwc_static = 1, .highest_bank_bit = 1, .macrotile_mode = 1, + .reg_bus_bw = 74000, }; static const struct msm_mdss_data sc8180x_data = { @@ -538,6 +545,7 @@ static const struct msm_mdss_data sc8180x_data = { .ubwc_dec_version = UBWC_3_0, .highest_bank_bit = 3, .macrotile_mode = 1, + .reg_bus_bw = 76800, }; static const struct msm_mdss_data sc8280xp_data = { @@ -547,12 +555,21 @@ static const struct msm_mdss_data sc8280xp_data = { .ubwc_static = 1, .highest_bank_bit = 3, .macrotile_mode = 1, + .reg_bus_bw = 76800, +}; + +static const struct msm_mdss_data sdm670_data = { + .ubwc_enc_version = UBWC_2_0, + .ubwc_dec_version = UBWC_2_0, + .highest_bank_bit = 1, + .reg_bus_bw = 76800, }; static const struct msm_mdss_data sdm845_data = { .ubwc_enc_version = UBWC_2_0, .ubwc_dec_version = UBWC_2_0, .highest_bank_bit = 2, + .reg_bus_bw = 76800, }; static const struct msm_mdss_data sm6350_data = { @@ -561,12 +578,14 @@ static const struct msm_mdss_data sm6350_data = { .ubwc_swizzle = 6, .ubwc_static = 0x1e, .highest_bank_bit = 1, + .reg_bus_bw = 76800, }; static const struct 
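/*
 * The interconnect rework above votes two kinds of paths: the per-SoC MDP
 * data paths get a minimum-bandwidth floor so the AXI clocks can turn on,
 * and the single "cpu-cfg" register bus path gets the per-SoC reg_bus_bw
 * vote, falling back to DEFAULT_REG_BW when a SoC entry does not provide
 * one. Condensed from msm_mdss_enable() (icc_set_bw() tolerates a NULL
 * path, so a missing "cpu-cfg" path is harmless):
 *
 *	for (i = 0; i < mdss->num_mdp_paths; i++)
 *		icc_set_bw(mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW));
 *
 *	if (mdss->mdss_data && mdss->mdss_data->reg_bus_bw)
 *		icc_set_bw(mdss->reg_bus_path, 0,
 *			   mdss->mdss_data->reg_bus_bw);
 *	else
 *		icc_set_bw(mdss->reg_bus_path, 0, DEFAULT_REG_BW);
 */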
msm_mdss_data sm8150_data = { .ubwc_enc_version = UBWC_3_0, .ubwc_dec_version = UBWC_3_0, .highest_bank_bit = 2, + .reg_bus_bw = 76800, }; static const struct msm_mdss_data sm6115_data = { @@ -575,6 +594,7 @@ static const struct msm_mdss_data sm6115_data = { .ubwc_swizzle = 7, .ubwc_static = 0x11f, .highest_bank_bit = 0x1, + .reg_bus_bw = 76800, }; static const struct msm_mdss_data sm6125_data = { @@ -592,6 +612,18 @@ static const struct msm_mdss_data sm8250_data = { /* TODO: highest_bank_bit = 2 for LP_DDR4 */ .highest_bank_bit = 3, .macrotile_mode = 1, + .reg_bus_bw = 76800, +}; + +static const struct msm_mdss_data sm8350_data = { + .ubwc_enc_version = UBWC_4_0, + .ubwc_dec_version = UBWC_4_0, + .ubwc_swizzle = 6, + .ubwc_static = 1, + /* TODO: highest_bank_bit = 2 for LP_DDR4 */ + .highest_bank_bit = 3, + .macrotile_mode = 1, + .reg_bus_bw = 74000, }; static const struct msm_mdss_data sm8550_data = { @@ -602,11 +634,13 @@ static const struct msm_mdss_data sm8550_data = { /* TODO: highest_bank_bit = 2 for LP_DDR4 */ .highest_bank_bit = 3, .macrotile_mode = 1, + .reg_bus_bw = 57000, }; static const struct of_device_id mdss_dt_match[] = { { .compatible = "qcom,mdss" }, { .compatible = "qcom,msm8998-mdss", .data = &msm8998_data }, { .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data }, + { .compatible = "qcom,sdm670-mdss", .data = &sdm670_data }, { .compatible = "qcom,sdm845-mdss", .data = &sdm845_data }, { .compatible = "qcom,sc7180-mdss", .data = &sc7180_data }, { .compatible = "qcom,sc7280-mdss", .data = &sc7280_data }, @@ -618,9 +652,10 @@ static const struct of_device_id mdss_dt_match[] = { { .compatible = "qcom,sm6375-mdss", .data = &sm6350_data }, { .compatible = "qcom,sm8150-mdss", .data = &sm8150_data }, { .compatible = "qcom,sm8250-mdss", .data = &sm8250_data }, - { .compatible = "qcom,sm8350-mdss", .data = &sm8250_data }, - { .compatible = "qcom,sm8450-mdss", .data = &sm8250_data }, + { .compatible = "qcom,sm8350-mdss", .data = &sm8350_data }, + { .compatible = "qcom,sm8450-mdss", .data = &sm8350_data }, { .compatible = "qcom,sm8550-mdss", .data = &sm8550_data }, + { .compatible = "qcom,sm8650-mdss", .data = &sm8550_data}, {} }; MODULE_DEVICE_TABLE(of, mdss_dt_match); diff --git a/drivers/gpu/drm/msm/msm_mdss.h b/drivers/gpu/drm/msm/msm_mdss.h index 02bbab42ad..3afef4b178 100644 --- a/drivers/gpu/drm/msm/msm_mdss.h +++ b/drivers/gpu/drm/msm/msm_mdss.h @@ -14,6 +14,7 @@ struct msm_mdss_data { u32 ubwc_static; u32 highest_bank_bit; u32 macrotile_mode; + u32 reg_bus_bw; }; #define UBWC_1_0 0x10000000 diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 5adc51f7ab..ca44fd291c 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -270,6 +270,9 @@ int msm_rd_debugfs_init(struct drm_minor *minor) struct msm_rd_state *rd; int ret; + if (!priv->gpu_pdev) + return 0; + /* only create on first minor: */ if (priv->rd) return 0; diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index a7e152f659..9d6655f96f 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -27,9 +27,10 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job) struct drm_gem_object *obj = submit->bos[i].obj; msm_gem_unpin_active(obj); - submit->bos[i].flags &= ~BO_PINNED; } + submit->bos_pinned = false; + mutex_unlock(&priv->lru.lock); /* TODO move submit path over to using a per-ring lock.. 
*/ @@ -97,7 +98,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, /* currently managing hangcheck ourselves: */ sched_timeout = MAX_SCHEDULE_TIMEOUT; - ret = drm_sched_init(&ring->sched, &msm_sched_ops, + ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, num_hw_submissions, 0, sched_timeout, NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 625c1bfc41..b483ef4821 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -11,9 +11,10 @@ #include #include #include +#include #include -#include #include +#include #include #include @@ -346,18 +347,13 @@ MODULE_DEVICE_TABLE(of, mxsfb_dt_ids); static int mxsfb_probe(struct platform_device *pdev) { struct drm_device *drm; - const struct of_device_id *of_id = - of_match_device(mxsfb_dt_ids, &pdev->dev); int ret; - if (!pdev->dev.of_node) - return -ENODEV; - drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev); if (IS_ERR(drm)) return PTR_ERR(drm); - ret = mxsfb_load(drm, of_id->data); + ret = mxsfb_load(drm, device_get_match_data(&pdev->dev)); if (ret) goto err_free; diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 1e6aaf95ff..ceef470c9f 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -100,3 +100,11 @@ config DRM_NOUVEAU_SVM help Say Y here if you want to enable experimental support for Shared Virtual Memory (SVM). + +config DRM_NOUVEAU_GSP_DEFAULT + bool "Use GSP firmware for Turing/Ampere (needs firmware installed)" + depends on DRM_NOUVEAU + default n + help + Say Y here if you want to use the GSP codepaths by default on + Turing and Ampere GPUs. diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index d093549f6e..8d37a694b7 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -38,7 +38,9 @@ #include #include #include +#include #include +#include #include #include @@ -945,7 +947,8 @@ nv50_msto_prepare(struct drm_atomic_state *state, if (ret == 0) { nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, payload->vc_start_slot, payload->time_slots, - payload->pbn, payload->time_slots * mst_state->pbn_div); + payload->pbn, + payload->time_slots * dfixed_trunc(mst_state->pbn_div)); } else { nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0); } @@ -989,7 +992,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, if (IS_ERR(mst_state)) return PTR_ERR(mst_state); - if (!mst_state->pbn_div) { + if (!mst_state->pbn_div.full) { struct nouveau_encoder *outp = mstc->mstm->outp; mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr, diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 2edd7bb13f..80f74ee0fc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -127,21 +127,16 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16, { struct nouveau_abi16_ntfy *ntfy, *temp; - /* When a client exits without waiting for it's queued up jobs to - * finish it might happen that we fault the channel. This is due to - * drm_file_free() calling drm_gem_release() before the postclose() - * callback. Hence, we can't tear down this scheduler entity before - * uvmm mappings are unmapped. Currently, we can't detect this case. 
- * - * However, this should be rare and harmless, since the channel isn't - * needed anymore. - */ - nouveau_sched_entity_fini(&chan->sched_entity); + /* Cancel all jobs from the entity's queue. */ + if (chan->sched) + drm_sched_entity_fini(&chan->sched->entity); - /* wait for all activity to stop before cleaning up */ if (chan->chan) nouveau_channel_idle(chan->chan); + if (chan->sched) + nouveau_sched_destroy(&chan->sched); + /* cleanup notifier state */ list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) { nouveau_abi16_ntfy_fini(chan, ntfy); @@ -204,6 +199,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_drm *drm = nouveau_drm(dev); struct nvif_device *device = &drm->client.device; + struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device); struct nvkm_gr *gr = nvxx_gr(device); struct drm_nouveau_getparam *getparam = data; struct pci_dev *pdev = to_pci_dev(dev->dev); @@ -268,6 +264,14 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) getparam->value = nouveau_exec_push_max_from_ib_max(ib_max); break; } + case NOUVEAU_GETPARAM_VRAM_BAR_SIZE: + getparam->value = nvkm_device->func->resource_size(nvkm_device, 1); + break; + case NOUVEAU_GETPARAM_VRAM_USED: { + struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); + getparam->value = (u64)ttm_resource_manager_usage(vram_mgr); + break; + } default: NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param); return -EINVAL; @@ -344,10 +348,16 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) if (ret) goto done; - ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched, - drm->sched_wq); - if (ret) - goto done; + /* If we're not using the VM_BIND uAPI, we don't need a scheduler. + * + * The client lock is already acquired by nouveau_abi16_get(). 
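 *
 * [Editor's note: the guard below works because nouveau_cli_uvmm()
 * returns NULL for clients that never initialised a VM via VM_BIND.
 * A minimal sketch of the intent, using the names from this hunk:
 *
 *	if (nouveau_cli_uvmm(cli))	// VM_BIND client: EXEC needs a sched
 *		ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
 *					   chan->chan->dma.ib_max);
 *
 * dma.ib_max serves as the scheduler's credit limit, so no more
 * pushbuffer submissions are kept in flight than the channel's IB ring
 * can hold.]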
+ */ + if (nouveau_cli_uvmm(cli)) { + ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq, + chan->chan->dma.ib_max); + if (ret) + goto done; + } init->channel = chan->chan->chid; diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h index 9f538486c1..11c8c4a800 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.h +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h @@ -26,7 +26,7 @@ struct nouveau_abi16_chan { struct nouveau_bo *ntfy; struct nouveau_vma *ntfy_vma; struct nvkm_mm heap; - struct nouveau_sched_entity sched_entity; + struct nouveau_sched *sched; }; struct nouveau_abi16 { diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 479effcf60..79cfab53f8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -23,6 +23,7 @@ */ #include "nouveau_drv.h" +#include "nouveau_bios.h" #include "nouveau_reg.h" #include "dispnv04/hw.h" #include "nouveau_encoder.h" @@ -1677,7 +1678,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) */ if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) { if (*conn == 0xf2005014 && *conf == 0xffffffff) { - fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1); + fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B); return false; } } @@ -1763,26 +1764,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios) #ifdef __powerpc__ /* Apple iMac G4 NV17 */ if (of_machine_is_compatible("PowerMac4,5")) { - fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1); - fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2); + fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B); + fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C); return; } #endif /* Make up some sane defaults */ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, - bios->legacy.i2c_indices.crt, 1, 1); + bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B); if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0) fabricate_dcb_output(dcb, DCB_OUTPUT_TV, bios->legacy.i2c_indices.tv, - all_heads, 0); + all_heads, DCB_OUTPUT_A); else if (bios->tmds.output0_script_ptr || bios->tmds.output1_script_ptr) fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, bios->legacy.i2c_indices.panel, - all_heads, 1); + all_heads, DCB_OUTPUT_B); } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 254d6c9ef2..5d8ee17295 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -148,10 +148,17 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) * If nouveau_bo_new() allocated this buffer, the GEM object was never * initialized, so don't attempt to release it. */ - if (bo->base.dev) + if (bo->base.dev) { + /* Gem objects not being shared with other VMs get their + * dma_resv from a root GEM object. 
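 *
 * [Editor's note: a sketch of the reference pairing this relies on, as
 * set up by nouveau_gem_new() later in this patch:
 *
 *	// creation: borrow the VM's common dma_resv, pin its GEM object
 *	nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base);
 *	drm_gem_object_get(nvbo->r_obj);
 *	...
 *	// destruction (here): drop it once the BO is done with the resv
 *	drm_gem_object_put(nvbo->r_obj);
 *
 * This guarantees the shared dma_resv outlives every BO pointing at
 * it.]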
+ */ + if (nvbo->no_share) + drm_gem_object_put(nvbo->r_obj); + drm_gem_object_release(&bo->base); - else + } else { dma_resv_fini(&bo->base._resv); + } kfree(nvbo); } @@ -1055,17 +1062,18 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); + struct drm_gem_object *obj = &bo->base; struct ttm_resource *old_reg = bo->resource; struct nouveau_drm_tile *new_tile = NULL; int ret = 0; - if (new_reg->mem_type == TTM_PL_TT) { ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); if (ret) return ret; } + drm_gpuvm_bo_gem_evict(obj, evict); nouveau_bo_move_ntfy(bo, new_reg); ret = ttm_bo_wait_ctx(bo, ctx); if (ret) @@ -1130,6 +1138,7 @@ out: out_ntfy: if (ret) { nouveau_bo_move_ntfy(bo, bo->resource); + drm_gpuvm_bo_gem_evict(obj, !evict); } return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index 07f671cf89..70c551921a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -26,6 +26,11 @@ struct nouveau_bo { struct list_head entry; int pbbo_index; bool validate_mapped; + + /* Root GEM object we derive the dma_resv of in case this BO is not + * shared between VMs. + */ + struct drm_gem_object *r_obj; bool no_share; /* GPU address space is independent of CPU word size */ diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 75545da9d1..a947e1d5f3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -190,6 +190,8 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence, static void nouveau_cli_fini(struct nouveau_cli *cli) { + struct nouveau_uvmm *uvmm = nouveau_cli_uvmm_locked(cli); + /* All our channels are dead now, which means all the fences they * own are signalled, and all callback functions have been called. * @@ -199,8 +201,10 @@ nouveau_cli_fini(struct nouveau_cli *cli) WARN_ON(!list_empty(&cli->worker)); usif_client_fini(cli); - nouveau_uvmm_fini(&cli->uvmm); - nouveau_sched_entity_fini(&cli->sched_entity); + if (cli->sched) + nouveau_sched_destroy(&cli->sched); + if (uvmm) + nouveau_uvmm_fini(uvmm); nouveau_vmm_fini(&cli->svm); nouveau_vmm_fini(&cli->vmm); nvif_mmu_dtor(&cli->mmu); @@ -307,8 +311,17 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, cli->mem = &mems[ret]; - ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched, - drm->sched_wq); + /* Don't pass in the (shared) sched_wq in order to let + * nouveau_sched_create() create a dedicated one for VM_BIND jobs. + * + * This is required to ensure that for VM_BIND jobs free_job() work and + * run_job() work can always run concurrently and hence, free_job() work + * can never stall run_job() work. For EXEC jobs we don't have this + * requirement, since EXEC job's free_job() does not require to take any + * locks which indirectly or directly are held for allocations + * elsewhere. 
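 *
 * [Editor's note: passing a NULL workqueue below makes
 * nouveau_sched_init() (see the nouveau_sched.c hunks further down)
 * allocate a dedicated one, roughly:
 *
 *	if (!wq) {
 *		wq = alloc_workqueue("nouveau_sched_wq_%d", 0,
 *				     WQ_MAX_ACTIVE, current->pid);
 *		...
 *	}
 *
 * and the credit limit of 1 serialises VM_BIND jobs per client. Sketch
 * only; the authoritative code is in the nouveau_sched.c hunks.]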
+ */ + ret = nouveau_sched_create(&cli->sched, drm, NULL, 1); if (ret) goto done; @@ -579,13 +592,16 @@ nouveau_drm_device_init(struct drm_device *dev) nvif_parent_ctor(&nouveau_parent, &drm->parent); drm->master.base.object.parent = &drm->parent; - ret = nouveau_sched_init(drm); - if (ret) + drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0, + WQ_MAX_ACTIVE); + if (!drm->sched_wq) { + ret = -ENOMEM; goto fail_alloc; + } ret = nouveau_cli_init(drm, "DRM-master", &drm->master); if (ret) - goto fail_sched; + goto fail_wq; ret = nouveau_cli_init(drm, "DRM", &drm->client); if (ret) @@ -655,8 +671,8 @@ fail_ttm: nouveau_cli_fini(&drm->client); fail_master: nouveau_cli_fini(&drm->master); -fail_sched: - nouveau_sched_fini(drm); +fail_wq: + destroy_workqueue(drm->sched_wq); fail_alloc: nvif_parent_dtor(&drm->parent); kfree(drm); @@ -710,9 +726,7 @@ nouveau_drm_device_fini(struct drm_device *dev) nouveau_cli_fini(&drm->client); nouveau_cli_fini(&drm->master); - - nouveau_sched_fini(drm); - + destroy_workqueue(drm->sched_wq); nvif_parent_dtor(&drm->parent); mutex_destroy(&drm->clients_lock); kfree(drm); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e73a233c65..e239c6bf4a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -93,9 +93,12 @@ struct nouveau_cli { struct nvif_mmu mmu; struct nouveau_vmm vmm; struct nouveau_vmm svm; - struct nouveau_uvmm uvmm; + struct { + struct nouveau_uvmm *ptr; + bool disabled; + } uvmm; - struct nouveau_sched_entity sched_entity; + struct nouveau_sched *sched; const struct nvif_mclass *mem; @@ -121,10 +124,7 @@ struct nouveau_cli_work { static inline struct nouveau_uvmm * nouveau_cli_uvmm(struct nouveau_cli *cli) { - if (!cli || !cli->uvmm.vmm.cli) - return NULL; - - return &cli->uvmm; + return cli ? cli->uvmm.ptr : NULL; } static inline struct nouveau_uvmm * @@ -258,6 +258,9 @@ struct nouveau_drm { u64 context_base; } *runl; + /* Workqueue used for channel schedulers. */ + struct workqueue_struct *sched_wq; + /* context for accelerated drm-internal operations */ struct nouveau_channel *cechan; struct nouveau_channel *channel; @@ -298,10 +301,6 @@ struct nouveau_drm { struct mutex lock; bool component_registered; } audio; - - struct drm_gpu_scheduler sched; - struct workqueue_struct *sched_wq; - }; static inline struct nouveau_drm * diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index 9a5ef57474..e65c0ef23b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: MIT -#include - #include "nouveau_drv.h" #include "nouveau_gem.h" #include "nouveau_mem.h" @@ -86,14 +84,12 @@ */ static int -nouveau_exec_job_submit(struct nouveau_job *job) +nouveau_exec_job_submit(struct nouveau_job *job, + struct drm_gpuvm_exec *vme) { struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job); struct nouveau_cli *cli = job->cli; struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli); - struct drm_exec *exec = &job->exec; - struct drm_gem_object *obj; - unsigned long index; int ret; /* Create a new fence, but do not emit yet. 
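 *
 * [Editor's note: this is the usual two-phase fence pattern: allocate
 * the fence while failure is still recoverable, emit it only from
 * run_job() where nothing may fail anymore. Sketched, with
 * approximated signatures:
 *
 *	nouveau_fence_create(&fence, chan);   // early: may return -ENOMEM
 *	// ...lock, validate, arm the sched job...
 *	nouveau_fence_emit(fence);            // late: in run_job()
 *
 * Sketch only; see nouveau_exec_job_run() for the emit side.]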
*/ @@ -102,52 +98,29 @@ nouveau_exec_job_submit(struct nouveau_job *job) return ret; nouveau_uvmm_lock(uvmm); - drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT | - DRM_EXEC_IGNORE_DUPLICATES); - drm_exec_until_all_locked(exec) { - struct drm_gpuva *va; - - drm_gpuvm_for_each_va(va, &uvmm->base) { - if (unlikely(va == &uvmm->base.kernel_alloc_node)) - continue; - - ret = drm_exec_prepare_obj(exec, va->gem.obj, 1); - drm_exec_retry_on_contention(exec); - if (ret) - goto err_uvmm_unlock; - } + ret = drm_gpuvm_exec_lock(vme); + if (ret) { + nouveau_uvmm_unlock(uvmm); + return ret; } nouveau_uvmm_unlock(uvmm); - drm_exec_for_each_locked_object(exec, index, obj) { - struct nouveau_bo *nvbo = nouveau_gem_object(obj); - - ret = nouveau_bo_validate(nvbo, true, false); - if (ret) - goto err_exec_fini; + ret = drm_gpuvm_exec_validate(vme); + if (ret) { + drm_gpuvm_exec_unlock(vme); + return ret; } return 0; - -err_uvmm_unlock: - nouveau_uvmm_unlock(uvmm); -err_exec_fini: - drm_exec_fini(exec); - return ret; - } static void -nouveau_exec_job_armed_submit(struct nouveau_job *job) +nouveau_exec_job_armed_submit(struct nouveau_job *job, + struct drm_gpuvm_exec *vme) { - struct drm_exec *exec = &job->exec; - struct drm_gem_object *obj; - unsigned long index; - - drm_exec_for_each_locked_object(exec, index, obj) - dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage); - - drm_exec_fini(exec); + drm_gpuvm_exec_resv_add_fence(vme, job->done_fence, + job->resv_usage, job->resv_usage); + drm_gpuvm_exec_unlock(vme); } static struct dma_fence * @@ -192,6 +165,7 @@ nouveau_exec_job_free(struct nouveau_job *job) { struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job); + nouveau_job_done(job); nouveau_job_free(job); kfree(exec_job->fence); @@ -211,8 +185,6 @@ nouveau_exec_job_timeout(struct nouveau_job *job) NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n", chan->chid); - nouveau_sched_entity_fini(job->entity); - return DRM_GPU_SCHED_STAT_NOMINAL; } @@ -259,10 +231,12 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob, } } + args.file_priv = __args->file_priv; job->chan = __args->chan; - args.sched_entity = __args->sched_entity; - args.file_priv = __args->file_priv; + args.sched = __args->sched; + /* Plus one to account for the HW fence. 
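 *
 * [Editor's note: this uses the reworked drm_sched "credit" model:
 * each job declares a cost at drm_sched_job_init() time, and the
 * scheduler only dispatches jobs while the in-flight credit sum stays
 * within the credit_limit passed to drm_sched_init(). Worked example
 * with illustrative numbers: with ib_max == 64 as the limit, two
 * queued EXEC jobs of 31 pushes each fit exactly, since
 * 2 * (31 pushes + 1 HW fence) == 64.]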
*/ + args.credits = job->push.count + 1; args.in_sync.count = __args->in_sync.count; args.in_sync.s = __args->in_sync.s; @@ -415,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev, if (ret) goto out; - args.sched_entity = &chan16->sched_entity; + args.sched = chan16->sched; args.file_priv = file_priv; args.chan = chan; diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h index 5488d337bc..9b3b151fac 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.h +++ b/drivers/gpu/drm/nouveau/nouveau_exec.h @@ -3,16 +3,12 @@ #ifndef __NOUVEAU_EXEC_H__ #define __NOUVEAU_EXEC_H__ -#include - #include "nouveau_drv.h" #include "nouveau_sched.h" struct nouveau_exec_job_args { struct drm_file *file_priv; - struct nouveau_sched_entity *sched_entity; - - struct drm_exec exec; + struct nouveau_sched *sched; struct nouveau_channel *chan; struct { diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 7b69e6df57..5a887d67dc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -111,7 +111,8 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50) return 0; - if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv) + if (nvbo->no_share && uvmm && + drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv) return -EPERM; ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); @@ -245,7 +246,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, if (unlikely(!uvmm)) return -EINVAL; - resv = &uvmm->resv; + resv = drm_gpuvm_resv(&uvmm->base); } if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART))) @@ -288,6 +289,11 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) nvbo->valid_domains &= domain; + if (nvbo->no_share) { + nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base); + drm_gem_object_get(nvbo->r_obj); + } + *pnvbo = nvbo; return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c index 23cd43a7fd..bf2dc7567e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_platform.c +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c @@ -43,11 +43,10 @@ static int nouveau_platform_probe(struct platform_device *pdev) return 0; } -static int nouveau_platform_remove(struct platform_device *pdev) +static void nouveau_platform_remove(struct platform_device *pdev) { struct drm_device *dev = platform_get_drvdata(pdev); nouveau_drm_device_remove(dev); - return 0; } #if IS_ENABLED(CONFIG_OF) @@ -93,5 +92,5 @@ struct platform_driver nouveau_platform_driver = { .of_match_table = of_match_ptr(nouveau_platform_match), }, .probe = nouveau_platform_probe, - .remove = nouveau_platform_remove, + .remove_new = nouveau_platform_remove, }; diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index 7c376c4ccd..32fa2e2739 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -12,30 +12,28 @@ #include "nouveau_abi16.h" #include "nouveau_sched.h" -/* FIXME - * - * We want to make sure that jobs currently executing can't be deferred by - * other jobs competing for the hardware. Otherwise we might end up with job - * timeouts just because of too many clients submitting too many jobs. We don't - * want jobs to time out because of system load, but because of the job being - * too bulky. 
- * - * For now allow for up to 16 concurrent jobs in flight until we know how many - * rings the hardware can process in parallel. - */ -#define NOUVEAU_SCHED_HW_SUBMISSIONS 16 #define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000 +/* Starts at 0, since the DRM scheduler interprets those parameters as (initial) + * index to the run-queue array. + */ +enum nouveau_sched_priority { + NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_KERNEL, + NOUVEAU_SCHED_PRIORITY_COUNT, +}; + int nouveau_job_init(struct nouveau_job *job, struct nouveau_job_args *args) { - struct nouveau_sched_entity *entity = args->sched_entity; + struct nouveau_sched *sched = args->sched; int ret; + INIT_LIST_HEAD(&job->entry); + job->file_priv = args->file_priv; job->cli = nouveau_cli(args->file_priv); - job->entity = entity; + job->sched = sched; job->sync = args->sync; job->resv_usage = args->resv_usage; @@ -86,10 +84,10 @@ nouveau_job_init(struct nouveau_job *job, ret = -ENOMEM; goto err_free_objs; } - } - ret = drm_sched_job_init(&job->base, &entity->base, NULL); + ret = drm_sched_job_init(&job->base, &sched->entity, + args->credits, NULL); if (ret) goto err_free_chains; @@ -108,6 +106,27 @@ err_free_in_sync: return ret; } +void +nouveau_job_fini(struct nouveau_job *job) +{ + dma_fence_put(job->done_fence); + drm_sched_job_cleanup(&job->base); + + job->ops->free(job); +} + +void +nouveau_job_done(struct nouveau_job *job) +{ + struct nouveau_sched *sched = job->sched; + + spin_lock(&sched->job.list.lock); + list_del(&job->entry); + spin_unlock(&sched->job.list.lock); + + wake_up(&sched->job.wq); +} + void nouveau_job_free(struct nouveau_job *job) { @@ -117,13 +136,6 @@ nouveau_job_free(struct nouveau_job *job) kfree(job->out_sync.chains); } -void nouveau_job_fini(struct nouveau_job *job) -{ - dma_fence_put(job->done_fence); - drm_sched_job_cleanup(&job->base); - job->ops->free(job); -} - static int sync_find_fence(struct nouveau_job *job, struct drm_nouveau_sync *sync, @@ -261,8 +273,13 @@ nouveau_job_fence_attach(struct nouveau_job *job) int nouveau_job_submit(struct nouveau_job *job) { - struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity); + struct nouveau_sched *sched = job->sched; struct dma_fence *done_fence = NULL; + struct drm_gpuvm_exec vm_exec = { + .vm = &nouveau_cli_uvmm(job->cli)->base, + .flags = DRM_EXEC_IGNORE_DUPLICATES, + .num_fences = 1, + }; int ret; ret = nouveau_job_add_deps(job); @@ -276,46 +293,29 @@ nouveau_job_submit(struct nouveau_job *job) /* Make sure the job appears on the sched_entity's queue in the same * order as it was submitted. */ - mutex_lock(&entity->mutex); + mutex_lock(&sched->mutex); /* Guarantee we won't fail after the submit() callback returned * successfully. */ if (job->ops->submit) { - ret = job->ops->submit(job); + ret = job->ops->submit(job, &vm_exec); if (ret) goto err_cleanup; } + /* Submit was successful; add the job to the schedulers job list. */ + spin_lock(&sched->job.list.lock); + list_add(&job->entry, &sched->job.list.head); + spin_unlock(&sched->job.list.lock); + drm_sched_job_arm(&job->base); job->done_fence = dma_fence_get(&job->base.s_fence->finished); if (job->sync) done_fence = dma_fence_get(job->done_fence); - /* If a sched job depends on a dma-fence from a job from the same GPU - * scheduler instance, but a different scheduler entity, the GPU - * scheduler does only wait for the particular job to be scheduled, - * rather than for the job to fully complete. 
This is due to the GPU - * scheduler assuming that there is a scheduler instance per ring. - * However, the current implementation, in order to avoid arbitrary - * amounts of kthreads, has a single scheduler instance while scheduler - * entities represent rings. - * - * As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE for all - * out-fences in order to force the scheduler to wait for full job - * completion for dependent jobs from different entities and same - * scheduler instance. - * - * There is some work in progress [1] to address the issues of firmware - * schedulers; once it is in-tree the scheduler topology in Nouveau - * should be re-worked accordingly. - * - * [1] https://lore.kernel.org/dri-devel/20230801205103.627779-1-matthew.brost@intel.com/ - */ - set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags); - if (job->ops->armed_submit) - job->ops->armed_submit(job); + job->ops->armed_submit(job, &vm_exec); nouveau_job_fence_attach(job); @@ -326,7 +326,7 @@ nouveau_job_submit(struct nouveau_job *job) drm_sched_entity_push_job(&job->base); - mutex_unlock(&entity->mutex); + mutex_unlock(&sched->mutex); if (done_fence) { dma_fence_wait(done_fence, true); @@ -336,20 +336,13 @@ nouveau_job_submit(struct nouveau_job *job) return 0; err_cleanup: - mutex_unlock(&entity->mutex); + mutex_unlock(&sched->mutex); nouveau_job_fence_attach_cleanup(job); err: job->state = NOUVEAU_JOB_SUBMIT_FAILED; return ret; } -bool -nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity, - struct work_struct *work) -{ - return queue_work(entity->sched_wq, work); -} - static struct dma_fence * nouveau_job_run(struct nouveau_job *job) { @@ -399,50 +392,116 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job) nouveau_job_fini(job); } -int nouveau_sched_entity_init(struct nouveau_sched_entity *entity, - struct drm_gpu_scheduler *sched, - struct workqueue_struct *sched_wq) -{ - mutex_init(&entity->mutex); - spin_lock_init(&entity->job.list.lock); - INIT_LIST_HEAD(&entity->job.list.head); - init_waitqueue_head(&entity->job.wq); - - entity->sched_wq = sched_wq; - return drm_sched_entity_init(&entity->base, - DRM_SCHED_PRIORITY_NORMAL, - &sched, 1, NULL); -} - -void -nouveau_sched_entity_fini(struct nouveau_sched_entity *entity) -{ - drm_sched_entity_destroy(&entity->base); -} - static const struct drm_sched_backend_ops nouveau_sched_ops = { .run_job = nouveau_sched_run_job, .timedout_job = nouveau_sched_timedout_job, .free_job = nouveau_sched_free_job, }; -int nouveau_sched_init(struct nouveau_drm *drm) +static int +nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, + struct workqueue_struct *wq, u32 credit_limit) { - struct drm_gpu_scheduler *sched = &drm->sched; + struct drm_gpu_scheduler *drm_sched = &sched->base; + struct drm_sched_entity *entity = &sched->entity; long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS); + int ret; - drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq"); - if (!drm->sched_wq) + if (!wq) { + wq = alloc_workqueue("nouveau_sched_wq_%d", 0, WQ_MAX_ACTIVE, + current->pid); + if (!wq) + return -ENOMEM; + + sched->wq = wq; + } + + ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq, + NOUVEAU_SCHED_PRIORITY_COUNT, + credit_limit, 0, job_hang_limit, + NULL, NULL, "nouveau_sched", drm->dev->dev); + if (ret) + goto fail_wq; + + /* Using DRM_SCHED_PRIORITY_KERNEL, since that's what we're required to use + * when we want to have a single run-queue only. 
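 *
 * [Editor's note: since the scheduler rework the priority enum is
 * index-ordered with DRM_SCHED_PRIORITY_KERNEL == 0, so with a single
 * run-queue it is the only in-bounds value:
 *
 *	sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]   // index 0: valid
 *	sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL]   // out of bounds here
 *
 * The sched_rq field name is scheduler-internal and shown for
 * illustration only.]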
+ * + * It's not documented, but one will find out when trying to use any + * other priority running into faults, because the scheduler uses the + * priority as array index. + * + * Can't use NOUVEAU_SCHED_PRIORITY_SINGLE either, because it's not + * matching the enum type used in drm_sched_entity_init(). + */ + ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_KERNEL, + &drm_sched, 1, NULL); + if (ret) + goto fail_sched; + + mutex_init(&sched->mutex); + spin_lock_init(&sched->job.list.lock); + INIT_LIST_HEAD(&sched->job.list.head); + init_waitqueue_head(&sched->job.wq); + + return 0; + +fail_sched: + drm_sched_fini(drm_sched); +fail_wq: + if (sched->wq) + destroy_workqueue(sched->wq); + return ret; +} + +int +nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm, + struct workqueue_struct *wq, u32 credit_limit) +{ + struct nouveau_sched *sched; + int ret; + + sched = kzalloc(sizeof(*sched), GFP_KERNEL); + if (!sched) return -ENOMEM; - return drm_sched_init(sched, &nouveau_sched_ops, - DRM_SCHED_PRIORITY_COUNT, - NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit, - NULL, NULL, "nouveau_sched", drm->dev->dev); + ret = nouveau_sched_init(sched, drm, wq, credit_limit); + if (ret) { + kfree(sched); + return ret; + } + + *psched = sched; + + return 0; +} + + +static void +nouveau_sched_fini(struct nouveau_sched *sched) +{ + struct drm_gpu_scheduler *drm_sched = &sched->base; + struct drm_sched_entity *entity = &sched->entity; + + rmb(); /* for list_empty to work without lock */ + wait_event(sched->job.wq, list_empty(&sched->job.list.head)); + + drm_sched_entity_fini(entity); + drm_sched_fini(drm_sched); + + /* Destroy workqueue after scheduler tear down, otherwise it might still + * be in use. + */ + if (sched->wq) + destroy_workqueue(sched->wq); } -void nouveau_sched_fini(struct nouveau_drm *drm) +void +nouveau_sched_destroy(struct nouveau_sched **psched) { - destroy_workqueue(drm->sched_wq); - drm_sched_fini(&drm->sched); + struct nouveau_sched *sched = *psched; + + nouveau_sched_fini(sched); + kfree(sched); + + *psched = NULL; } diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h index 27ac197925..e1f01a23e6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.h +++ b/drivers/gpu/drm/nouveau/nouveau_sched.h @@ -5,7 +5,7 @@ #include -#include +#include #include #include "nouveau_drv.h" @@ -26,7 +26,8 @@ enum nouveau_job_state { struct nouveau_job_args { struct drm_file *file_priv; - struct nouveau_sched_entity *sched_entity; + struct nouveau_sched *sched; + u32 credits; enum dma_resv_usage resv_usage; bool sync; @@ -49,12 +50,12 @@ struct nouveau_job { enum nouveau_job_state state; - struct nouveau_sched_entity *entity; + struct nouveau_sched *sched; + struct list_head entry; struct drm_file *file_priv; struct nouveau_cli *cli; - struct drm_exec exec; enum dma_resv_usage resv_usage; struct dma_fence *done_fence; @@ -76,8 +77,8 @@ struct nouveau_job { /* If .submit() returns without any error, it is guaranteed that * armed_submit() is called. 
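 *
 * [Editor's note: a sketch of this contract as driven by
 * nouveau_job_submit() in this patch, names as in the hunks above:
 *
 *	struct drm_gpuvm_exec vme = { .vm = ..., .num_fences = 1 };
 *	ret = job->ops->submit(job, &vme);    // lock (+ validate) via vme
 *	if (!ret) {
 *		drm_sched_job_arm(&job->base);
 *		job->ops->armed_submit(job, &vme); // add fence, unlock
 *	}
 * ]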
*/ - int (*submit)(struct nouveau_job *); - void (*armed_submit)(struct nouveau_job *); + int (*submit)(struct nouveau_job *, struct drm_gpuvm_exec *); + void (*armed_submit)(struct nouveau_job *, struct drm_gpuvm_exec *); struct dma_fence *(*run)(struct nouveau_job *); void (*free)(struct nouveau_job *); enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *); @@ -90,20 +91,17 @@ int nouveau_job_ucopy_syncs(struct nouveau_job_args *args, int nouveau_job_init(struct nouveau_job *job, struct nouveau_job_args *args); -void nouveau_job_free(struct nouveau_job *job); - -int nouveau_job_submit(struct nouveau_job *job); void nouveau_job_fini(struct nouveau_job *job); +int nouveau_job_submit(struct nouveau_job *job); +void nouveau_job_done(struct nouveau_job *job); +void nouveau_job_free(struct nouveau_job *job); -#define to_nouveau_sched_entity(entity) \ - container_of((entity), struct nouveau_sched_entity, base) - -struct nouveau_sched_entity { - struct drm_sched_entity base; +struct nouveau_sched { + struct drm_gpu_scheduler base; + struct drm_sched_entity entity; + struct workqueue_struct *wq; struct mutex mutex; - struct workqueue_struct *sched_wq; - struct { struct { struct list_head head; @@ -113,15 +111,8 @@ struct nouveau_sched_entity { } job; }; -int nouveau_sched_entity_init(struct nouveau_sched_entity *entity, - struct drm_gpu_scheduler *sched, - struct workqueue_struct *sched_wq); -void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity); - -bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity, - struct work_struct *work); - -int nouveau_sched_init(struct nouveau_drm *drm); -void nouveau_sched_fini(struct nouveau_drm *drm); +int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm, + struct workqueue_struct *wq, u32 credit_limit); +void nouveau_sched_destroy(struct nouveau_sched **psched); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 5cf892c50f..ee02cd833c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -62,6 +62,8 @@ struct bind_job_op { enum vm_bind_op op; u32 flags; + struct drm_gpuvm_bo *vm_bo; + struct { u64 addr; u64 range; @@ -436,8 +438,9 @@ nouveau_uvma_region_complete(struct nouveau_uvma_region *reg) static void op_map_prepare_unwind(struct nouveau_uvma *uvma) { + struct drm_gpuva *va = &uvma->va; nouveau_uvma_gem_put(uvma); - drm_gpuva_remove(&uvma->va); + drm_gpuva_remove(va); nouveau_uvma_free(uvma); } @@ -466,6 +469,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm, break; case DRM_GPUVA_OP_REMAP: { struct drm_gpuva_op_remap *r = &op->remap; + struct drm_gpuva *va = r->unmap->va; if (r->next) op_map_prepare_unwind(new->next); @@ -473,7 +477,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm, if (r->prev) op_map_prepare_unwind(new->prev); - op_unmap_prepare_unwind(r->unmap->va); + op_unmap_prepare_unwind(va); break; } case DRM_GPUVA_OP_UNMAP: @@ -604,6 +608,9 @@ op_unmap_prepare(struct drm_gpuva_op_unmap *u) drm_gpuva_unmap(u); } +/* + * Note: @args should not be NULL when calling for a map operation. 
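 *
 * [Editor's note: the old "if (args && vmm_get_range)" check becomes a
 * plain "if (vmm_get_range)" below, so a map call with @args == NULL
 * would now dereference NULL. Sketch of the intended call sites (the
 * real ones are the map/unmap prepare wrappers):
 *
 *	nouveau_uvmm_sm_prepare(uvmm, new, ops, &args);  // map: args valid
 *	nouveau_uvmm_sm_prepare(uvmm, new, ops, NULL);   // unmap: no get
 * ]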
+ */ static int nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, struct nouveau_uvma_prealloc *new, @@ -624,7 +631,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, if (ret) goto unwind; - if (args && vmm_get_range) { + if (vmm_get_range) { ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start, vmm_get_range); if (ret) { @@ -632,6 +639,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, goto unwind; } } + break; } case DRM_GPUVA_OP_REMAP: { @@ -804,15 +812,15 @@ op_remap(struct drm_gpuva_op_remap *r, struct drm_gpuva_op_unmap *u = r->unmap; struct nouveau_uvma *uvma = uvma_from_va(u->va); u64 addr = uvma->va.va.addr; - u64 range = uvma->va.va.range; + u64 end = uvma->va.va.addr + uvma->va.va.range; if (r->prev) addr = r->prev->va.addr + r->prev->va.range; if (r->next) - range = r->next->va.addr - addr; + end = r->next->va.addr; - op_unmap_range(u, addr, range); + op_unmap_range(u, addr, end - addr); } static int @@ -929,25 +937,13 @@ nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm, static int nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range) { - u64 end = addr + range; - u64 kernel_managed_end = uvmm->kernel_managed_addr + - uvmm->kernel_managed_size; - if (addr & ~PAGE_MASK) return -EINVAL; if (range & ~PAGE_MASK) return -EINVAL; - if (end <= addr) - return -EINVAL; - - if (addr < NOUVEAU_VA_SPACE_START || - end > NOUVEAU_VA_SPACE_END) - return -EINVAL; - - if (addr < kernel_managed_end && - end > uvmm->kernel_managed_addr) + if (!drm_gpuvm_range_valid(&uvmm->base, addr, range)) return -EINVAL; return 0; @@ -970,6 +966,12 @@ nouveau_uvmm_bind_job_free(struct kref *kref) { struct nouveau_uvmm_bind_job *job = container_of(kref, struct nouveau_uvmm_bind_job, kref); + struct bind_job_op *op, *next; + + list_for_each_op_safe(op, next, &job->ops) { + list_del(&op->entry); + kfree(op); + } nouveau_job_free(&job->base); kfree(job); @@ -1011,14 +1013,16 @@ bind_validate_op(struct nouveau_job *job, static void bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range) { - struct nouveau_uvmm_bind_job *bind_job; - struct nouveau_sched_entity *entity = job->entity; + struct nouveau_sched *sched = job->sched; + struct nouveau_job *__job; struct bind_job_op *op; u64 end = addr + range; again: - spin_lock(&entity->job.list.lock); - list_for_each_entry(bind_job, &entity->job.list.head, entry) { + spin_lock(&sched->job.list.lock); + list_for_each_entry(__job, &sched->job.list.head, entry) { + struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job); + list_for_each_op(op, &bind_job->ops) { if (op->op == OP_UNMAP) { u64 op_addr = op->va.addr; @@ -1026,7 +1030,7 @@ again: if (!(end <= op_addr || addr >= op_end)) { nouveau_uvmm_bind_job_get(bind_job); - spin_unlock(&entity->job.list.lock); + spin_unlock(&sched->job.list.lock); wait_for_completion(&bind_job->complete); nouveau_uvmm_bind_job_put(bind_job); goto again; @@ -1034,7 +1038,7 @@ again: } } } - spin_unlock(&entity->job.list.lock); + spin_unlock(&sched->job.list.lock); } static int @@ -1113,22 +1117,28 @@ bind_validate_region(struct nouveau_job *job) } static void -bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new) +bind_link_gpuvas(struct bind_job_op *bop) { + struct nouveau_uvma_prealloc *new = &bop->new; + struct drm_gpuvm_bo *vm_bo = bop->vm_bo; + struct drm_gpuva_ops *ops = bop->ops; struct drm_gpuva_op *op; drm_gpuva_for_each_op(op, ops) { switch (op->op) { case DRM_GPUVA_OP_MAP: - drm_gpuva_link(&new->map->va); + drm_gpuva_link(&new->map->va, vm_bo); break; - 
case DRM_GPUVA_OP_REMAP: + case DRM_GPUVA_OP_REMAP: { + struct drm_gpuva *va = op->remap.unmap->va; + if (op->remap.prev) - drm_gpuva_link(&new->prev->va); + drm_gpuva_link(&new->prev->va, va->vm_bo); if (op->remap.next) - drm_gpuva_link(&new->next->va); - drm_gpuva_unlink(op->remap.unmap->va); + drm_gpuva_link(&new->next->va, va->vm_bo); + drm_gpuva_unlink(va); break; + } case DRM_GPUVA_OP_UNMAP: drm_gpuva_unlink(op->unmap.va); break; @@ -1139,21 +1149,70 @@ bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new) } static int -nouveau_uvmm_bind_job_submit(struct nouveau_job *job) +bind_lock_validate(struct nouveau_job *job, struct drm_exec *exec, + unsigned int num_fences) +{ + struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); + struct bind_job_op *op; + int ret; + + list_for_each_op(op, &bind_job->ops) { + struct drm_gpuva_op *va_op; + + if (!op->ops) + continue; + + drm_gpuva_for_each_op(va_op, op->ops) { + struct drm_gem_object *obj = op_gem_obj(va_op); + + if (unlikely(!obj)) + continue; + + ret = drm_exec_prepare_obj(exec, obj, num_fences); + if (ret) + return ret; + + /* Don't validate GEMs backing mappings we're about to + * unmap, it's not worth the effort. + */ + if (va_op->op == DRM_GPUVA_OP_UNMAP) + continue; + + ret = nouveau_bo_validate(nouveau_gem_object(obj), + true, false); + if (ret) + return ret; + } + } + + return 0; +} + +static int +nouveau_uvmm_bind_job_submit(struct nouveau_job *job, + struct drm_gpuvm_exec *vme) { struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); - struct nouveau_sched_entity *entity = job->entity; - struct drm_exec *exec = &job->exec; + struct drm_exec *exec = &vme->exec; struct bind_job_op *op; int ret; list_for_each_op(op, &bind_job->ops) { if (op->op == OP_MAP) { - op->gem.obj = drm_gem_object_lookup(job->file_priv, - op->gem.handle); - if (!op->gem.obj) + struct drm_gem_object *obj = op->gem.obj = + drm_gem_object_lookup(job->file_priv, + op->gem.handle); + if (!obj) return -ENOENT; + + dma_resv_lock(obj->resv, NULL); + op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj); + dma_resv_unlock(obj->resv); + if (IS_ERR(op->vm_bo)) + return PTR_ERR(op->vm_bo); + + drm_gpuvm_bo_extobj_add(op->vm_bo); } ret = bind_validate_op(job, op); @@ -1176,6 +1235,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) * unwind all GPU VA space changes on failure. */ nouveau_uvmm_lock(uvmm); + list_for_each_op(op, &bind_job->ops) { switch (op->op) { case OP_MAP_SPARSE: @@ -1287,55 +1347,13 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) } } - drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT | - DRM_EXEC_IGNORE_DUPLICATES); + drm_exec_init(exec, vme->flags, 0); drm_exec_until_all_locked(exec) { - list_for_each_op(op, &bind_job->ops) { - struct drm_gpuva_op *va_op; - - if (IS_ERR_OR_NULL(op->ops)) - continue; - - drm_gpuva_for_each_op(va_op, op->ops) { - struct drm_gem_object *obj = op_gem_obj(va_op); - - if (unlikely(!obj)) - continue; - - ret = drm_exec_prepare_obj(exec, obj, 1); - drm_exec_retry_on_contention(exec); - if (ret) { - op = list_last_op(&bind_job->ops); - goto unwind; - } - } - } - } - - list_for_each_op(op, &bind_job->ops) { - struct drm_gpuva_op *va_op; - - if (IS_ERR_OR_NULL(op->ops)) - continue; - - drm_gpuva_for_each_op(va_op, op->ops) { - struct drm_gem_object *obj = op_gem_obj(va_op); - - if (unlikely(!obj)) - continue; - - /* Don't validate GEMs backing mappings we're about to - * unmap, it's not worth the effort. 
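 *
 * [Editor's note: this second walk over the ops, and the locking walk
 * above it, are what bind_lock_validate() now folds into a single
 * drm_exec transaction:
 *
 *	drm_exec_until_all_locked(exec) {
 *		ret = bind_lock_validate(job, exec, vme->num_fences);
 *		drm_exec_retry_on_contention(exec); // restart on contention
 *		...
 *	}
 *
 * so a contended object restarts locking and validation together.]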
- */ - if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP)) - continue; - - ret = nouveau_bo_validate(nouveau_gem_object(obj), - true, false); - if (ret) { - op = list_last_op(&bind_job->ops); - goto unwind; - } + ret = bind_lock_validate(job, exec, vme->num_fences); + drm_exec_retry_on_contention(exec); + if (ret) { + op = list_last_op(&bind_job->ops); + goto unwind; } } @@ -1364,7 +1382,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) case OP_UNMAP_SPARSE: case OP_MAP: case OP_UNMAP: - bind_link_gpuvas(op->ops, &op->new); + bind_link_gpuvas(op); break; default: break; @@ -1372,10 +1390,6 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) } nouveau_uvmm_unlock(uvmm); - spin_lock(&entity->job.list.lock); - list_add(&bind_job->entry, &entity->job.list.head); - spin_unlock(&entity->job.list.lock); - return 0; unwind_continue: @@ -1410,21 +1424,17 @@ unwind: } nouveau_uvmm_unlock(uvmm); - drm_exec_fini(exec); + drm_gpuvm_exec_unlock(vme); return ret; } static void -nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job) +nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job, + struct drm_gpuvm_exec *vme) { - struct drm_exec *exec = &job->exec; - struct drm_gem_object *obj; - unsigned long index; - - drm_exec_for_each_locked_object(exec, index, obj) - dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage); - - drm_exec_fini(exec); + drm_gpuvm_exec_resv_add_fence(vme, job->done_fence, + job->resv_usage, job->resv_usage); + drm_gpuvm_exec_unlock(vme); } static struct dma_fence * @@ -1462,14 +1472,11 @@ out: } static void -nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work) +nouveau_uvmm_bind_job_cleanup(struct nouveau_job *job) { - struct nouveau_uvmm_bind_job *bind_job = - container_of(work, struct nouveau_uvmm_bind_job, work); - struct nouveau_job *job = &bind_job->base; + struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli); - struct nouveau_sched_entity *entity = job->entity; - struct bind_job_op *op, *next; + struct bind_job_op *op; list_for_each_op(op, &bind_job->ops) { struct drm_gem_object *obj = op->gem.obj; @@ -1511,42 +1518,27 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work) if (!IS_ERR_OR_NULL(op->ops)) drm_gpuva_ops_free(&uvmm->base, op->ops); + if (!IS_ERR_OR_NULL(op->vm_bo)) { + dma_resv_lock(obj->resv, NULL); + drm_gpuvm_bo_put(op->vm_bo); + dma_resv_unlock(obj->resv); + } + if (obj) drm_gem_object_put(obj); } - spin_lock(&entity->job.list.lock); - list_del(&bind_job->entry); - spin_unlock(&entity->job.list.lock); - + nouveau_job_done(job); complete_all(&bind_job->complete); - wake_up(&entity->job.wq); - - /* Remove and free ops after removing the bind job from the job list to - * avoid races against bind_validate_map_sparse(). 
- */ - list_for_each_op_safe(op, next, &bind_job->ops) { - list_del(&op->entry); - kfree(op); - } nouveau_uvmm_bind_job_put(bind_job); } -static void -nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job) -{ - struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); - struct nouveau_sched_entity *entity = job->entity; - - nouveau_sched_entity_qwork(entity, &bind_job->work); -} - static struct nouveau_job_ops nouveau_bind_job_ops = { .submit = nouveau_uvmm_bind_job_submit, .armed_submit = nouveau_uvmm_bind_job_armed_submit, .run = nouveau_uvmm_bind_job_run, - .free = nouveau_uvmm_bind_job_free_qwork, + .free = nouveau_uvmm_bind_job_cleanup, }; static int @@ -1607,7 +1599,6 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob, return ret; INIT_LIST_HEAD(&job->ops); - INIT_LIST_HEAD(&job->entry); for (i = 0; i < __args->op.count; i++) { ret = bind_job_op_from_uop(&op, &__args->op.s[i]); @@ -1618,11 +1609,12 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob, } init_completion(&job->complete); - INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn); - args.sched_entity = __args->sched_entity; args.file_priv = __args->file_priv; + args.sched = __args->sched; + args.credits = 1; + args.in_sync.count = __args->in_sync.count; args.in_sync.s = __args->in_sync.s; @@ -1648,18 +1640,6 @@ err_free: return ret; } -int -nouveau_uvmm_ioctl_vm_init(struct drm_device *dev, - void *data, - struct drm_file *file_priv) -{ - struct nouveau_cli *cli = nouveau_cli(file_priv); - struct drm_nouveau_vm_init *init = data; - - return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr, - init->kernel_managed_size); -} - static int nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args) { @@ -1760,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev, if (ret) return ret; - args.sched_entity = &cli->sched_entity; + args.sched = cli->sched; args.file_priv = file_priv; ret = nouveau_uvmm_vm_bind(&args); @@ -1776,15 +1756,18 @@ void nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem) { struct drm_gem_object *obj = &nvbo->bo.base; + struct drm_gpuvm_bo *vm_bo; struct drm_gpuva *va; dma_resv_assert_held(obj->resv); - drm_gem_for_each_gpuva(va, obj) { - struct nouveau_uvma *uvma = uvma_from_va(va); + drm_gem_for_each_gpuvm_bo(vm_bo, obj) { + drm_gpuvm_bo_for_each_va(va, vm_bo) { + struct nouveau_uvma *uvma = uvma_from_va(va); - nouveau_uvma_map(uvma, mem); - drm_gpuva_invalidate(va, false); + nouveau_uvma_map(uvma, mem); + drm_gpuva_invalidate(va, false); + } } } @@ -1792,29 +1775,62 @@ void nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo) { struct drm_gem_object *obj = &nvbo->bo.base; + struct drm_gpuvm_bo *vm_bo; struct drm_gpuva *va; dma_resv_assert_held(obj->resv); - drm_gem_for_each_gpuva(va, obj) { - struct nouveau_uvma *uvma = uvma_from_va(va); + drm_gem_for_each_gpuvm_bo(vm_bo, obj) { + drm_gpuvm_bo_for_each_va(va, vm_bo) { + struct nouveau_uvma *uvma = uvma_from_va(va); - nouveau_uvma_unmap(uvma); - drm_gpuva_invalidate(va, true); + nouveau_uvma_unmap(uvma); + drm_gpuva_invalidate(va, true); + } } } +static void +nouveau_uvmm_free(struct drm_gpuvm *gpuvm) +{ + struct nouveau_uvmm *uvmm = uvmm_from_gpuvm(gpuvm); + + kfree(uvmm); +} + +static int +nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec) +{ + struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj); + + return nouveau_bo_validate(nvbo, true, false); +} + +static const struct drm_gpuvm_ops gpuvm_ops = { + .vm_free = nouveau_uvmm_free, + 
.vm_bo_validate = nouveau_uvmm_bo_validate, +}; + int -nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, - u64 kernel_managed_addr, u64 kernel_managed_size) +nouveau_uvmm_ioctl_vm_init(struct drm_device *dev, + void *data, + struct drm_file *file_priv) { + struct nouveau_uvmm *uvmm; + struct nouveau_cli *cli = nouveau_cli(file_priv); + struct drm_device *drm = cli->drm->dev; + struct drm_gem_object *r_obj; + struct drm_nouveau_vm_init *init = data; + u64 kernel_managed_end; int ret; - u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size; - mutex_init(&uvmm->mutex); - dma_resv_init(&uvmm->resv); - mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN); - mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex); + if (check_add_overflow(init->kernel_managed_addr, + init->kernel_managed_size, + &kernel_managed_end)) + return -EINVAL; + + if (kernel_managed_end > NOUVEAU_VA_SPACE_END) + return -EINVAL; mutex_lock(&cli->mutex); @@ -1823,39 +1839,48 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, goto out_unlock; } - if (kernel_managed_end <= kernel_managed_addr) { - ret = -EINVAL; + uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL); + if (!uvmm) { + ret = -ENOMEM; goto out_unlock; } - if (kernel_managed_end > NOUVEAU_VA_SPACE_END) { - ret = -EINVAL; + r_obj = drm_gpuvm_resv_object_alloc(drm); + if (!r_obj) { + kfree(uvmm); + ret = -ENOMEM; goto out_unlock; } - uvmm->kernel_managed_addr = kernel_managed_addr; - uvmm->kernel_managed_size = kernel_managed_size; + mutex_init(&uvmm->mutex); + mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN); + mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex); - drm_gpuvm_init(&uvmm->base, cli->name, + drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj, NOUVEAU_VA_SPACE_START, NOUVEAU_VA_SPACE_END, - kernel_managed_addr, kernel_managed_size, - NULL); + init->kernel_managed_addr, + init->kernel_managed_size, + &gpuvm_ops); + /* GPUVM takes care from here on. 
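 *
 * [Editor's note: a sketch of the resv object's refcount flow,
 * assuming drm_gpuvm_init() takes its own reference on @r_obj:
 *
 *	r_obj = drm_gpuvm_resv_object_alloc(drm);     // ref == 1
 *	drm_gpuvm_init(&uvmm->base, ..., r_obj, ...); // gpuvm holds a ref
 *	drm_gem_object_put(r_obj);                    // drop the local ref
 *	...
 *	drm_gpuvm_put(&uvmm->base);                   // releases r_obj last
 * ]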
*/ + drm_gem_object_put(r_obj); ret = nvif_vmm_ctor(&cli->mmu, "uvmm", cli->vmm.vmm.object.oclass, RAW, - kernel_managed_addr, kernel_managed_size, - NULL, 0, &cli->uvmm.vmm.vmm); + init->kernel_managed_addr, + init->kernel_managed_size, + NULL, 0, &uvmm->vmm.vmm); if (ret) - goto out_free_gpuva_mgr; + goto out_gpuvm_fini; - cli->uvmm.vmm.cli = cli; + uvmm->vmm.cli = cli; + cli->uvmm.ptr = uvmm; mutex_unlock(&cli->mutex); return 0; -out_free_gpuva_mgr: - drm_gpuvm_destroy(&uvmm->base); +out_gpuvm_fini: + drm_gpuvm_put(&uvmm->base); out_unlock: mutex_unlock(&cli->mutex); return ret; @@ -1867,15 +1892,8 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm) MA_STATE(mas, &uvmm->region_mt, 0, 0); struct nouveau_uvma_region *reg; struct nouveau_cli *cli = uvmm->vmm.cli; - struct nouveau_sched_entity *entity = &cli->sched_entity; struct drm_gpuva *va, *next; - if (!cli) - return; - - rmb(); /* for list_empty to work without lock */ - wait_event(entity->job.wq, list_empty(&entity->job.list.head)); - nouveau_uvmm_lock(uvmm); drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) { struct nouveau_uvma *uvma = uvma_from_va(va); @@ -1910,8 +1928,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm) mutex_lock(&cli->mutex); nouveau_vmm_fini(&uvmm->vmm); - drm_gpuvm_destroy(&uvmm->base); + drm_gpuvm_put(&uvmm->base); mutex_unlock(&cli->mutex); - - dma_resv_fini(&uvmm->resv); } diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h index a308c59760..9d3c348581 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h @@ -12,12 +12,6 @@ struct nouveau_uvmm { struct nouveau_vmm vmm; struct maple_tree region_mt; struct mutex mutex; - struct dma_resv resv; - - u64 kernel_managed_addr; - u64 kernel_managed_size; - - bool disabled; }; struct nouveau_uvma_region { @@ -50,8 +44,6 @@ struct nouveau_uvmm_bind_job { struct nouveau_job base; struct kref kref; - struct list_head entry; - struct work_struct work; struct completion complete; /* struct bind_job_op */ @@ -60,7 +52,7 @@ struct nouveau_uvmm_bind_job { struct nouveau_uvmm_bind_job_args { struct drm_file *file_priv; - struct nouveau_sched_entity *sched_entity; + struct nouveau_sched *sched; unsigned int flags; @@ -82,8 +74,6 @@ struct nouveau_uvmm_bind_job_args { #define to_uvmm_bind_job(job) container_of((job), struct nouveau_uvmm_bind_job, base) -int nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, - u64 kernel_managed_addr, u64 kernel_managed_size); void nouveau_uvmm_fini(struct nouveau_uvmm *uvmm); void nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbov, struct nouveau_mem *mem); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c index 87a62d4ff4..7d4716dcd5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c @@ -24,7 +24,6 @@ #include "chan.h" #include "chid.h" #include "cgrp.h" -#include "chid.h" #include "runl.h" #include "priv.h" diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c index 4bf486b571..cb05f7f48a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c @@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name) return ERR_PTR(-EINVAL); } +static void of_fini(void *p) +{ + kfree(p); +} + const struct nvbios_source nvbios_of = { .name = "OpenFirmware", .init = of_init, - .fini = (void(*)(void 
*))kfree, + .fini = of_fini, .read = of_read, .size = of_size, .rw = false, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c index 7bcbc4895e..271bfa038f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c @@ -25,6 +25,7 @@ #include #include +#include void gm107_devinit_disable(struct nvkm_devinit *init) @@ -33,10 +34,13 @@ gm107_devinit_disable(struct nvkm_devinit *init) u32 r021c00 = nvkm_rd32(device, 0x021c00); u32 r021c04 = nvkm_rd32(device, 0x021c04); - if (r021c00 & 0x00000001) - nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0); - if (r021c00 & 0x00000004) - nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2); + /* gsp only wants to enable/disable display */ + if (!nvkm_gsp_rm(device->gsp)) { + if (r021c00 & 0x00000001) + nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0); + if (r021c00 & 0x00000004) + nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2); + } if (r021c04 & 0x00000001) nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c index 11b4c9c274..666eb93b17 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c @@ -41,6 +41,7 @@ r535_devinit_new(const struct nvkm_devinit_func *hw, rm->dtor = r535_devinit_dtor; rm->post = hw->post; + rm->disable = hw->disable; ret = nv50_devinit_new_(rm, device, type, inst, pdevinit); if (ret) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index d66fc35706..a73a5b5897 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -2312,8 +2312,12 @@ r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif) { struct nvkm_subdev *subdev = &gsp->subdev; int ret; + bool enable_gsp = fwif->enable; - if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable)) +#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT) + enable_gsp = true; +#endif + if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp)) return -EINVAL; if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) || diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c index a7f3fc342d..dd5b5a17ec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c @@ -222,8 +222,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory) void __iomem *map = NULL; /* Already mapped? */ - if (refcount_inc_not_zero(&iobj->maps)) + if (refcount_inc_not_zero(&iobj->maps)) { + /* read barrier match the wmb on refcount set */ + smp_rmb(); return iobj->map; + } /* Take the lock, and re-check that another thread hasn't * already mapped the object in the meantime. 
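 *
 * [Editor's note: the smp_rmb() added above pairs with the smp_wmb()
 * added in the next hunk; the usual publish pattern, sketched:
 *
 *	// writer (slow path)            // reader (lockless fast path)
 *	iobj->base.memory.ptrs = ...;    if (refcount_inc_not_zero(&maps)) {
 *	smp_wmb();                               smp_rmb();
 *	refcount_set(&iobj->maps, 1);            return iobj->map;
 *	                                 }
 *
 * Without the pair, the reader could observe maps != 0 while still
 * seeing stale ptrs/map values.]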
@@ -250,6 +253,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory) iobj->base.memory.ptrs = &nv50_instobj_fast; else iobj->base.memory.ptrs = &nv50_instobj_slow; + /* barrier to ensure the ptrs are written before refcount is set */ + smp_wmb(); refcount_set(&iobj->maps, 1); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c index e7e8fdf3ad..29682722b0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp10b.c @@ -28,19 +28,14 @@ static void gp10b_ltc_init(struct nvkm_ltc *ltc) { struct nvkm_device *device = ltc->subdev.device; - struct iommu_fwspec *spec; + u32 sid; nvkm_wr32(device, 0x17e27c, ltc->ltc_nr); nvkm_wr32(device, 0x17e000, ltc->ltc_nr); nvkm_wr32(device, 0x100800, ltc->ltc_nr); - spec = dev_iommu_fwspec_get(device->dev); - if (spec) { - u32 sid = spec->ids[0] & 0xffff; - - /* stream ID */ + if (tegra_dev_iommu_get_stream_id(device->dev, &sid)) nvkm_wr32(device, 0x160000, sid << 2); - } } static const struct nvkm_ltc_func diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index c26aab4939..993691b3cc 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -22,11 +22,11 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include @@ -4765,7 +4765,7 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) if (soc) dispc->feat = soc->data; else - dispc->feat = of_match_device(dispc_of_match, &pdev->dev)->data; + dispc->feat = device_get_match_data(&pdev->dev); r = dispc_errata_i734_wa_init(dispc); if (r) diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 02955f9768..988888e164 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -22,12 +22,13 @@ #include #include #include +#include #include #include #include #include #include -#include +#include #include #include #include @@ -1445,7 +1446,7 @@ static int dss_probe(struct platform_device *pdev) if (soc) dss->feat = soc->data; else - dss->feat = of_match_device(dss_of_match, &pdev->dev)->data; + dss->feat = device_get_match_data(&pdev->dev); /* Map I/O registers, get and setup clocks. 
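 *
 * [Editor's note: the of_match_device() conversions in the dispc and
 * dss hunks above follow the same one-line pattern, sketched:
 *
 *	// before
 *	feat = of_match_device(dss_of_match, &pdev->dev)->data;
 *	// after: fwnode-agnostic, no match-table argument needed
 *	feat = device_get_match_data(&pdev->dev);
 * ]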
*/ dss->base = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index c48fa531ca..3421e83892 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -48,7 +48,7 @@ struct omap_gem_object { * OMAP_BO_MEM_DMA_API flag set) * * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set) - * if they are physically contiguous (when sgt->orig_nents == 1) + * if they are physically contiguous * * - buffers mapped through the TILER when pin_cnt is not zero, in which * case the DMA address points to the TILER aperture @@ -148,12 +148,18 @@ u64 omap_gem_mmap_offset(struct drm_gem_object *obj) return drm_vma_node_offset_addr(&obj->vma_node); } +static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size) +{ + return !(drm_prime_get_contiguous_size(sgt) < size); +} + static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj) { if (omap_obj->flags & OMAP_BO_MEM_DMA_API) return true; - if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1) + if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && + omap_gem_sgt_is_contiguous(omap_obj->sgt, omap_obj->base.size)) return true; return false; @@ -1385,7 +1391,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, union omap_gem_size gsize; /* Without a DMM only physically contiguous buffers can be supported. */ - if (sgt->orig_nents != 1 && !priv->has_dmm) + if (!omap_gem_sgt_is_contiguous(sgt, size) && !priv->has_dmm) return ERR_PTR(-EINVAL); gsize.bytes = PAGE_ALIGN(size); @@ -1399,7 +1405,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, omap_obj->sgt = sgt; - if (sgt->orig_nents == 1) { + if (omap_gem_sgt_is_contiguous(sgt, size)) { omap_obj->dma_addr = sg_dma_address(sgt->sgl); } else { /* Create pages list from sgt */ diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index a4ac4b4777..8f37837422 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -194,6 +194,15 @@ config DRM_PANEL_ILITEK_ILI9341 QVGA (240x320) RGB panels. support serial & parallel rgb interface. +config DRM_PANEL_ILITEK_ILI9805 + tristate "Ilitek ILI9805-based panels" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y if you want to enable support for panels based on the + Ilitek ILI9805 controller. + config DRM_PANEL_ILITEK_ILI9881C tristate "Ilitek ILI9881C-based panels" depends on OF @@ -737,6 +746,15 @@ config DRM_PANEL_SITRONIX_ST7789V Say Y here if you want to enable support for the Sitronix ST7789V controller for 240x320 LCD panels +config DRM_PANEL_SYNAPTICS_R63353 + tristate "Synaptics R63353-based panels" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y if you want to enable support for panels based on the + Synaptics R63353 controller. 
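The omap_gem change above stops equating "one scatterlist entry" with "physically contiguous": an exporter may split a contiguous buffer across several entries, and the DMA layer may merge entries, so entry count proves little. The reliable test is whether the DMA-contiguous prefix covers the whole buffer, which the new omap_gem_sgt_is_contiguous() helper checks via drm_prime_get_contiguous_size(). Illustrative use, assuming the helper as added above:

    /* pick the direct DMA address only when the import is fully contiguous */
    static int omap_gem_import_dma_addr(struct omap_gem_object *omap_obj,
                                        struct sg_table *sgt, size_t size)
    {
            if (!omap_gem_sgt_is_contiguous(sgt, size))
                    return -EINVAL; /* caller must map through TILER/DMM instead */

            omap_obj->dma_addr = sg_dma_address(sgt->sgl);
            return 0;
    }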
+ config DRM_PANEL_SONY_ACX565AKM tristate "Sony ACX565AKM panel" depends on GPIOLIB && OF && SPI diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index d10c3de51c..d94a644d0a 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o +obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9805) += panel-ilitek-ili9805.o obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9882T) += panel-ilitek-ili9882t.o obj-$(CONFIG_DRM_PANEL_INNOLUX_EJ030NA) += panel-innolux-ej030na.o @@ -74,6 +75,7 @@ obj-$(CONFIG_DRM_PANEL_SHARP_LS060T1SX01) += panel-sharp-ls060t1sx01.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7701) += panel-sitronix-st7701.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o +obj-$(CONFIG_DRM_PANEL_SYNAPTICS_R63353) += panel-synaptics-r63353.o obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o obj-$(CONFIG_DRM_PANEL_SONY_TD4353_JDI) += panel-sony-td4353-jdi.o obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index bc08814954..0ffe8f8c01 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -1768,11 +1768,11 @@ static const struct panel_desc starry_qfh032011_53g_desc = { }; static const struct drm_display_mode starry_himax83102_j02_default_mode = { - .clock = 162850, + .clock = 162680, .hdisplay = 1200, - .hsync_start = 1200 + 50, - .hsync_end = 1200 + 50 + 20, - .htotal = 1200 + 50 + 20 + 50, + .hsync_start = 1200 + 60, + .hsync_end = 1200 + 60 + 20, + .htotal = 1200 + 60 + 20 + 40, .vdisplay = 1920, .vsync_start = 1920 + 116, .vsync_end = 1920 + 116 + 8, diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 70feee7876..e5e3f0b9ca 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -588,6 +588,7 @@ static int panel_edp_get_modes(struct drm_panel *panel, { struct panel_edp *p = to_panel_edp(panel); int num = 0; + bool has_hard_coded_modes = p->desc->num_timings || p->desc->num_modes; bool has_override_edid_mode = p->detected_panel && p->detected_panel != ERR_PTR(-EINVAL) && p->detected_panel->override_edid_mode; @@ -598,7 +599,11 @@ static int panel_edp_get_modes(struct drm_panel *panel, if (!p->edid) p->edid = drm_get_edid(connector, p->ddc); - if (p->edid) { + /* + * If both edid and hard-coded modes exists, skip edid modes to + * avoid multiple preferred modes. + */ + if (p->edid && !has_hard_coded_modes) { if (has_override_edid_mode) { /* * override_edid_mode is specified. Use @@ -615,12 +620,7 @@ static int panel_edp_get_modes(struct drm_panel *panel, pm_runtime_put_autosuspend(panel->dev); } - /* - * Add hard-coded panel modes. Don't call this if there are no timings - * and no modes (the generic edp-panel case) because it will clobber - * the display_info that was already set by drm_add_edid_modes(). 
- */ - if (p->desc->num_timings || p->desc->num_modes) + if (has_hard_coded_modes) num += panel_edp_get_non_edid_modes(p, connector); else if (!num) dev_warn(p->base.dev, "No display modes\n"); @@ -982,6 +982,19 @@ static const struct panel_desc auo_b101ean01 = { }, }; +static const struct drm_display_mode auo_b116xa3_mode = { + .clock = 70589, + .hdisplay = 1366, + .hsync_start = 1366 + 40, + .hsync_end = 1366 + 40 + 40, + .htotal = 1366 + 40 + 40 + 32, + .vdisplay = 768, + .vsync_start = 768 + 10, + .vsync_end = 768 + 10 + 12, + .vtotal = 768 + 10 + 12 + 6, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + static const struct drm_display_mode auo_b116xak01_mode = { .clock = 69300, .hdisplay = 1366, @@ -1835,6 +1848,12 @@ static const struct panel_delay delay_200_500_e50 = { .enable = 50, }; +static const struct panel_delay delay_200_500_e80 = { + .hpd_absent = 200, + .unprepare = 500, + .enable = 80, +}; + static const struct panel_delay delay_200_500_e80_d50 = { .hpd_absent = 200, .unprepare = 500, @@ -1854,6 +1873,19 @@ static const struct panel_delay delay_200_500_e200 = { .enable = 200, }; +static const struct panel_delay delay_200_500_e200_d10 = { + .hpd_absent = 200, + .unprepare = 500, + .enable = 200, + .disable = 10, +}; + +static const struct panel_delay delay_200_150_e200 = { + .hpd_absent = 200, + .unprepare = 150, + .enable = 200, +}; + #define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \ { \ .name = _name, \ @@ -1883,39 +1915,76 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x208d, &delay_200_500_e50, "B140HTN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"), - EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"), + EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0", + &auo_b116xa3_mode), EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"), - EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), + EDP_PANEL_ENTRY2('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1", + &auo_b116xa3_mode), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0715, &delay_200_150_e200, "NT116WHM-N21"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0731, &delay_200_500_e80, "NT116WHM-N42"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0741, &delay_200_500_e200, "NT116WHM-N44"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x07f6, &delay_200_500_e200, "NT140FHM-N44"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x08b2, &delay_200_500_e200, "NT140WHM-N49"), 
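	/*
	 * Naming scheme of the shared panel_delay tables used throughout
	 * this list (inferred from the definitions earlier in the file):
	 * delay_<hpd_absent>_<unprepare> plus optional e<enable>,
	 * d<disable> and p2e<prepare_to_enable> suffixes, all in
	 * milliseconds. delay_200_500_e200_d10 thus expands to
	 * { .hpd_absent = 200, .unprepare = 500, .enable = 200,
	 * .disable = 10 }, exactly as initialized above.
	 */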
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09c3, &delay_200_500_e50, "NT116WHM-N21,836X2"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0951, &delay_200_500_e80, "NV116WHM-N47"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ae, &delay_200_500_e200, "NT140FHM-N45"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1138, &innolux_n116bca_ea1.delay, "N116BCA-EA1-RC4"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1139, &delay_200_500_e80_d50, "N116BGE-EA2"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1145, &delay_200_500_e80_d50, "N116BCN-EB1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1152, &delay_200_500_e80_d50, "N116BCN-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1153, &delay_200_500_e80_d50, "N116BGE-EA2"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"), + EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"), + + EDP_PANEL_ENTRY('I', 'V', 'O', 0x048e, &delay_200_500_e200_d10, "M116NWR6 R5"), EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"), EDP_PANEL_ENTRY('I', 'V', 'O', 0x854a, &delay_200_500_p2e100, "M133NW4J"), EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "R133NW4K-R0"), + EDP_PANEL_ENTRY('I', 'V', 'O', 0x8c4d, &delay_200_150_e200, "R140NWFM R1"), EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"), EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"), + EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"), + + EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"), + EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, "LQ140M1JW46"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"), diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c index 
6de1172323..00791ea81e 100644 --- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c +++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Elida kd35t133 5.5" MIPI-DSI panel driver + * Elida kd35t133 3.5" MIPI-DSI panel driver * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH * * based on @@ -43,7 +43,6 @@ struct kd35t133 { struct regulator *vdd; struct regulator *iovcc; enum drm_panel_orientation orientation; - bool prepared; }; static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel) @@ -91,9 +90,6 @@ static int kd35t133_unprepare(struct drm_panel *panel) struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); int ret; - if (!ctx->prepared) - return 0; - ret = mipi_dsi_dcs_set_display_off(dsi); if (ret < 0) dev_err(ctx->dev, "failed to set display off: %d\n", ret); @@ -109,8 +105,6 @@ static int kd35t133_unprepare(struct drm_panel *panel) regulator_disable(ctx->iovcc); regulator_disable(ctx->vdd); - ctx->prepared = false; - return 0; } @@ -120,9 +114,6 @@ static int kd35t133_prepare(struct drm_panel *panel) struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); int ret; - if (ctx->prepared) - return 0; - dev_dbg(ctx->dev, "Resetting the panel\n"); ret = regulator_enable(ctx->vdd); if (ret < 0) { @@ -166,8 +157,6 @@ static int kd35t133_prepare(struct drm_panel *panel) msleep(50); - ctx->prepared = true; - return 0; disable_iovcc: @@ -211,11 +200,6 @@ static int kd35t133_get_modes(struct drm_panel *panel, connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; drm_mode_probed_add(connector, mode); - /* - * TODO: Remove once all drm drivers call - * drm_connector_set_orientation_from_panel() - */ - drm_connector_set_panel_orientation(connector, ctx->orientation); return 1; } @@ -301,27 +285,11 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi) return 0; } -static void kd35t133_shutdown(struct mipi_dsi_device *dsi) -{ - struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi); - int ret; - - ret = drm_panel_unprepare(&ctx->panel); - if (ret < 0) - dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret); - - ret = drm_panel_disable(&ctx->panel); - if (ret < 0) - dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret); -} - static void kd35t133_remove(struct mipi_dsi_device *dsi) { struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi); int ret; - kd35t133_shutdown(dsi); - ret = mipi_dsi_detach(dsi); if (ret < 0) dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); @@ -342,7 +310,6 @@ static struct mipi_dsi_driver kd35t133_driver = { }, .probe = kd35t133_probe, .remove = kd35t133_remove, - .shutdown = kd35t133_shutdown, }; module_mipi_dsi_driver(kd35t133_driver); diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c index c73243d85d..ff0dc08b98 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx8394.c +++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c @@ -38,6 +38,7 @@ #define HX8394_CMD_SETMIPI 0xba #define HX8394_CMD_SETOTP 0xbb #define HX8394_CMD_SETREGBANK 0xbd +#define HX8394_CMD_UNKNOWN5 0xbf #define HX8394_CMD_UNKNOWN1 0xc0 #define HX8394_CMD_SETDGCLUT 0xc1 #define HX8394_CMD_SETID 0xc3 @@ -52,6 +53,7 @@ #define HX8394_CMD_SETGIP1 0xd5 #define HX8394_CMD_SETGIP2 0xd6 #define HX8394_CMD_SETGPO 0xd6 +#define HX8394_CMD_UNKNOWN4 0xd8 #define HX8394_CMD_SETSCALING 0xdd #define HX8394_CMD_SETIDLE 0xdf #define HX8394_CMD_SETGAMMA 0xe0 @@ -68,7 +70,7 @@ struct hx8394 { struct gpio_desc *reset_gpio; 
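	/*
	 * Context for the deletions in this driver and in kd35t133 above:
	 * as of the state tracking in the drm_panel core (panel->prepared
	 * and panel->enabled), double prepare/unprepare and enable/disable
	 * transitions are already caught centrally, so the per-driver
	 * "prepared" flag and the driver-local .shutdown fallback (which
	 * duplicated what the DRM device's own shutdown path does) are
	 * dead weight and are removed.
	 */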
struct regulator *vcc; struct regulator *iovcc; - bool prepared; + enum drm_panel_orientation orientation; const struct hx8394_panel_desc *desc; }; @@ -203,6 +205,140 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = { .init_sequence = hsd060bhw4_init_sequence, }; +static int powkiddy_x55_init_sequence(struct hx8394 *ctx) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + + /* 5.19.8 SETEXTC: Set extension command (B9h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC, + 0xff, 0x83, 0x94); + + /* 5.19.9 SETMIPI: Set MIPI control (BAh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI, + 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0); + + /* 5.19.2 SETPOWER: Set power (B1h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER, + 0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47); + + /* 5.19.3 SETDISP: Set display related register (B2h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP, + 0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f); + + /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC, + 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75, + 0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, + 0x86); + + /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM, + 0x6e, 0x6e); + + /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0, + 0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10, + 0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15, + 0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07, + 0x07, 0x0c, 0x40); + + /* 5.19.20 Set GIP Option1 (D5h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1, + 0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18, + 0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21, + 0x18, 0x18, 0x18, 0x18); + + /* 5.19.21 Set GIP Option2 (D6h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2, + 0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, + 0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18, + 0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24, + 0x18, 0x18, 0x18, 0x18); + + /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA, + 0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, + 0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8, + 0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00, + 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65, + 0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba, + 0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1, + 0x1f, 0x31); + + /* 5.19.17 SETPANEL (CCh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL, + 0x0b); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3, + 0x02); + + /* 5.19.11 Set register bank (BDh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, + 0x02); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff); + + /* 5.19.11 Set register bank (BDh) */ + mipi_dsi_dcs_write_seq(dsi, 
HX8394_CMD_SETREGBANK, + 0x00); + + /* 5.19.11 Set register bank (BDh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, + 0x01); + + /* 5.19.2 SETPOWER: Set power (B1h) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER, + 0x00); + + /* 5.19.11 Set register bank (BDh) */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK, + 0x00); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN5, + 0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01); + + /* Unknown command, not listed in the HX8394-F datasheet */ + mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2, + 0xed); + + return 0; +} + +static const struct drm_display_mode powkiddy_x55_mode = { + .hdisplay = 720, + .hsync_start = 720 + 44, + .hsync_end = 720 + 44 + 20, + .htotal = 720 + 44 + 20 + 20, + .vdisplay = 1280, + .vsync_start = 1280 + 12, + .vsync_end = 1280 + 12 + 10, + .vtotal = 1280 + 12 + 10 + 10, + .clock = 63290, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + .width_mm = 67, + .height_mm = 121, +}; + +static const struct hx8394_panel_desc powkiddy_x55_desc = { + .mode = &powkiddy_x55_mode, + .lanes = 4, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, + .format = MIPI_DSI_FMT_RGB888, + .init_sequence = powkiddy_x55_init_sequence, +}; + static int hx8394_enable(struct drm_panel *panel) { struct hx8394 *ctx = panel_to_hx8394(panel); @@ -262,16 +398,11 @@ static int hx8394_unprepare(struct drm_panel *panel) { struct hx8394 *ctx = panel_to_hx8394(panel); - if (!ctx->prepared) - return 0; - gpiod_set_value_cansleep(ctx->reset_gpio, 1); regulator_disable(ctx->iovcc); regulator_disable(ctx->vcc); - ctx->prepared = false; - return 0; } @@ -280,9 +411,6 @@ static int hx8394_prepare(struct drm_panel *panel) struct hx8394 *ctx = panel_to_hx8394(panel); int ret; - if (ctx->prepared) - return 0; - gpiod_set_value_cansleep(ctx->reset_gpio, 1); ret = regulator_enable(ctx->vcc); @@ -301,8 +429,6 @@ static int hx8394_prepare(struct drm_panel *panel) msleep(180); - ctx->prepared = true; - return 0; disable_vcc: @@ -335,12 +461,20 @@ static int hx8394_get_modes(struct drm_panel *panel, return 1; } +static enum drm_panel_orientation hx8394_get_orientation(struct drm_panel *panel) +{ + struct hx8394 *ctx = panel_to_hx8394(panel); + + return ctx->orientation; +} + static const struct drm_panel_funcs hx8394_drm_funcs = { .disable = hx8394_disable, .unprepare = hx8394_unprepare, .prepare = hx8394_prepare, .enable = hx8394_enable, .get_modes = hx8394_get_modes, + .get_orientation = hx8394_get_orientation, }; static int hx8394_probe(struct mipi_dsi_device *dsi) @@ -358,6 +492,12 @@ static int hx8394_probe(struct mipi_dsi_device *dsi) return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "Failed to get reset gpio\n"); + ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation); + if (ret < 0) { + dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, ret); + return ret; + } + mipi_dsi_set_drvdata(dsi, ctx); ctx->dev = dev; @@ -401,27 +541,11 @@ static int hx8394_probe(struct mipi_dsi_device *dsi) return 0; } -static void hx8394_shutdown(struct mipi_dsi_device *dsi) -{ - struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi); - int ret; - - ret = drm_panel_disable(&ctx->panel); - if (ret < 0) - dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret); - - ret = drm_panel_unprepare(&ctx->panel); - if (ret < 0) - dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret); -} - static void hx8394_remove(struct 
mipi_dsi_device *dsi) { struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi); int ret; - hx8394_shutdown(dsi); - ret = mipi_dsi_detach(dsi); if (ret < 0) dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); @@ -431,6 +555,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi) static const struct of_device_id hx8394_of_match[] = { { .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc }, + { .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, hx8394_of_match); @@ -438,7 +563,6 @@ MODULE_DEVICE_TABLE(of, hx8394_of_match); static struct mipi_dsi_driver hx8394_driver = { .probe = hx8394_probe, .remove = hx8394_remove, - .shutdown = hx8394_shutdown, .driver = { .name = DRV_NAME, .of_match_table = hx8394_of_match, diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9805.c b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c new file mode 100644 index 0000000000..1cbc25758b --- /dev/null +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 BSH Hausgerate GmbH + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include
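The new panel-ilitek-ili9805.c driver continues beyond this excerpt. For orientation, a minimal sketch of the prepare path such a DSI panel driver typically implements, mirroring the kd35t133/hx8394 structure above; all names, delays and the reset polarity are placeholders, not the actual ILI9805 code:

    #include <linux/delay.h>
    #include <linux/gpio/consumer.h>
    #include <linux/regulator/consumer.h>

    #include <drm/drm_mipi_dsi.h>
    #include <drm/drm_panel.h>

    struct ili9805_sketch {
            struct drm_panel panel;
            struct regulator *power;
            struct gpio_desc *reset_gpio;   /* asserted = controller in reset */
    };

    static int ili9805_sketch_prepare(struct drm_panel *panel)
    {
            struct ili9805_sketch *ctx =
                    container_of(panel, struct ili9805_sketch, panel);
            int ret;

            ret = regulator_enable(ctx->power);
            if (ret)
                    return ret;

            /* hold reset, release it, then let the controller settle */
            gpiod_set_value_cansleep(ctx->reset_gpio, 1);
            usleep_range(10000, 11000);
            gpiod_set_value_cansleep(ctx->reset_gpio, 0);
            msleep(120);    /* placeholder: datasheet sleep-out time */

            return 0;       /* the DCS init sequence would be sent from here */
    }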